Revision 9ca8a7c5

--- a/doc/iallocator.rst
+++ b/doc/iallocator.rst
@@ -111,7 +111,7 @@
   cluster, indexed by instance name; the contents are similar to the
   instance definitions for the allocate mode, with the addition of:
 
-  admin_up
+  admin_state
     if this instance is set to run (but not the actual status of the
     instance)
 
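This revision renames the instance field admin_up to admin_state throughout the tree (documentation, cmdlib, config, objects, query and the query unit tests). As a purely illustrative sketch of the documented field above, one entry of the relocate-mode "instances" dictionary could look roughly as follows after the rename; only admin_state and the neighbouring keys visible in the lib/cmdlib.py hunk around line 13489 below (tags, vcpus, memory, os) come from this change, all values are invented:

  # Hypothetical iallocator input fragment, shown as a Python literal for
  # readability; the field set is abridged and the values are made up.
  example_relocate_instance = {
    "tags": [],
    "admin_state": True,   # was "admin_up": the desired state, not the actual status
    "vcpus": 1,
    "memory": 128,
    "os": "debian-image",
    "nics": [],
    "disks": [],
  }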

  
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -905,7 +905,7 @@
 
 def _CheckInstanceDown(lu, instance, reason):
   """Ensure that an instance is not running."""
-  if instance.admin_up:
+  if instance.admin_state:
     raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                                (instance.name, reason), errors.ECODE_STATE)
 
@@ -1088,7 +1088,7 @@
     "primary_node": instance.primary_node,
     "secondary_nodes": instance.secondary_nodes,
     "os_type": instance.os,
-    "status": instance.admin_up,
+    "status": instance.admin_state,
     "memory": bep[constants.BE_MEMORY],
     "vcpus": bep[constants.BE_VCPUS],
     "nics": _NICListToTuple(lu, instance.nics),
@@ -2033,7 +2033,7 @@
         _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
                  "volume %s missing on node %s", volume, node)
 
-    if instanceconfig.admin_up:
+    if instanceconfig.admin_state:
       pri_img = node_image[node_current]
       test = instance not in pri_img.instances and not pri_img.offline
       _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
@@ -2049,11 +2049,11 @@
       # node here
       snode = node_image[nname]
       bad_snode = snode.ghost or snode.offline
-      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
+      _ErrorIf(instanceconfig.admin_state and not success and not bad_snode,
               constants.CV_EINSTANCEFAULTYDISK, instance,
               "couldn't retrieve status for disk/%s on %s: %s",
               idx, nname, bdev_status)
-      _ErrorIf((instanceconfig.admin_up and success and
+      _ErrorIf((instanceconfig.admin_state and success and
                bdev_status.ldisk_status == constants.LDS_FAULTY),
               constants.CV_EINSTANCEFAULTYDISK, instance,
               "disk/%s on %s is faulty", idx, nname)
@@ -2262,7 +2262,7 @@
         node_drbd[minor] = (instance, False)
       else:
         instance = instanceinfo[instance]
-        node_drbd[minor] = (instance.name, instance.admin_up)
+        node_drbd[minor] = (instance.name, instance.admin_state)
 
     # and now check them
     used_minors = nresult.get(constants.NV_DRBDLIST, [])
@@ -2910,7 +2910,7 @@
                constants.CV_ENODERPC, pnode, "instance %s, connection to"
                " primary node failed", instance)
 
-      _ErrorIf(inst_config.admin_up and pnode_img.offline,
+      _ErrorIf(inst_config.admin_state and pnode_img.offline,
               constants.CV_EINSTANCEBADNODE, instance,
               "instance is marked as running and lives on offline node %s",
               inst_config.primary_node)
@@ -3165,7 +3165,7 @@
 
     nv_dict = _MapInstanceDisksToNodes([inst
                                         for inst in self.instances.values()
-                                        if inst.admin_up])
+                                        if inst.admin_state])
 
     if nv_dict:
       nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
@@ -7061,7 +7061,7 @@
     _CheckNodeNotDrained(self, target_node)
     _CheckNodeVmCapable(self, target_node)
 
-    if instance.admin_up:
+    if instance.admin_state:
       # check memory requirements on the secondary node
       _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                            instance.name, bep[constants.BE_MEMORY],
@@ -7155,7 +7155,7 @@
     _RemoveDisks(self, instance, target_node=source_node)
 
     # Only start the instance if it's marked as up
-    if instance.admin_up:
+    if instance.admin_state:
       self.LogInfo("Starting instance %s on node %s",
                    instance.name, target_node)
 
@@ -7293,7 +7293,7 @@
     assert instance is not None
     self.instance = instance
 
-    if (not self.cleanup and not instance.admin_up and not self.failover and
+    if (not self.cleanup and not instance.admin_state and not self.failover and
         self.fallback):
       self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
                       " to failover")
@@ -7355,7 +7355,7 @@
     i_be = self.cfg.GetClusterInfo().FillBE(instance)
 
     # check memory requirements on the secondary node
-    if not self.failover or instance.admin_up:
+    if not self.failover or instance.admin_state:
       _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                            instance.name, i_be[constants.BE_MEMORY],
                            instance.hypervisor)
@@ -7772,7 +7772,7 @@
     source_node = instance.primary_node
     target_node = self.target_node
 
-    if instance.admin_up:
+    if instance.admin_state:
       self.feedback_fn("* checking disk consistency between source and target")
       for dev in instance.disks:
         # for drbd, these are drbd over lvm
@@ -7815,7 +7815,7 @@
     self.cfg.Update(instance, self.feedback_fn)
 
     # Only start the instance if it's marked as up
-    if instance.admin_up:
+    if instance.admin_state:
       self.feedback_fn("* activating the instance's disks on target node %s" %
                        target_node)
       logging.info("Starting instance %s on node %s",
@@ -9217,7 +9217,7 @@
                             primary_node=pnode_name,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
-                            admin_up=False,
+                            admin_state=False,
                             network_port=network_port,
                             beparams=self.op.beparams,
                             hvparams=self.op.hvparams,
@@ -9397,7 +9397,7 @@
     assert not self.owned_locks(locking.LEVEL_NODE_RES)
 
     if self.op.start:
-      iobj.admin_up = True
+      iobj.admin_state = True
       self.cfg.Update(iobj, feedback_fn)
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
@@ -9445,7 +9445,7 @@
     node_insts.Raise("Can't get node information from %s" % node)
 
     if instance.name not in node_insts.payload:
-      if instance.admin_up:
+      if instance.admin_state:
         state = constants.INSTST_ERRORDOWN
       else:
         state = constants.INSTST_ADMINDOWN
@@ -9895,7 +9895,7 @@
     feedback_fn("Replacing disk(s) %s for %s" %
                 (utils.CommaJoin(self.disks), self.instance.name))
 
-    activate_disks = (not self.instance.admin_up)
+    activate_disks = (not self.instance.admin_state)
 
     # Activate the instance disks if we're replacing them on a down instance
     if activate_disks:
@@ -10396,7 +10396,7 @@
     """
     # Check whether any instance on this node has faulty disks
     for inst in _GetNodeInstances(self.cfg, self.op.node_name):
-      if not inst.admin_up:
+      if not inst.admin_state:
        continue
       check_nodes = set(inst.all_nodes)
       check_nodes.discard(self.op.node_name)
@@ -10758,9 +10758,9 @@
       if disk_abort:
         self.proc.LogWarning("Disk sync-ing has not returned a good"
                              " status; please check the instance")
-      if not instance.admin_up:
+      if not instance.admin_state:
         _SafeShutdownInstanceDisks(self, instance, disks=[disk])
-    elif not instance.admin_up:
+    elif not instance.admin_state:
       self.proc.LogWarning("Not shutting down the disk even if the instance is"
                            " not supposed to be running because no wait for"
                            " sync mode was requested")
@@ -10901,7 +10901,7 @@
         else:
           remote_state = "down"
 
-      if instance.admin_up:
+      if instance.admin_state:
        config_state = "up"
       else:
         config_state = "down"
@@ -12000,7 +12000,7 @@
           "Cannot retrieve locked instance %s" % self.op.instance_name
     _CheckNodeOnline(self, self.instance.primary_node)
 
-    if (self.op.remove_instance and self.instance.admin_up and
+    if (self.op.remove_instance and self.instance.admin_state and
         not self.op.shutdown):
       raise errors.OpPrereqError("Can not remove instance without shutting it"
                                  " down before")
@@ -12130,7 +12130,7 @@
     for disk in instance.disks:
       self.cfg.SetDiskID(disk, src_node)
 
-    activate_disks = (not instance.admin_up)
+    activate_disks = (not instance.admin_state)
 
     if activate_disks:
       # Activate the instance disks if we'exporting a stopped instance
@@ -12143,7 +12143,7 @@
 
       helper.CreateSnapshots()
       try:
-        if (self.op.shutdown and instance.admin_up and
+        if (self.op.shutdown and instance.admin_state and
             not self.op.remove_instance):
           assert not activate_disks
           feedback_fn("Starting instance %s" % instance.name)
@@ -13446,7 +13446,7 @@
             i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
             remote_info["memory_free"] -= max(0, i_mem_diff)
 
-            if iinfo.admin_up:
+            if iinfo.admin_state:
              i_p_up_mem += beinfo[constants.BE_MEMORY]
 
         # compute memory used by instances
@@ -13486,7 +13486,7 @@
         nic_data.append(nic_dict)
       pir = {
         "tags": list(iinfo.GetTags()),
-        "admin_up": iinfo.admin_up,
+        "admin_state": iinfo.admin_state,
         "vcpus": beinfo[constants.BE_VCPUS],
         "memory": beinfo[constants.BE_MEMORY],
         "os": iinfo.os,
--- a/lib/config.py
+++ b/lib/config.py
@@ -1182,8 +1182,8 @@
       raise errors.ConfigurationError("Unknown instance '%s'" %
                                       instance_name)
     instance = self._config_data.instances[instance_name]
-    if instance.admin_up != status:
-      instance.admin_up = status
+    if instance.admin_state != status:
+      instance.admin_state = status
       instance.serial_no += 1
       instance.mtime = time.time()
       self._WriteConfig()
--- a/lib/constants.py
+++ b/lib/constants.py
@@ -1271,6 +1271,16 @@
   INSTST_ERRORDOWN,
   ])
 
+# Admin states
+ADMINST_UP = "up"
+ADMINST_DOWN = "down"
+ADMINST_OFFLINE = "offline"
+ADMINST_ALL = frozenset([
+  ADMINST_UP,
+  ADMINST_DOWN,
+  ADMINST_OFFLINE,
+  ])
+
 # Node roles
 NR_REGULAR = "R"
 NR_MASTER = "M"
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -744,7 +744,7 @@
     "hvparams",
     "beparams",
     "osparams",
-    "admin_up",
+    "admin_state",
     "nics",
     "disks",
     "disk_template",
@@ -884,6 +884,13 @@
     """Custom function for instances.
 
     """
+    if "admin_state" not in val:
+      if val.get("admin_up", False):
+        val["admin_state"] = constants.ADMINST_UP
+      else:
+        val["admin_state"] = constants.ADMINST_DOWN
+    if "admin_up" in val:
+      del val["admin_up"]
     obj = super(Instance, cls).FromDict(val)
     obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
     obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
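The Instance.FromDict() hunk above is the backward-compatibility shim: an old serialized instance that still carries the boolean admin_up has it translated into an admin_state value, and the legacy key is then dropped. The same mapping, restated as a standalone sketch for reasoning about the round-trip (the function name is invented; the logic mirrors the hunk):

  from ganeti import constants

  # Illustrative restatement of the compatibility logic in Instance.FromDict().
  def _upgrade_instance_dict(val):
    if "admin_state" not in val:
      if val.get("admin_up", False):
        val["admin_state"] = constants.ADMINST_UP    # old admin_up=True  -> "up"
      else:
        val["admin_state"] = constants.ADMINST_DOWN  # old admin_up=False -> "down"
    if "admin_up" in val:
      del val["admin_up"]
    return val

  # e.g. _upgrade_instance_dict({"admin_up": True}) yields {"admin_state": "up"}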
--- a/lib/query.py
+++ b/lib/query.py
@@ -1348,12 +1348,12 @@
   if bool(ctx.live_data.get(inst.name)):
     if inst.name in ctx.wrongnode_inst:
       return constants.INSTST_WRONGNODE
-    elif inst.admin_up:
+    elif inst.admin_state:
       return constants.INSTST_RUNNING
     else:
       return constants.INSTST_ERRORUP
 
-  if inst.admin_up:
+  if inst.admin_state:
     return constants.INSTST_ERRORDOWN
 
   return constants.INSTST_ADMINDOWN
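The query.py hunk above is the tail of the instance status computation: when live data is available, the wrong-node set and admin_state choose between WRONGNODE, RUNNING and ERRORUP; without live data, admin_state alone distinguishes ERRORDOWN from ADMINDOWN. A condensed sketch of just that decision (the earlier offline and ghost-node branches of the real function are omitted, and the function name is invented):

  from ganeti import constants

  # Condensed, illustrative version of the branch structure shown above.
  def _inst_status_tail(admin_state, has_live_data, on_wrong_node):
    if has_live_data:
      if on_wrong_node:
        return constants.INSTST_WRONGNODE
      elif admin_state:
        return constants.INSTST_RUNNING
      else:
        return constants.INSTST_ERRORUP
    if admin_state:
      return constants.INSTST_ERRORDOWN
    return constants.INSTST_ADMINDOWN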
@@ -1778,7 +1778,7 @@
     (_MakeField("admin_state", "Autostart", QFT_BOOL,
                 "Desired state of instance (if set, the instance should be"
                 " up)"),
-     IQ_CONFIG, 0, _GetItemAttr("admin_up")),
+     IQ_CONFIG, 0, _GetItemAttr("admin_state")),
     (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), IQ_CONFIG, 0,
      lambda ctx, inst: list(inst.GetTags())),
     (_MakeField("console", "Console", QFT_OTHER,
--- a/test/ganeti.query_unittest.py
+++ b/test/ganeti.query_unittest.py
@@ -644,7 +644,7 @@
       objects.Instance(name="inst1", hvparams={}, beparams={}, nics=[],
         uuid="f90eccb3-e227-4e3c-bf2a-94a21ca8f9cd",
         ctime=1291244000, mtime=1291244400, serial_no=30,
-        admin_up=True, hypervisor=constants.HT_XEN_PVM, os="linux1",
+        admin_state=True, hypervisor=constants.HT_XEN_PVM, os="linux1",
         primary_node="node1",
         disk_template=constants.DT_PLAIN,
         disks=[],
@@ -652,7 +652,7 @@
       objects.Instance(name="inst2", hvparams={}, nics=[],
         uuid="73a0f8a7-068c-4630-ada2-c3440015ab1a",
         ctime=1291211000, mtime=1291211077, serial_no=1,
-        admin_up=True, hypervisor=constants.HT_XEN_HVM, os="deb99",
+        admin_state=True, hypervisor=constants.HT_XEN_HVM, os="deb99",
         primary_node="node5",
         disk_template=constants.DT_DISKLESS,
         disks=[],
@@ -663,7 +663,7 @@
       objects.Instance(name="inst3", hvparams={}, beparams={},
         uuid="11ec8dff-fb61-4850-bfe0-baa1803ff280",
         ctime=1291011000, mtime=1291013000, serial_no=1923,
-        admin_up=False, hypervisor=constants.HT_KVM, os="busybox",
+        admin_state=False, hypervisor=constants.HT_KVM, os="busybox",
         primary_node="node6",
         disk_template=constants.DT_DRBD8,
         disks=[],
@@ -678,7 +678,7 @@
       objects.Instance(name="inst4", hvparams={}, beparams={},
         uuid="68dab168-3ef5-4c9d-b4d3-801e0672068c",
         ctime=1291244390, mtime=1291244395, serial_no=25,
-        admin_up=False, hypervisor=constants.HT_XEN_PVM, os="linux1",
+        admin_state=False, hypervisor=constants.HT_XEN_PVM, os="linux1",
         primary_node="nodeoff2",
         disk_template=constants.DT_DRBD8,
         disks=[],
@@ -702,7 +702,7 @@
       objects.Instance(name="inst5", hvparams={}, nics=[],
         uuid="0e3dca12-5b42-4e24-98a2-415267545bd0",
         ctime=1231211000, mtime=1261200000, serial_no=3,
-        admin_up=True, hypervisor=constants.HT_XEN_HVM, os="deb99",
+        admin_state=True, hypervisor=constants.HT_XEN_HVM, os="deb99",
         primary_node="nodebad2",
         disk_template=constants.DT_DISKLESS,
         disks=[],
@@ -713,7 +713,7 @@
       objects.Instance(name="inst6", hvparams={}, nics=[],
         uuid="72de6580-c8d5-4661-b902-38b5785bb8b3",
         ctime=7513, mtime=11501, serial_no=13390,
-        admin_up=False, hypervisor=constants.HT_XEN_HVM, os="deb99",
+        admin_state=False, hypervisor=constants.HT_XEN_HVM, os="deb99",
         primary_node="node7",
         disk_template=constants.DT_DISKLESS,
         disks=[],
@@ -726,7 +726,7 @@
       objects.Instance(name="inst7", hvparams={}, nics=[],
         uuid="ceec5dc4-b729-4f42-ae28-69b3cd24920e",
         ctime=None, mtime=None, serial_no=1947,
-        admin_up=False, hypervisor=constants.HT_XEN_HVM, os="deb99",
+        admin_state=False, hypervisor=constants.HT_XEN_HVM, os="deb99",
         primary_node="node6",
         disk_template=constants.DT_DISKLESS,
         disks=[],
@@ -799,11 +799,11 @@
       elif inst.name in live_data:
         if inst.name in wrongnode_inst:
           exp_status = constants.INSTST_WRONGNODE
-        elif inst.admin_up:
+        elif inst.admin_state:
           exp_status = constants.INSTST_RUNNING
         else:
           exp_status = constants.INSTST_ERRORUP
-      elif inst.admin_up:
+      elif inst.admin_state:
        exp_status = constants.INSTST_ERRORDOWN
       else:
         exp_status = constants.INSTST_ADMINDOWN
