Revision 9ca8a7c5 lib/cmdlib.py

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -905,7 +905,7 @@
 
 def _CheckInstanceDown(lu, instance, reason):
   """Ensure that an instance is not running."""
-  if instance.admin_up:
+  if instance.admin_state:
     raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                                (instance.name, reason), errors.ECODE_STATE)
 
@@ -1088,7 +1088,7 @@
     "primary_node": instance.primary_node,
     "secondary_nodes": instance.secondary_nodes,
     "os_type": instance.os,
-    "status": instance.admin_up,
+    "status": instance.admin_state,
     "memory": bep[constants.BE_MEMORY],
     "vcpus": bep[constants.BE_VCPUS],
     "nics": _NICListToTuple(lu, instance.nics),
@@ -2033,7 +2033,7 @@
         _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
                  "volume %s missing on node %s", volume, node)
 
-    if instanceconfig.admin_up:
+    if instanceconfig.admin_state:
       pri_img = node_image[node_current]
       test = instance not in pri_img.instances and not pri_img.offline
       _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
@@ -2049,11 +2049,11 @@
       # node here
       snode = node_image[nname]
       bad_snode = snode.ghost or snode.offline
-      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
+      _ErrorIf(instanceconfig.admin_state and not success and not bad_snode,
                constants.CV_EINSTANCEFAULTYDISK, instance,
                "couldn't retrieve status for disk/%s on %s: %s",
                idx, nname, bdev_status)
-      _ErrorIf((instanceconfig.admin_up and success and
+      _ErrorIf((instanceconfig.admin_state and success and
                 bdev_status.ldisk_status == constants.LDS_FAULTY),
                constants.CV_EINSTANCEFAULTYDISK, instance,
                "disk/%s on %s is faulty", idx, nname)
@@ -2262,7 +2262,7 @@
         node_drbd[minor] = (instance, False)
       else:
         instance = instanceinfo[instance]
-        node_drbd[minor] = (instance.name, instance.admin_up)
+        node_drbd[minor] = (instance.name, instance.admin_state)
 
     # and now check them
     used_minors = nresult.get(constants.NV_DRBDLIST, [])
@@ -2910,7 +2910,7 @@
                constants.CV_ENODERPC, pnode, "instance %s, connection to"
                " primary node failed", instance)
 
-      _ErrorIf(inst_config.admin_up and pnode_img.offline,
+      _ErrorIf(inst_config.admin_state and pnode_img.offline,
                constants.CV_EINSTANCEBADNODE, instance,
                "instance is marked as running and lives on offline node %s",
                inst_config.primary_node)
@@ -3165,7 +3165,7 @@
 
     nv_dict = _MapInstanceDisksToNodes([inst
                                         for inst in self.instances.values()
-                                        if inst.admin_up])
+                                        if inst.admin_state])
 
     if nv_dict:
       nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
@@ -7061,7 +7061,7 @@
     _CheckNodeNotDrained(self, target_node)
     _CheckNodeVmCapable(self, target_node)
 
-    if instance.admin_up:
+    if instance.admin_state:
       # check memory requirements on the secondary node
       _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                            instance.name, bep[constants.BE_MEMORY],
@@ -7155,7 +7155,7 @@
     _RemoveDisks(self, instance, target_node=source_node)
 
     # Only start the instance if it's marked as up
-    if instance.admin_up:
+    if instance.admin_state:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)
 
@@ -7293,7 +7293,7 @@
     assert instance is not None
     self.instance = instance
 
-    if (not self.cleanup and not instance.admin_up and not self.failover and
+    if (not self.cleanup and not instance.admin_state and not self.failover and
         self.fallback):
       self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
                       " to failover")
@@ -7355,7 +7355,7 @@
     i_be = self.cfg.GetClusterInfo().FillBE(instance)
 
     # check memory requirements on the secondary node
-    if not self.failover or instance.admin_up:
+    if not self.failover or instance.admin_state:
       _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                            instance.name, i_be[constants.BE_MEMORY],
                            instance.hypervisor)
@@ -7772,7 +7772,7 @@
     source_node = instance.primary_node
     target_node = self.target_node
 
-    if instance.admin_up:
+    if instance.admin_state:
       self.feedback_fn("* checking disk consistency between source and target")
       for dev in instance.disks:
         # for drbd, these are drbd over lvm
@@ -7815,7 +7815,7 @@
     self.cfg.Update(instance, self.feedback_fn)
 
     # Only start the instance if it's marked as up
-    if instance.admin_up:
+    if instance.admin_state:
       self.feedback_fn("* activating the instance's disks on target node %s" %
                        target_node)
       logging.info("Starting instance %s on node %s",
@@ -9217,7 +9217,7 @@
                             primary_node=pnode_name,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
-                            admin_up=False,
+                            admin_state=False,
                             network_port=network_port,
                             beparams=self.op.beparams,
                             hvparams=self.op.hvparams,
@@ -9397,7 +9397,7 @@
     assert not self.owned_locks(locking.LEVEL_NODE_RES)
 
     if self.op.start:
-      iobj.admin_up = True
+      iobj.admin_state = True
       self.cfg.Update(iobj, feedback_fn)
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
@@ -9445,7 +9445,7 @@
     node_insts.Raise("Can't get node information from %s" % node)
 
     if instance.name not in node_insts.payload:
-      if instance.admin_up:
+      if instance.admin_state:
        state = constants.INSTST_ERRORDOWN
      else:
        state = constants.INSTST_ADMINDOWN
@@ -9895,7 +9895,7 @@
     feedback_fn("Replacing disk(s) %s for %s" %
                 (utils.CommaJoin(self.disks), self.instance.name))
 
-    activate_disks = (not self.instance.admin_up)
+    activate_disks = (not self.instance.admin_state)
 
     # Activate the instance disks if we're replacing them on a down instance
     if activate_disks:
@@ -10396,7 +10396,7 @@
     """
     # Check whether any instance on this node has faulty disks
     for inst in _GetNodeInstances(self.cfg, self.op.node_name):
-      if not inst.admin_up:
+      if not inst.admin_state:
        continue
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
@@ -10758,9 +10758,9 @@
       if disk_abort:
         self.proc.LogWarning("Disk sync-ing has not returned a good"
                              " status; please check the instance")
-      if not instance.admin_up:
+      if not instance.admin_state:
         _SafeShutdownInstanceDisks(self, instance, disks=[disk])
-    elif not instance.admin_up:
+    elif not instance.admin_state:
       self.proc.LogWarning("Not shutting down the disk even if the instance is"
                            " not supposed to be running because no wait for"
                            " sync mode was requested")
@@ -10901,7 +10901,7 @@
         else:
           remote_state = "down"
 
-      if instance.admin_up:
+      if instance.admin_state:
         config_state = "up"
       else:
         config_state = "down"
@@ -12000,7 +12000,7 @@
           "Cannot retrieve locked instance %s" % self.op.instance_name
     _CheckNodeOnline(self, self.instance.primary_node)
 
-    if (self.op.remove_instance and self.instance.admin_up and
+    if (self.op.remove_instance and self.instance.admin_state and
         not self.op.shutdown):
       raise errors.OpPrereqError("Can not remove instance without shutting it"
                                  " down before")
@@ -12130,7 +12130,7 @@
     for disk in instance.disks:
       self.cfg.SetDiskID(disk, src_node)
 
-    activate_disks = (not instance.admin_up)
+    activate_disks = (not instance.admin_state)
 
     if activate_disks:
       # Activate the instance disks if we'exporting a stopped instance
@@ -12143,7 +12143,7 @@
 
       helper.CreateSnapshots()
       try:
-        if (self.op.shutdown and instance.admin_up and
+        if (self.op.shutdown and instance.admin_state and
            not self.op.remove_instance):
          assert not activate_disks
          feedback_fn("Starting instance %s" % instance.name)
@@ -13446,7 +13446,7 @@
             i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
             remote_info["memory_free"] -= max(0, i_mem_diff)
 
-            if iinfo.admin_up:
+            if iinfo.admin_state:
               i_p_up_mem += beinfo[constants.BE_MEMORY]
 
         # compute memory used by instances
@@ -13486,7 +13486,7 @@
         nic_data.append(nic_dict)
       pir = {
         "tags": list(iinfo.GetTags()),
-        "admin_up": iinfo.admin_up,
+        "admin_state": iinfo.admin_state,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,