Revision 0d68c45d

b/lib/cmdlib.py
@@ -455,8 +455,8 @@
   @param secondary_nodes: list of secondary nodes as strings
   @type os_type: string
   @param os_type: the name of the instance's OS
-  @type status: string
-  @param status: the desired status of the instances
+  @type status: boolean
+  @param status: the should_run status of the instance
   @type memory: string
   @param memory: the memory size of the instance
   @type vcpus: string
@@ -468,13 +468,17 @@
   @return: the hook environment for this instance

   """
+  if status:
+    str_status = "up"
+  else:
+    str_status = "down"
   env = {
     "OP_TARGET": name,
     "INSTANCE_NAME": name,
     "INSTANCE_PRIMARY": primary_node,
     "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
     "INSTANCE_OS_TYPE": os_type,
-    "INSTANCE_STATUS": status,
+    "INSTANCE_STATUS": str_status,
     "INSTANCE_MEMORY": memory,
     "INSTANCE_VCPUS": vcpus,
   }
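The hook environment still exports INSTANCE_STATUS as a string, so the new boolean is translated back to "up"/"down" only when the environment is built. A minimal standalone sketch of that translation (the helper name and reduced argument list are illustrative, not the actual signature in cmdlib.py):

    def _hook_status_env(name, admin_up):
      # Hooks keep receiving the legacy string form; the boolean admin_up
      # flag is converted only at this boundary.
      if admin_up:
        str_status = "up"
      else:
        str_status = "down"
      return {
        "OP_TARGET": name,
        "INSTANCE_NAME": name,
        "INSTANCE_STATUS": str_status,
      }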
@@ -516,7 +520,7 @@
     'primary_node': instance.primary_node,
     'secondary_nodes': instance.secondary_nodes,
     'os_type': instance.os,
-    'status': instance.os,
+    'status': instance.admin_up,
     'memory': bep[constants.BE_MEMORY],
     'vcpus': bep[constants.BE_VCPUS],
     'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
@@ -768,7 +772,7 @@
                           (volume, node))
           bad = True

-    if not instanceconfig.status == 'down':
+    if instanceconfig.admin_up:
       if ((node_current not in node_instance or
           not instance in node_instance[node_current]) and
           node_current not in n_offline):
@@ -953,7 +957,7 @@
       node_drbd = {}
       for minor, instance in all_drbd_map[node].items():
         instance = instanceinfo[instance]
-        node_drbd[minor] = (instance.name, instance.status == "up")
+        node_drbd[minor] = (instance.name, instance.admin_up)
       result = self._VerifyNode(node_i, file_names, local_checksums,
                                 nresult, feedback_fn, master_files,
                                 node_drbd)
@@ -1181,7 +1185,7 @@
     nv_dict = {}
     for inst in instances:
       inst_lvs = {}
-      if (inst.status != "up" or
+      if (not inst.admin_up or
           inst.disk_template not in constants.DTS_NET_MIRROR):
         continue
       inst.MapLVsByNode(inst_lvs)
@@ -2821,7 +2825,7 @@
     if instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Instance '%s' has no disks" %
                                  self.op.instance_name)
-    if instance.status != "down":
+    if instance.admin_up:
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
     remote_info = self.rpc.call_instance_info(instance.primary_node,
@@ -2904,7 +2908,7 @@
                                  self.op.instance_name)
     _CheckNodeOnline(self, instance.primary_node)

-    if instance.status != "down":
+    if instance.admin_up:
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
     remote_info = self.rpc.call_instance_info(instance.primary_node,
@@ -3169,7 +3173,7 @@
         elif field == "snodes":
           val = list(instance.secondary_nodes)
         elif field == "admin_state":
-          val = (instance.status != "down")
+          val = instance.admin_up
         elif field == "oper_state":
           if instance.primary_node in bad_nodes:
             val = None
@@ -3183,12 +3187,12 @@
           else:
             running = bool(live_data.get(instance.name))
             if running:
-              if instance.status != "down":
+              if instance.admin_up:
                 val = "running"
               else:
                 val = "ERROR_up"
             else:
-              if instance.status != "down":
+              if instance.admin_up:
                 val = "ERROR_down"
               else:
                 val = "ADMIN_down"
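In the instance list query, admin_state now returns the boolean directly, and the combined status value is derived from that flag plus the live data. A reduced sketch of the mapping used above (a standalone function; both arguments are assumed to be plain booleans):

    def _instance_oper_status(admin_up, running):
      # Configured state vs. observed state, as in the query code above.
      if running:
        if admin_up:
          return "running"
        return "ERROR_up"
      if admin_up:
        return "ERROR_down"
      return "ADMIN_down"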
@@ -3362,7 +3366,7 @@
     for dev in instance.disks:
       # for drbd, these are drbd over lvm
       if not _CheckDiskConsistency(self, dev, target_node, False):
-        if instance.status == "up" and not self.op.ignore_consistency:
+        if instance.admin_up and not self.op.ignore_consistency:
           raise errors.OpExecError("Disk %s is degraded on target node,"
                                    " aborting failover." % dev.iv_name)

@@ -3390,7 +3394,7 @@
     self.cfg.Update(instance)

     # Only start the instance if it's marked as up
-    if instance.status == "up":
+    if instance.admin_up:
       feedback_fn("* activating the instance's disks on target node")
       logging.info("Starting instance %s on node %s",
                    instance.name, target_node)
@@ -4491,10 +4495,7 @@
                            self.be_full[constants.BE_MEMORY],
                            self.op.hypervisor)

-    if self.op.start:
-      self.instance_status = 'up'
-    else:
-      self.instance_status = 'down'
+    self.instance_status = self.op.start

   def Exec(self, feedback_fn):
     """Create and add the instance to the cluster.
@@ -4541,7 +4542,7 @@
                             primary_node=pnode_name,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
-                            status=self.instance_status,
+                            admin_up=self.instance_status,
                             network_port=network_port,
                             beparams=self.op.beparams,
                             hvparams=self.op.hvparams,
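During instance creation the four-line string conversion collapses into a single assignment, and the same boolean later reaches the Instance constructor as admin_up. A compressed sketch of that data flow (the helper name is hypothetical; only the start flag and the admin_up field come from the patch):

    def _new_instance_args(op_start, **other_args):
      # The opcode's boolean start flag is stored unchanged and becomes
      # the new instance's admin_up field.
      instance_status = bool(op_start)
      return dict(admin_up=instance_status, **other_args)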
@@ -5212,7 +5213,7 @@
     instance = self.instance

     # Activate the instance disks if we're replacing them on a down instance
-    if instance.status == "down":
+    if not instance.admin_up:
       _StartInstanceDisks(self, instance, True)

     if self.op.mode == constants.REPLACE_DISK_CHG:
@@ -5223,7 +5224,7 @@
     ret = fn(feedback_fn)

     # Deactivate the instance disks if we're replacing them on a down instance
-    if instance.status == "down":
+    if not instance.admin_up:
       _SafeShutdownInstanceDisks(self, instance)

     return ret
@@ -5439,10 +5440,10 @@
           remote_state = "down"
       else:
         remote_state = None
-      if instance.status == "down":
-        config_state = "down"
-      else:
+      if instance.admin_up:
         config_state = "up"
+      else:
+        config_state = "down"

       disks = [self._ComputeDiskStatus(instance, None, device)
                for device in instance.disks]
@@ -5998,7 +5999,7 @@
           snap_disks.append(new_dev)

     finally:
-      if self.op.shutdown and instance.status == "up":
+      if self.op.shutdown and instance.admin_up:
         result = self.rpc.call_instance_start(src_node, instance, None)
         msg = result.RemoteFailMsg()
         if msg:
@@ -6421,7 +6422,7 @@
           i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
           remote_info['memory_free'] -= max(0, i_mem_diff)

-          if iinfo.status == "up":
+          if iinfo.admin_up:
             i_p_up_mem += beinfo[constants.BE_MEMORY]

       # compute memory used by instances
                  for n in iinfo.nics]
6450 6451
      pir = {
6451 6452
        "tags": list(iinfo.GetTags()),
6452
        "should_run": iinfo.status == "up",
6453
        "should_run": iinfo.admin_up,
6453 6454
        "vcpus": beinfo[constants.BE_VCPUS],
6454 6455
        "memory": beinfo[constants.BE_MEMORY],
6455 6456
        "os": iinfo.os,
b/lib/config.py
@@ -588,17 +588,15 @@
     """Set the instance's status to a given value.

     """
-    if status not in ("up", "down"):
-      raise errors.ProgrammerError("Invalid status '%s' passed to"
-                                   " ConfigWriter._SetInstanceStatus()" %
-                                   status)
+    assert isinstance(status, bool), \
+           "Invalid status '%s' passed to SetInstanceStatus" % (status,)

     if instance_name not in self._config_data.instances:
       raise errors.ConfigurationError("Unknown instance '%s'" %
                                       instance_name)
     instance = self._config_data.instances[instance_name]
-    if instance.status != status:
-      instance.status = status
+    if instance.admin_up != status:
+      instance.admin_up = status
       instance.serial_no += 1
       self._WriteConfig()

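In ConfigWriter the status setter now takes a boolean and asserts the type instead of checking for the two legacy strings; the MarkInstanceUp and MarkInstanceDown wrappers in the following hunks simply pass True and False. A condensed standalone sketch of the new contract (without locking or the surrounding configuration machinery):

    class _ConfigStatusSketch(object):
      def __init__(self, instances):
        self._instances = instances  # name -> instance object

      def _set_instance_status(self, instance_name, status):
        assert isinstance(status, bool), \
               "Invalid status '%s' passed to SetInstanceStatus" % (status,)
        instance = self._instances[instance_name]
        if instance.admin_up != status:
          instance.admin_up = status
          instance.serial_no += 1  # bump only on an actual change

      def mark_instance_up(self, name):
        self._set_instance_status(name, True)

      def mark_instance_down(self, name):
        self._set_instance_status(name, False)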
@@ -607,7 +605,7 @@
     """Mark the instance status to up in the config.

     """
-    self._SetInstanceStatus(instance_name, "up")
+    self._SetInstanceStatus(instance_name, True)

   @locking.ssynchronized(_config_lock)
   def RemoveInstance(self, instance_name):
@@ -651,7 +649,7 @@
     """Mark the status of an instance to down in the configuration.

     """
-    self._SetInstanceStatus(instance_name, "down")
+    self._SetInstanceStatus(instance_name, False)

   def _UnlockedGetInstanceList(self):
     """Get the list of instances.
b/lib/objects.py
@@ -507,7 +507,7 @@
     "hypervisor",
     "hvparams",
     "beparams",
-    "status",
+    "admin_up",
     "nics",
     "disks",
     "disk_template",

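On the objects.py side the change is only the slot rename, so callers read a boolean attribute instead of comparing strings. A minimal illustration with a plain __slots__ class (not the real objects.Instance hierarchy, and the sample values are made up):

    class _InstanceSketch(object):
      # Only the fields relevant to this revision; the real Instance
      # object defines many more slots.
      __slots__ = ["name", "admin_up", "serial_no"]

      def __init__(self, name, admin_up):
        self.name = name
        self.admin_up = admin_up
        self.serial_no = 1

    inst = _InstanceSketch("instance1.example.com", True)
    if inst.admin_up:  # replaces the old: if inst.status != "down":
      print("%s is marked to run" % inst.name)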