Revision 080fbeea lib/cmdlib.py

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -10559,12 +10559,25 @@
       else:
         self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
 
+      self.needed_locks[locking.LEVEL_NODEGROUP] = []
       self.needed_locks[locking.LEVEL_NODE] = []
       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
-    if self.op.use_locking and level == locking.LEVEL_NODE:
-      self._LockInstancesNodes()
+    if self.op.use_locking:
+      if level == locking.LEVEL_NODEGROUP:
+        owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+
+        # Lock all groups used by instances optimistically; this requires going
+        # via the node before it's locked, requiring verification later on
+        self.needed_locks[locking.LEVEL_NODEGROUP] = \
+          frozenset(group_uuid
+                    for instance_name in owned_instances
+                    for group_uuid in
+                      self.cfg.GetInstanceNodeGroups(instance_name))
+
+      elif level == locking.LEVEL_NODE:
+        self._LockInstancesNodes()
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -10572,12 +10585,23 @@
     This only checks the optional instance list against the existing names.
 
     """
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+
     if self.wanted_names is None:
       assert self.op.use_locking, "Locking was not used"
-      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
+      self.wanted_names = owned_instances
 
-    self.wanted_instances = \
-        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
+    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
+
+    if self.op.use_locking:
+      _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
+                                None)
+    else:
+      assert not (owned_instances or owned_groups or owned_nodes)
+
+    self.wanted_instances = instances.values()
 
   def _ComputeBlockdevStatus(self, node, instance_name, dev):
     """Returns the status of a block device
@@ -10642,9 +10666,17 @@
 
     cluster = self.cfg.GetClusterInfo()
 
-    pri_nodes = self.cfg.GetMultiNodeInfo(i.primary_node
-                                          for i in self.wanted_instances)
-    for instance, (_, pnode) in zip(self.wanted_instances, pri_nodes):
+    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
+    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
+
+    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
+                                                 for node in nodes.values()))
+
+    group2name_fn = lambda uuid: groups[uuid].name
+
+    for instance in self.wanted_instances:
+      pnode = nodes[instance.primary_node]
+
       if self.op.static or pnode.offline:
         remote_state = None
         if pnode.offline:
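Instead of fetching only the primary nodes and zipping them with the instance list, the rewritten loop gathers every node (primary and secondary) once, builds plain lookup dictionaries for nodes and groups, and resolves everything from those inside the loop. A rough sketch of that lookup pattern with invented sample data (not the Ganeti config API):

import itertools

# Invented sample data standing in for what GetMultiNodeInfo and
# GetMultiNodeGroupInfo would return; only the lookup pattern matters here.
instances = {"inst1": {"all_nodes": ["node1", "node2"], "primary": "node1"}}
nodes = {"node1": {"group": "uuid-a"}, "node2": {"group": "uuid-b"}}
groups = {"uuid-a": "default", "uuid-b": "backup"}

# Collect every node used by any wanted instance in one pass ...
node_names = set(itertools.chain(*(i["all_nodes"] for i in instances.values())))
# ... and build dictionaries so the per-instance loop needs no further
# configuration calls.
node_info = dict((name, nodes[name]) for name in node_names)
group2name = lambda uuid: groups[uuid]

for name, inst in sorted(instances.items()):
  pnode = node_info[inst["primary"]]
  print("%s: primary node group %s" % (name, group2name(pnode["group"])))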
@@ -10670,12 +10702,19 @@
       disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
                   instance.disks)
 
+      snodes_group_uuids = [nodes[snode_name].group
+                            for snode_name in instance.secondary_nodes]
+
       result[instance.name] = {
         "name": instance.name,
         "config_state": config_state,
         "run_state": remote_state,
         "pnode": instance.primary_node,
+        "pnode_group_uuid": pnode.group,
+        "pnode_group_name": group2name_fn(pnode.group),
         "snodes": instance.secondary_nodes,
+        "snodes_group_uuids": snodes_group_uuids,
+        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
         "os": instance.os,
         # this happens to be the same format used for hooks
         "nics": _NICListToTuple(self, instance.nics),
