Revision 71333cb9 lib/cmdlib.py

diff --git a/lib/cmdlib.py b/lib/cmdlib.py
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -374,8 +374,8 @@
     # future we might want to have different behaviors depending on the value
     # of self.recalculate_locks[locking.LEVEL_NODE]
     wanted_nodes = []
-    for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
-      instance = self.context.cfg.GetInstanceInfo(instance_name)
+    locked_i = self.glm.list_owned(locking.LEVEL_INSTANCE)
+    for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
       wanted_nodes.append(instance.primary_node)
       if not primary_only:
         wanted_nodes.extend(instance.secondary_nodes)
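This hunk, like the similar hunks further down, replaces per-name GetInstanceInfo lookups with a single bulk GetMultiInstanceInfo query over the locked instance names. The sketch below only illustrates the assumed shape of that pattern: the helpers get_one and get_multi are hypothetical stand-ins for the config methods, and the (name, instance) pair shape is inferred from how the new code unpacks its results, not from a verified signature.

# Sketch only -- get_one / get_multi stand in for cfg.GetInstanceInfo /
# cfg.GetMultiInstanceInfo; the (name, instance) pair shape is an assumption.

def primary_nodes_old(locked_names, get_one):
  # Old pattern: one configuration lookup per locked instance name.
  return [get_one(name).primary_node for name in locked_names]

def primary_nodes_new(locked_names, get_multi):
  # New pattern: a single bulk lookup yielding (name, instance) pairs.
  return [inst.primary_node for (_name, inst) in get_multi(locked_names)]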
@@ -2959,8 +2959,7 @@
                                  errors.ECODE_STATE)
 
     # Get instance information
-    self.instances = dict((name, self.cfg.GetInstanceInfo(name))
-                          for name in owned_instances)
+    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
     for (instance_name, inst) in self.instances.items():
@@ -3061,8 +3060,8 @@
     if self.wanted_names is None:
       self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
 
-    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
-                             in self.wanted_names]
+    self.wanted_instances = \
+        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
 
   def _EnsureChildSizes(self, disk):
     """Ensure children of the disk have the needed disk size.
@@ -4189,15 +4188,12 @@
     node = self.cfg.GetNodeInfo(self.op.node_name)
     assert node is not None
 
-    instance_list = self.cfg.GetInstanceList()
-
     masternode = self.cfg.GetMasterNode()
     if node.name == masternode:
       raise errors.OpPrereqError("Node is the master node, failover to another"
                                  " node is required", errors.ECODE_INVAL)
 
-    for instance_name in instance_list:
-      instance = self.cfg.GetInstanceInfo(instance_name)
+    for instance_name, instance in self.cfg.GetAllInstancesInfo():
       if node.name in instance.all_nodes:
         raise errors.OpPrereqError("Instance %s is still running on the node,"
                                    " please remove first" % instance_name,
@@ -5048,8 +5044,8 @@
         instances_keep = []
 
         # Build list of instances to release
-        for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
-          instance = self.context.cfg.GetInstanceInfo(instance_name)
+        locked_i = self.glm.list_owned(locking.LEVEL_INSTANCE)
+        for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i):
           if (instance.disk_template in constants.DTS_INT_MIRROR and
               self.op.node_name in instance.all_nodes):
             instances_keep.append(instance_name)
@@ -10303,8 +10299,8 @@
       assert self.op.use_locking, "Locking was not used"
       self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
 
-    self.wanted_instances = [self.cfg.GetInstanceInfo(name)
-                             for name in self.wanted_names]
+    self.wanted_instances = \
+        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
 
   def _ComputeBlockdevStatus(self, node, instance_name, dev):
     """Returns the status of a block device
@@ -12129,8 +12125,7 @@
                                  errors.ECODE_STATE)
 
     # Get instance information
-    self.instances = dict((name, self.cfg.GetInstanceInfo(name))
-                          for name in owned_instances)
+    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
     for instance_name in owned_instances:
