Fix exit code of “gnt-cluster verify”
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 6b8da0c..1e0ac3e 100644
@@ -119,6 +119,8 @@ class LogicalUnit(object):
     self.op = op
     self.cfg = context.cfg
     self.glm = context.glm
+    # readability alias
+    self.owned_locks = context.glm.list_owned
     self.context = context
     self.rpc = rpc
     # Dicts used to declare locking needs to mcpu
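The new attribute is just a bound-method alias: self.owned_locks(level) reads better than self.glm.list_owned(level) at the many call sites converted below. A minimal, self-contained sketch of the pattern, with a stand-in lock manager instead of the real GanetiLockManager:

    class _FakeLockManager(object):
      """Stand-in for context.glm, used only to illustrate the alias."""
      def __init__(self, owned):
        self._owned = owned

      def list_owned(self, level):
        return self._owned.get(level, frozenset())

    glm = _FakeLockManager({"node": frozenset(["node1", "node2"])})
    owned_locks = glm.list_owned  # the readability alias: same bound method
    assert owned_locks("node") == frozenset(["node1", "node2"])
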
@@ -374,8 +376,8 @@ class LogicalUnit(object):
     # future we might want to have different behaviors depending on the value
     # of self.recalculate_locks[locking.LEVEL_NODE]
     wanted_nodes = []
-    for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
-      instance = self.context.cfg.GetInstanceInfo(instance_name)
+    locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
+    for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
       wanted_nodes.append(instance.primary_node)
       if not primary_only:
         wanted_nodes.extend(instance.secondary_nodes)
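GetMultiInstanceInfo is expected to return (name, instance) pairs for the locked instance names, replacing one GetInstanceInfo call per instance. The same shape in plain Python, with made-up data rather than the real ConfigWriter API:

    instances = {
      "inst1": {"primary_node": "node1", "secondary_nodes": ["node2"]},
      "inst2": {"primary_node": "node3", "secondary_nodes": []},
    }

    def get_multi_instance_info(names):
      # Illustrative stand-in: returns (name, info) pairs like the config call
      return [(name, instances[name]) for name in names]

    wanted_nodes = []
    for _, inst in get_multi_instance_info(sorted(instances)):
      wanted_nodes.append(inst["primary_node"])
      wanted_nodes.extend(inst["secondary_nodes"])

    assert wanted_nodes == ["node1", "node2", "node3"]
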
@@ -488,7 +490,7 @@ class _QueryBase:
 
     """
     if self.do_locking:
-      names = lu.glm.list_owned(lock_level)
+      names = lu.owned_locks(lock_level)
     else:
       names = all_names
 
@@ -555,6 +557,56 @@ def _ShareAll():
   return dict.fromkeys(locking.LEVELS, 1)
 
 
+def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
+  """Checks if the owned node groups are still correct for an instance.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: The cluster configuration
+  @type instance_name: string
+  @param instance_name: Instance name
+  @type owned_groups: set or frozenset
+  @param owned_groups: List of currently owned node groups
+
+  """
+  inst_groups = cfg.GetInstanceNodeGroups(instance_name)
+
+  if not owned_groups.issuperset(inst_groups):
+    raise errors.OpPrereqError("Instance %s's node groups changed since"
+                               " locks were acquired, current groups are"
+                               " '%s', owning groups '%s'; retry the"
+                               " operation" %
+                               (instance_name,
+                                utils.CommaJoin(inst_groups),
+                                utils.CommaJoin(owned_groups)),
+                               errors.ECODE_STATE)
+
+  return inst_groups
+
+
+def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
+  """Checks if the instances in a node group are still correct.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: The cluster configuration
+  @type group_uuid: string
+  @param group_uuid: Node group UUID
+  @type owned_instances: set or frozenset
+  @param owned_instances: List of currently owned instances
+
+  """
+  wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
+  if owned_instances != wanted_instances:
+    raise errors.OpPrereqError("Instances in node group '%s' changed since"
+                               " locks were acquired, wanted '%s', have '%s';"
+                               " retry the operation" %
+                               (group_uuid,
+                                utils.CommaJoin(wanted_instances),
+                                utils.CommaJoin(owned_instances)),
+                               errors.ECODE_STATE)
+
+  return wanted_instances
+
+
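Both helpers implement the re-validation that optimistic group locking requires: locks are computed from possibly stale configuration data, so CheckPrereq must confirm the owned set still matches reality and fail with ECODE_STATE (asking the caller to retry) otherwise. The checks reduce to set comparisons; a standalone illustration with plain sets in place of ConfigWriter lookups:

    owned_groups = frozenset(["uuid-a", "uuid-b"])
    inst_groups = frozenset(["uuid-a", "uuid-c"])   # groups the instance uses now

    if not owned_groups.issuperset(inst_groups):
      # _CheckInstanceNodeGroups raises OpPrereqError(..., ECODE_STATE) here
      print("instance moved to an unlocked group; retry the operation")

    owned_instances = frozenset(["inst1", "inst2"])
    wanted_instances = frozenset(["inst1"])         # group membership changed
    if owned_instances != wanted_instances:
      # _CheckNodeGroupInstances raises OpPrereqError(..., ECODE_STATE) here
      print("instances in the node group changed; retry the operation")
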
 def _SupportsOob(cfg, node):
   """Tells if node supports OOB.
 
@@ -665,18 +717,18 @@ def _ReleaseLocks(lu, level, names=None, keep=None):
     release = []
 
     # Determine which locks to release
-    for name in lu.glm.list_owned(level):
+    for name in lu.owned_locks(level):
       if should_release(name):
         release.append(name)
       else:
         retain.append(name)
 
-    assert len(lu.glm.list_owned(level)) == (len(retain) + len(release))
+    assert len(lu.owned_locks(level)) == (len(retain) + len(release))
 
     # Release just some locks
     lu.glm.release(level, names=release)
 
-    assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
+    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
   else:
     # Release everything
     lu.glm.release(level)
@@ -1201,6 +1253,29 @@ def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
                                  " iallocator")
 
 
+def _GetDefaultIAllocator(cfg, iallocator):
+  """Decides on which iallocator to use.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration object
+  @type iallocator: string or None
+  @param iallocator: Iallocator specified in opcode
+  @rtype: string
+  @return: Iallocator name
+
+  """
+  if not iallocator:
+    # Use default iallocator
+    iallocator = cfg.GetDefaultIAllocator()
+
+  if not iallocator:
+    raise errors.OpPrereqError("No iallocator was specified, neither in the"
+                               " opcode nor as a cluster-wide default",
+                               errors.ECODE_INVAL)
+
+  return iallocator
+
+
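_GetDefaultIAllocator centralises the fall-back chain that LUGroupEvacuate previously open-coded (and that the new LUInstanceChangeGroup below reuses): take the iallocator named in the opcode, else the cluster-wide default, else fail. A sketch with a dummy object standing in for ConfigWriter:

    class _DummyCfg(object):
      def __init__(self, default):
        self._default = default

      def GetDefaultIAllocator(self):
        return self._default

    def pick_iallocator(cfg, from_opcode):
      # Same decision as _GetDefaultIAllocator, minus the OpPrereqError
      return from_opcode or cfg.GetDefaultIAllocator() or None

    assert pick_iallocator(_DummyCfg("hail"), "custom") == "custom"
    assert pick_iallocator(_DummyCfg("hail"), None) == "hail"
    assert pick_iallocator(_DummyCfg(None), None) is None  # cmdlib raises instead
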
 class LUClusterPostInit(LogicalUnit):
   """Logical unit for running hooks after cluster initialization.
 
@@ -1435,6 +1510,47 @@ class _VerifyErrors(object):
       self.bad = self.bad or cond
 
 
+class LUClusterVerify(NoHooksLU):
+  """Submits all jobs necessary to verify the cluster.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {}
+
+  def Exec(self, feedback_fn):
+    jobs = []
+
+    if self.op.group_name:
+      groups = [self.op.group_name]
+      depends_fn = lambda: None
+    else:
+      groups = self.cfg.GetNodeGroupList()
+
+      # Verify global configuration
+      jobs.append([opcodes.OpClusterVerifyConfig()])
+
+      # Always depend on global verification
+      depends_fn = lambda: [(-len(jobs), [])]
+
+    jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
+                                              depends=depends_fn())]
+                for group in groups)
+
+    # Fix up all parameters
+    for op in itertools.chain(*jobs): # pylint: disable-msg=W0142
+      op.debug_simulate_errors = self.op.debug_simulate_errors
+      op.verbose = self.op.verbose
+      op.error_codes = self.op.error_codes
+      try:
+        op.skip_checks = self.op.skip_checks
+      except AttributeError:
+        assert not isinstance(op, opcodes.OpClusterVerifyGroup)
+
+    return ResultWithJobs(jobs)
+
+
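This new opcode is the core of the fix: cluster verification becomes a set of jobs, one for the global configuration plus one per node group. When no group is named, every per-group job declares a dependency on the configuration job through a relative job ID; (-len(jobs), []) means "the job submitted len(jobs) positions earlier, accepting any final status". How those offsets resolve, sketched with plain lists standing in for submitted jobs:

    jobs = []
    jobs.append(["OpClusterVerifyConfig"])     # job 0: global config verification

    group_jobs = []
    for group in ["default", "rack2"]:
      depends = [(-len(jobs), [])]             # relative ID, resolves to job 0
      group_jobs.append(("OpClusterVerifyGroup", group, depends))
      jobs.append([group_jobs[-1]])

    # Job 1 points one slot back, job 2 two slots back: both land on the
    # config-verification job submitted first.
    assert [deps for (_, _, deps) in group_jobs] == [[(-1, [])], [(-2, [])]]
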
 class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
   """Verifies the cluster config.
 
@@ -1458,6 +1574,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
   def ExpandNames(self):
     # Information can be safely retrieved as the BGL is acquired in exclusive
     # mode
+    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
     self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
     self.all_node_info = self.cfg.GetAllNodesInfo()
     self.all_inst_info = self.cfg.GetAllInstancesInfo()
@@ -1519,7 +1636,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
                   "the following instances have a non-existing primary-node:"
                   " %s", utils.CommaJoin(no_node_instances))
 
-    return (not self.bad, [g.name for g in self.all_group_info.values()])
+    return not self.bad
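Returning a plain boolean here (instead of the old (status, group names) tuple) is what lets the command's exit code be computed correctly: each verification job now reports its own success, and the client only has to fold the per-job results together. A hedged sketch of that aggregation, not the actual cli.py code:

    def cluster_verify_exit_code(job_results):
      """job_results: one boolean per submitted verification job."""
      return 0 if all(job_results) else 1

    assert cluster_verify_exit_code([True, True, True]) == 0
    assert cluster_verify_exit_code([True, False, True]) == 1
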
 
 
 class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
@@ -1609,7 +1726,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       # volumes for these instances are healthy, we will need to do an
       # extra call to their secondaries. We ensure here those nodes will
       # be locked.
-      for inst in self.glm.list_owned(locking.LEVEL_INSTANCE):
+      for inst in self.owned_locks(locking.LEVEL_INSTANCE):
         # Important: access only the instances whose lock is owned
         if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
           nodes.update(all_inst_info[inst].secondary_nodes)
@@ -1617,14 +1734,17 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       self.needed_locks[locking.LEVEL_NODE] = nodes
 
   def CheckPrereq(self):
-    group_nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
+    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
+    self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
+
+    group_nodes = set(self.group_info.members)
     group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
 
     unlocked_nodes = \
-        group_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
+        group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
 
     unlocked_instances = \
-        group_instances.difference(self.glm.list_owned(locking.LEVEL_INSTANCE))
+        group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
 
     if unlocked_nodes:
       raise errors.OpPrereqError("Missing lock for nodes: %s" %
@@ -1658,7 +1778,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
             extra_lv_nodes.add(nname)
 
     unlocked_lv_nodes = \
-        extra_lv_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
+        extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
 
     if unlocked_lv_nodes:
       raise errors.OpPrereqError("these nodes could be locked: %s" %
@@ -1988,7 +2108,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param all_nvinfo: RPC results
 
     """
-    node_names = frozenset(node.name for node in nodeinfo)
+    node_names = frozenset(node.name for node in nodeinfo if not node.offline)
 
     assert master_node in node_names
     assert (len(files_all | files_all_opt | files_mc | files_vm) ==
@@ -2007,6 +2127,9 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     fileinfo = dict((filename, {}) for filename in file2nodefn.keys())
 
     for node in nodeinfo:
+      if node.offline:
+        continue
+
       nresult = all_nvinfo[node.name]
 
       if nresult.fail_msg or not nresult.payload:
@@ -2190,11 +2313,6 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
                "OS '%s' has multiple entries (first one shadows the rest): %s",
                os_name, utils.CommaJoin([v[0] for v in os_data]))
-      # this will catched in backend too
-      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
-               and not f_var, self.ENODEOS, node,
-               "OS %s with API at least %d does not declare any variant",
-               os_name, constants.OS_API_V15)
       # comparisons with the 'base' image
       test = os_name not in base.oslist
       _ErrorIf(test, self.ENODEOS, node,
@@ -2451,6 +2569,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     # This method has too many local variables. pylint: disable-msg=R0914
+    feedback_fn("* Verifying group '%s'" % self.group_info.name)
 
     if not self.my_node_names:
       # empty node group
@@ -2884,7 +3003,7 @@ class LUClusterVerifyDisks(NoHooksLU):
       }
 
   def Exec(self, feedback_fn):
-    group_names = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
 
     # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
     return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
@@ -2926,10 +3045,8 @@ class LUGroupVerifyDisks(NoHooksLU):
             # going via the node before it's locked, requiring verification
             # later on
             [group_uuid
-             for instance_name in
-               self.glm.list_owned(locking.LEVEL_INSTANCE)
-             for group_uuid in
-               self.cfg.GetInstanceNodeGroups(instance_name)])
+             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
+             for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
 
     elif level == locking.LEVEL_NODE:
       # This will only lock the nodes in the group to be verified which contain
@@ -2938,49 +3055,33 @@ class LUGroupVerifyDisks(NoHooksLU):
       self._LockInstancesNodes()
 
       # Lock all nodes in group to be verified
-      assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
       member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
       self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert self.group_uuid in owned_groups
 
     # Check if locked instances are still correct
-    wanted_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
-    if owned_instances != wanted_instances:
-      raise errors.OpPrereqError("Instances in node group %s changed since"
-                                 " locks were acquired, wanted %s, have %s;"
-                                 " retry the operation" %
-                                 (self.op.group_name,
-                                  utils.CommaJoin(wanted_instances),
-                                  utils.CommaJoin(owned_instances)),
-                                 errors.ECODE_STATE)
+    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     # Get instance information
-    self.instances = dict((name, self.cfg.GetInstanceInfo(name))
-                          for name in owned_instances)
+    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
     for (instance_name, inst) in self.instances.items():
-      assert self.group_uuid in self.cfg.GetInstanceNodeGroups(instance_name), \
-        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
       assert owned_nodes.issuperset(inst.all_nodes), \
         "Instance %s's nodes changed while we kept the lock" % instance_name
 
-      inst_groups = self.cfg.GetInstanceNodeGroups(instance_name)
-      if not owned_groups.issuperset(inst_groups):
-        raise errors.OpPrereqError("Instance %s's node groups changed since"
-                                   " locks were acquired, current groups are"
-                                   " are '%s', owning groups '%s'; retry the"
-                                   " operation" %
-                                   (instance_name,
-                                    utils.CommaJoin(inst_groups),
-                                    utils.CommaJoin(owned_groups)),
-                                   errors.ECODE_STATE)
+      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
+                                             owned_groups)
+
+      assert self.group_uuid in inst_groups, \
+        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
 
   def Exec(self, feedback_fn):
     """Verify integrity of cluster disks.
@@ -3000,7 +3101,7 @@ class LUGroupVerifyDisks(NoHooksLU):
                                         if inst.admin_up])
 
     if nv_dict:
-      nodes = utils.NiceSort(set(self.glm.list_owned(locking.LEVEL_NODE)) &
+      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                              set(self.cfg.GetVmCapableNodeList()))
 
       node_lvs = self.rpc.call_lv_list(nodes, [])
@@ -3061,10 +3162,10 @@ class LUClusterRepairDiskSizes(NoHooksLU):
 
     """
     if self.wanted_names is None:
-      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
+      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
 
-    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
-                             in self.wanted_names]
+    self.wanted_instances = \
+        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
 
   def _EnsureChildSizes(self, disk):
     """Ensure children of the disk have the needed disk size.
@@ -3286,7 +3387,7 @@ class LUClusterSetParams(LogicalUnit):
                                    " drbd-based instances exist",
                                    errors.ECODE_INVAL)
 
-    node_list = self.glm.list_owned(locking.LEVEL_NODE)
+    node_list = self.owned_locks(locking.LEVEL_NODE)
 
     # if vg_name not None, checks given volume group on all nodes
     if self.op.vg_name:
@@ -3308,8 +3409,7 @@ class LUClusterSetParams(LogicalUnit):
     if self.op.drbd_helper:
       # checks given drbd helper on all nodes
       helpers = self.rpc.call_drbd_helper(node_list)
-      for node in node_list:
-        ninfo = self.cfg.GetNodeInfo(node)
+      for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
         if ninfo.offline:
           self.LogInfo("Not checking drbd helper on offline node %s", node)
           continue
@@ -3868,9 +3968,7 @@ class LUOobCommand(NoHooksLU):
     if self.op.command in self._SKIP_MASTER:
       assert self.master_node not in self.op.node_names
 
-    for node_name in self.op.node_names:
-      node = self.cfg.GetNodeInfo(node_name)
-
+    for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
       if node is None:
         raise errors.OpPrereqError("Node %s not found" % node_name,
                                    errors.ECODE_NOENT)
@@ -4194,15 +4292,12 @@ class LUNodeRemove(LogicalUnit):
     node = self.cfg.GetNodeInfo(self.op.node_name)
     assert node is not None
 
-    instance_list = self.cfg.GetInstanceList()
-
     masternode = self.cfg.GetMasterNode()
     if node.name == masternode:
       raise errors.OpPrereqError("Node is the master node, failover to another"
                                  " node is required", errors.ECODE_INVAL)
 
-    for instance_name in instance_list:
-      instance = self.cfg.GetInstanceInfo(instance_name)
+    for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
       if node.name in instance.all_nodes:
         raise errors.OpPrereqError("Instance %s is still running on the node,"
                                    " please remove first" % instance_name,
@@ -4248,7 +4343,7 @@ class _NodeQuery(_QueryBase):
 
   def ExpandNames(self, lu):
     lu.needed_locks = {}
-    lu.share_locks[locking.LEVEL_NODE] = 1
+    lu.share_locks = _ShareAll()
 
     if self.names:
       self.wanted = _GetWantedNodes(lu, self.names)
@@ -4259,7 +4354,7 @@ class _NodeQuery(_QueryBase):
                        query.NQ_LIVE in self.requested_data)
 
     if self.do_locking:
-      # if we don't request only static fields, we need to lock the nodes
+      # If any non-static field is requested we need to lock the nodes
       lu.needed_locks[locking.LEVEL_NODE] = self.wanted
 
   def DeclareLocks(self, lu, level):
@@ -4363,7 +4458,7 @@ class LUNodeQueryvols(NoHooksLU):
     """Computes the list of nodes and their attributes.
 
     """
-    nodenames = self.glm.list_owned(locking.LEVEL_NODE)
+    nodenames = self.owned_locks(locking.LEVEL_NODE)
     volumes = self.rpc.call_node_volumes(nodenames)
 
     ilist = self.cfg.GetAllInstancesInfo()
@@ -4432,7 +4527,7 @@ class LUNodeQueryStorage(NoHooksLU):
     """Computes the list of nodes and their attributes.
 
     """
-    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
+    self.nodes = self.owned_locks(locking.LEVEL_NODE)
 
     # Always get name to sort by
     if constants.SF_NAME in self.op.output_fields:
@@ -4494,8 +4589,7 @@ class _InstanceQuery(_QueryBase):
 
   def ExpandNames(self, lu):
     lu.needed_locks = {}
-    lu.share_locks[locking.LEVEL_INSTANCE] = 1
-    lu.share_locks[locking.LEVEL_NODE] = 1
+    lu.share_locks = _ShareAll()
 
     if self.names:
       self.wanted = _GetWantedInstances(lu, self.names)
@@ -4506,17 +4600,43 @@ class _InstanceQuery(_QueryBase):
                        query.IQ_LIVE in self.requested_data)
     if self.do_locking:
       lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
+      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
       lu.needed_locks[locking.LEVEL_NODE] = []
       lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
+    self.do_grouplocks = (self.do_locking and
+                          query.IQ_NODES in self.requested_data)
+
   def DeclareLocks(self, lu, level):
-    if level == locking.LEVEL_NODE and self.do_locking:
-      lu._LockInstancesNodes() # pylint: disable-msg=W0212
+    if self.do_locking:
+      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
+        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
+
+        # Lock all groups used by instances optimistically; this requires going
+        # via the node before it's locked, requiring verification later on
+        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
+          set(group_uuid
+              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
+      elif level == locking.LEVEL_NODE:
+        lu._LockInstancesNodes() # pylint: disable-msg=W0212
+
+  @staticmethod
+  def _CheckGroupLocks(lu):
+    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
+
+    # Check if node groups for locked instances are still correct
+    for instance_name in owned_instances:
+      _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
 
   def _GetQueryData(self, lu):
     """Computes the list of instances and their attributes.
 
     """
+    if self.do_grouplocks:
+      self._CheckGroupLocks(lu)
+
     cluster = lu.cfg.GetClusterInfo()
     all_info = lu.cfg.GetAllInstancesInfo()
 
@@ -4579,9 +4699,21 @@ class _InstanceQuery(_QueryBase):
     else:
       consinfo = None
 
+    if query.IQ_NODES in self.requested_data:
+      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
+                                            instance_list)))
+      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
+      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
+                    for uuid in set(map(operator.attrgetter("group"),
+                                        nodes.values())))
+    else:
+      nodes = None
+      groups = None
+
     return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                    disk_usage, offline_nodes, bad_nodes,
-                                   live_data, wrongnode_inst, consinfo)
+                                   live_data, wrongnode_inst, consinfo,
+                                   nodes, groups)
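The extra IQ_NODES data is built in two steps: collect every node used by the listed instances, fetch them with one GetMultiNodeInfo call, then fetch each distinct node group once. The same shape in plain Python, with dictionaries standing in for node and group objects:

    import itertools
    import operator

    instance_list = [
      {"name": "inst1", "all_nodes": ["node1", "node2"]},
      {"name": "inst2", "all_nodes": ["node2"]},
    ]
    node_info = {"node1": {"group": "uuid-a"}, "node2": {"group": "uuid-b"}}

    node_names = set(itertools.chain(*map(operator.itemgetter("all_nodes"),
                                          instance_list)))
    nodes = dict((name, node_info[name]) for name in node_names)
    groups = dict((uuid, {"uuid": uuid})
                  for uuid in set(map(operator.itemgetter("group"),
                                      nodes.values())))

    assert node_names == set(["node1", "node2"])
    assert sorted(groups) == ["uuid-a", "uuid-b"]
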
 
 
 class LUQuery(NoHooksLU):
@@ -4594,7 +4726,7 @@ class LUQuery(NoHooksLU):
   def CheckArguments(self):
     qcls = _GetQueryImplementation(self.op.what)
 
-    self.impl = qcls(self.op.filter, self.op.fields, False)
+    self.impl = qcls(self.op.filter, self.op.fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.impl.ExpandNames(self)
@@ -4750,9 +4882,7 @@ class LUNodeAdd(LogicalUnit):
 
     self.changed_primary_ip = False
 
-    for existing_node_name in node_list:
-      existing_node = cfg.GetNodeInfo(existing_node_name)
-
+    for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
       if self.op.readd and node == existing_node_name:
         if existing_node.secondary_ip != secondary_ip:
           raise errors.OpPrereqError("Readded node doesn't have the same IP"
@@ -5007,8 +5137,8 @@ class LUNodeSetParams(LogicalUnit):
         instances_keep = []
 
         # Build list of instances to release
-        for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
-          instance = self.context.cfg.GetInstanceInfo(instance_name)
+        locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
+        for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i):
           if (instance.disk_template in constants.DTS_INT_MIRROR and
               self.op.node_name in instance.all_nodes):
             instances_keep.append(instance_name)
@@ -5016,7 +5146,7 @@ class LUNodeSetParams(LogicalUnit):
 
         _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
 
-        assert (set(self.glm.list_owned(locking.LEVEL_INSTANCE)) ==
+        assert (set(self.owned_locks(locking.LEVEL_INSTANCE)) ==
                 set(instances_keep))
 
   def BuildHooksEnv(self):
@@ -6830,7 +6960,7 @@ class LUNodeMigrate(LogicalUnit):
     # running the iallocator and the actual migration, a good consistency model
     # will have to be found.
 
-    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
             frozenset([self.op.node_name]))
 
     return ResultWithJobs(jobs)
@@ -7383,10 +7513,8 @@ class TLMigrateInstance(Tasklet):
       # directly, or through an iallocator.
 
     self.all_nodes = [self.source_node, self.target_node]
-    self.nodes_ip = {
-      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
-      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
-      }
+    self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
+                         in self.cfg.GetMultiNodeInfo(self.all_nodes))
 
     if self.failover:
       feedback_fn("Failover instance %s" % self.instance.name)
@@ -8228,7 +8356,7 @@ class LUInstanceCreate(LogicalUnit):
     src_path = self.op.src_path
 
     if src_node is None:
-      locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
       exp_list = self.rpc.call_export_list(locked_nodes)
       found = False
       for node in exp_list:
@@ -8809,7 +8937,6 @@ class LUInstanceCreate(LogicalUnit):
       disk_abort = not _WaitForSync(self, iobj)
     elif iobj.disk_template in constants.DTS_INT_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
-      time.sleep(15)
       feedback_fn("* checking mirrors status")
       disk_abort = not _WaitForSync(self, iobj, oneshot=True)
     else:
@@ -8826,10 +8953,30 @@ class LUInstanceCreate(LogicalUnit):
     if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
       if self.op.mode == constants.INSTANCE_CREATE:
         if not self.op.no_install:
+          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
+                        not self.op.wait_for_sync)
+          if pause_sync:
+            feedback_fn("* pausing disk sync to install instance OS")
+            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
+                                                              iobj.disks, True)
+            for idx, success in enumerate(result.payload):
+              if not success:
+                logging.warn("pause-sync of instance %s for disk %d failed",
+                             instance, idx)
+
           feedback_fn("* running the instance OS create scripts...")
           # FIXME: pass debug option from opcode to backend
           result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
                                                  self.op.debug_level)
+          if pause_sync:
+            feedback_fn("* resuming disk sync")
+            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
+                                                              iobj.disks, False)
+            for idx, success in enumerate(result.payload):
+              if not success:
+                logging.warn("resume-sync of instance %s for disk %d failed",
+                             instance, idx)
+
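Pausing DRBD resync around the OS create scripts keeps the install and the initial sync from competing for disk bandwidth when the instance was created without waiting for sync; the same RPC is called again with False to resume. The intent, reduced to a sketch with a fake RPC object (the code above resumes inline before checking the install result rather than using try/finally):

    class _FakeRpc(object):
      def __init__(self):
        self.calls = []

      def pause_resume_sync(self, node, disks, pause):
        self.calls.append((node, pause))

    def install_os_with_paused_sync(rpc, node, disks, wait_for_sync, install):
      """Illustrative only: pause resync around the OS install, then resume."""
      pause = not wait_for_sync
      if pause:
        rpc.pause_resume_sync(node, disks, True)     # pause resync
      try:
        install()
      finally:
        if pause:
          rpc.pause_resume_sync(node, disks, False)  # resume resync

    rpc = _FakeRpc()
    install_os_with_paused_sync(rpc, "node1", ["disk0"], False, lambda: None)
    assert rpc.calls == [("node1", True), ("node1", False)]
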
           result.Raise("Could not add os for instance %s"
                        " on node %s" % (instance, pnode_name))
 
@@ -9036,7 +9183,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
 
         # Lock member nodes of all locked groups
         self.needed_locks[locking.LEVEL_NODE] = [node_name
-          for group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+          for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
           for node_name in self.cfg.GetNodeGroup(group_uuid).members]
       else:
         self._LockInstancesNodes()
@@ -9076,16 +9223,9 @@ class LUInstanceReplaceDisks(LogicalUnit):
     assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
             self.op.iallocator is None)
 
-    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
     if owned_groups:
-      groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
-      if owned_groups != groups:
-        raise errors.OpExecError("Node groups used by instance '%s' changed"
-                                 " since lock was acquired, current list is %r,"
-                                 " used to be '%s'" %
-                                 (self.op.instance_name,
-                                  utils.CommaJoin(groups),
-                                  utils.CommaJoin(owned_groups)))
+      _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
 
     return LogicalUnit.CheckPrereq(self)
 
@@ -9150,7 +9290,7 @@ class TLReplaceDisks(Tasklet):
     ial = IAllocator(lu.cfg, lu.rpc,
                      mode=constants.IALLOCATOR_MODE_RELOC,
                      name=instance_name,
-                     relocate_from=relocate_from)
+                     relocate_from=list(relocate_from))
 
     ial.Run(iallocator_name)
 
@@ -9174,6 +9314,9 @@ class TLReplaceDisks(Tasklet):
     return remote_node_name
 
   def _FindFaultyDisks(self, node_name):
+    """Wrapper for L{_FindFaultyInstanceDisks}.
+
+    """
     return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                     node_name, True)
 
@@ -9244,7 +9387,7 @@ class TLReplaceDisks(Tasklet):
     if remote_node is None:
       self.remote_node_info = None
     else:
-      assert remote_node in self.lu.glm.list_owned(locking.LEVEL_NODE), \
+      assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
              "Remote node '%s' is not locked" % remote_node
 
       self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
@@ -9350,9 +9493,8 @@ class TLReplaceDisks(Tasklet):
       instance.FindDisk(disk_idx)
 
     # Get secondary node IP addresses
-    self.node_secondary_ip = \
-      dict((node_name, self.cfg.GetNodeInfo(node_name).secondary_ip)
-           for node_name in touched_nodes)
+    self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
+                                  in self.cfg.GetMultiNodeInfo(touched_nodes))
 
   def Exec(self, feedback_fn):
     """Execute disk replacement.
@@ -9365,13 +9507,13 @@ class TLReplaceDisks(Tasklet):
 
     if __debug__:
       # Verify owned locks before starting operation
-      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
-      assert set(owned_locks) == set(self.node_secondary_ip), \
+      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
+      assert set(owned_nodes) == set(self.node_secondary_ip), \
           ("Incorrect node locks, owning %s, expected %s" %
-           (owned_locks, self.node_secondary_ip.keys()))
+           (owned_nodes, self.node_secondary_ip.keys()))
 
-      owned_locks = self.lu.glm.list_owned(locking.LEVEL_INSTANCE)
-      assert list(owned_locks) == [self.instance_name], \
+      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
+      assert list(owned_instances) == [self.instance_name], \
           "Instance '%s' not locked" % self.instance_name
 
       assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
@@ -9406,12 +9548,12 @@ class TLReplaceDisks(Tasklet):
 
     if __debug__:
       # Verify owned locks
-      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
+      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
       nodes = frozenset(self.node_secondary_ip)
-      assert ((self.early_release and not owned_locks) or
-              (not self.early_release and not (set(owned_locks) - nodes))), \
+      assert ((self.early_release and not owned_nodes) or
+              (not self.early_release and not (set(owned_nodes) - nodes))), \
         ("Not owning the correct locks, early_release=%s, owned=%r,"
-         " nodes=%r" % (self.early_release, owned_locks, nodes))
+         " nodes=%r" % (self.early_release, owned_nodes, nodes))
 
     return result
 
@@ -9970,9 +10112,9 @@ class LUNodeEvacuate(NoHooksLU):
 
   def CheckPrereq(self):
     # Verify locks
-    owned_instances = self.glm.list_owned(locking.LEVEL_INSTANCE)
-    owned_nodes = self.glm.list_owned(locking.LEVEL_NODE)
-    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+    owned_nodes = self.owned_locks(locking.LEVEL_NODE)
+    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
 
     assert owned_nodes == self.lock_nodes
 
@@ -10263,10 +10405,10 @@ class LUInstanceQueryData(NoHooksLU):
     """
     if self.wanted_names is None:
       assert self.op.use_locking, "Locking was not used"
-      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
+      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
 
-    self.wanted_instances = [self.cfg.GetInstanceInfo(name)
-                             for name in self.wanted_names]
+    self.wanted_instances = \
+        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
 
   def _ComputeBlockdevStatus(self, node, instance_name, dev):
     """Returns the status of a block device
@@ -10331,9 +10473,9 @@ class LUInstanceQueryData(NoHooksLU):
 
     cluster = self.cfg.GetClusterInfo()
 
-    for instance in self.wanted_instances:
-      pnode = self.cfg.GetNodeInfo(instance.primary_node)
-
+    pri_nodes = self.cfg.GetMultiNodeInfo(i.primary_node
+                                          for i in self.wanted_instances)
+    for instance, (_, pnode) in zip(self.wanted_instances, pri_nodes):
       if self.op.static or pnode.offline:
         remote_state = None
         if pnode.offline:
@@ -11059,6 +11201,147 @@ class LUInstanceSetParams(LogicalUnit):
     }
 
 
+class LUInstanceChangeGroup(LogicalUnit):
+  HPATH = "instance-change-group"
+  HTYPE = constants.HTYPE_INSTANCE
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.share_locks = _ShareAll()
+    self.needed_locks = {
+      locking.LEVEL_NODEGROUP: [],
+      locking.LEVEL_NODE: [],
+      }
+
+    self._ExpandAndLockInstance()
+
+    if self.op.target_groups:
+      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
+                                  self.op.target_groups)
+    else:
+      self.req_target_uuids = None
+
+    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODEGROUP:
+      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
+
+      if self.req_target_uuids:
+        lock_groups = set(self.req_target_uuids)
+
+        # Lock all groups used by instance optimistically; this requires going
+        # via the node before it's locked, requiring verification later on
+        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+        lock_groups.update(instance_groups)
+      else:
+        # No target groups, need to lock all of them
+        lock_groups = locking.ALL_SET
+
+      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
+
+    elif level == locking.LEVEL_NODE:
+      if self.req_target_uuids:
+        # Lock all nodes used by instances
+        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+        self._LockInstancesNodes()
+
+        # Lock all nodes in all potential target groups
+        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
+                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
+        member_nodes = [node_name
+                        for group in lock_groups
+                        for node_name in self.cfg.GetNodeGroup(group).members]
+        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
+      else:
+        # Lock all nodes as all groups are potential targets
+        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+  def CheckPrereq(self):
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+
+    assert (self.req_target_uuids is None or
+            owned_groups.issuperset(self.req_target_uuids))
+    assert owned_instances == set([self.op.instance_name])
+
+    # Get instance information
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+
+    # Check if node groups for locked instance are still correct
+    assert owned_nodes.issuperset(self.instance.all_nodes), \
+      ("Instance %s's nodes changed while we kept the lock" %
+       self.op.instance_name)
+
+    inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
+                                           owned_groups)
+
+    if self.req_target_uuids:
+      # User requested specific target groups
+      self.target_uuids = frozenset(self.req_target_uuids)
+    else:
+      # All groups except those used by the instance are potential targets
+      self.target_uuids = owned_groups - inst_groups
+
+    conflicting_groups = self.target_uuids & inst_groups
+    if conflicting_groups:
+      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
+                                 " used by the instance '%s'" %
+                                 (utils.CommaJoin(conflicting_groups),
+                                  self.op.instance_name),
+                                 errors.ECODE_INVAL)
+
+    if not self.target_uuids:
+      raise errors.OpPrereqError("There are no possible target groups",
+                                 errors.ECODE_INVAL)
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    assert self.target_uuids
+
+    env = {
+      "TARGET_GROUPS": " ".join(self.target_uuids),
+      }
+
+    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+
+    return env
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    mn = self.cfg.GetMasterNode()
+    return ([mn], [mn])
+
+  def Exec(self, feedback_fn):
+    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
+
+    assert instances == [self.op.instance_name], "Instance not locked"
+
+    ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
+                     instances=instances, target_groups=list(self.target_uuids))
+
+    ial.Run(self.op.iallocator)
+
+    if not ial.success:
+      raise errors.OpPrereqError("Can't compute solution for changing group of"
+                                 " instance '%s' using iallocator '%s': %s" %
+                                 (self.op.instance_name, self.op.iallocator,
+                                  ial.info),
+                                 errors.ECODE_NORES)
+
+    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
+
+    self.LogInfo("Iallocator returned %s job(s) for changing group of"
+                 " instance '%s'", len(jobs), self.op.instance_name)
+
+    return ResultWithJobs(jobs)
+
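Target selection for the new opcode is set arithmetic over node group UUIDs: explicit targets are taken as given, otherwise every owned group the instance does not already use is a candidate, and any overlap between targets and the instance's own groups is rejected. A standalone sketch of that CheckPrereq logic:

    def pick_target_groups(owned_groups, inst_groups, requested):
      """Illustrative version of the target-group selection."""
      if requested:
        targets = frozenset(requested)
      else:
        targets = owned_groups - inst_groups
      conflicting = targets & inst_groups
      if conflicting:
        raise ValueError("groups already used by the instance: %s"
                         % ", ".join(sorted(conflicting)))
      if not targets:
        raise ValueError("there are no possible target groups")
      return targets

    owned = frozenset(["uuid-a", "uuid-b", "uuid-c"])
    used = frozenset(["uuid-a"])
    assert pick_target_groups(owned, used, None) == frozenset(["uuid-b", "uuid-c"])
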
+
 class LUBackupQuery(NoHooksLU):
   """Query the exports list
 
@@ -11083,7 +11366,7 @@ class LUBackupQuery(NoHooksLU):
         that node.
 
     """
-    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
+    self.nodes = self.owned_locks(locking.LEVEL_NODE)
     rpcresult = self.rpc.call_export_list(self.nodes)
     result = {}
     for node in rpcresult:
@@ -11466,7 +11749,7 @@ class LUBackupRemove(NoHooksLU):
       fqdn_warn = True
       instance_name = self.op.instance_name
 
-    locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+    locked_nodes = self.owned_locks(locking.LEVEL_NODE)
     exportlist = self.rpc.call_export_list(locked_nodes)
     found = False
     for node in exportlist:
@@ -11586,12 +11869,12 @@ class LUGroupAssignNodes(NoHooksLU):
 
     """
     assert self.needed_locks[locking.LEVEL_NODEGROUP]
-    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
             frozenset(self.op.nodes))
 
     expected_locks = (set([self.group_uuid]) |
                       self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
-    actual_locks = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
     if actual_locks != expected_locks:
       raise errors.OpExecError("Nodes changed groups since locks were acquired,"
                                " current groups are '%s', used to be '%s'" %
@@ -11779,6 +12062,9 @@ class LUGroupQuery(NoHooksLU):
   def ExpandNames(self):
     self.gq.ExpandNames(self)
 
+  def DeclareLocks(self, level):
+    self.gq.DeclareLocks(self, level)
+
   def Exec(self, feedback_fn):
     return self.gq.OldStyleQuery(self)
 
@@ -12016,14 +12302,7 @@ class LUGroupEvacuate(LogicalUnit):
                                   utils.CommaJoin(self.req_target_uuids)),
                                  errors.ECODE_INVAL)
 
-    if not self.op.iallocator:
-      # Use default iallocator
-      self.op.iallocator = self.cfg.GetDefaultIAllocator()
-
-    if not self.op.iallocator:
-      raise errors.OpPrereqError("No iallocator was specified, neither in the"
-                                 " opcode nor as a cluster-wide default",
-                                 errors.ECODE_INVAL)
+    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
 
     self.share_locks = _ShareAll()
     self.needed_locks = {
@@ -12051,7 +12330,7 @@ class LUGroupEvacuate(LogicalUnit):
         # via the node before it's locked, requiring verification later on
         lock_groups.update(group_uuid
                            for instance_name in
-                             self.glm.list_owned(locking.LEVEL_INSTANCE)
+                             self.owned_locks(locking.LEVEL_INSTANCE)
                            for group_uuid in
                              self.cfg.GetInstanceNodeGroups(instance_name))
       else:
@@ -12066,52 +12345,39 @@ class LUGroupEvacuate(LogicalUnit):
       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
       self._LockInstancesNodes()
 
-      # Lock all nodes in group to be evacuated
-      assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
-      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
+      # Lock all nodes in group to be evacuated and target groups
+      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+      assert self.group_uuid in owned_groups
+      member_nodes = [node_name
+                      for group in owned_groups
+                      for node_name in self.cfg.GetNodeGroup(group).members]
       self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert owned_groups.issuperset(self.req_target_uuids)
     assert self.group_uuid in owned_groups
 
     # Check if locked instances are still correct
-    wanted_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
-    if owned_instances != wanted_instances:
-      raise errors.OpPrereqError("Instances in node group to be evacuated (%s)"
-                                 " changed since locks were acquired, wanted"
-                                 " %s, have %s; retry the operation" %
-                                 (self.group_uuid,
-                                  utils.CommaJoin(wanted_instances),
-                                  utils.CommaJoin(owned_instances)),
-                                 errors.ECODE_STATE)
+    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     # Get instance information
-    self.instances = dict((name, self.cfg.GetInstanceInfo(name))
-                          for name in owned_instances)
+    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
     for instance_name in owned_instances:
       inst = self.instances[instance_name]
-      assert self.group_uuid in self.cfg.GetInstanceNodeGroups(instance_name), \
-        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
       assert owned_nodes.issuperset(inst.all_nodes), \
         "Instance %s's nodes changed while we kept the lock" % instance_name
 
-      inst_groups = self.cfg.GetInstanceNodeGroups(instance_name)
-      if not owned_groups.issuperset(inst_groups):
-        raise errors.OpPrereqError("Instance %s's node groups changed since"
-                                   " locks were acquired, current groups"
-                                   " are '%s', owning groups '%s'; retry the"
-                                   " operation" %
-                                   (instance_name,
-                                    utils.CommaJoin(inst_groups),
-                                    utils.CommaJoin(owned_groups)),
-                                   errors.ECODE_STATE)
+      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
+                                             owned_groups)
+
+      assert self.group_uuid in inst_groups, \
+        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
 
     if self.req_target_uuids:
       # User requested specific target groups
@@ -12122,7 +12388,8 @@ class LUGroupEvacuate(LogicalUnit):
                            if group_uuid != self.group_uuid]
 
       if not self.target_uuids:
-        raise errors.OpExecError("There are no possible target groups")
+        raise errors.OpPrereqError("There are no possible target groups",
+                                   errors.ECODE_INVAL)
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -12139,14 +12406,14 @@ class LUGroupEvacuate(LogicalUnit):
     """
     mn = self.cfg.GetMasterNode()
 
-    assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
 
     run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
 
     return (run_nodes, run_nodes)
 
   def Exec(self, feedback_fn):
-    instances = list(self.glm.list_owned(locking.LEVEL_INSTANCE))
+    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
 
     assert self.group_uuid not in self.target_uuids
 
@@ -12541,7 +12808,6 @@ class IAllocator(object):
     self.hypervisor = None
     self.relocate_from = None
     self.name = None
-    self.evac_nodes = None
     self.instances = None
     self.evac_mode = None
     self.target_groups = []
@@ -12823,15 +13089,6 @@ class IAllocator(object):
       }
     return request
 
-  def _AddEvacuateNodes(self):
-    """Add evacuate nodes data to allocator structure.
-
-    """
-    request = {
-      "evac_nodes": self.evac_nodes
-      }
-    return request
-
   def _AddNodeEvacuate(self):
     """Get data for node-evacuate requests.
 
@@ -12912,9 +13169,6 @@ class IAllocator(object):
       (_AddRelocateInstance,
        [("name", ht.TString), ("relocate_from", _STRING_LIST)],
        ht.TList),
-    constants.IALLOCATOR_MODE_MEVAC:
-      (_AddEvacuateNodes, [("evac_nodes", _STRING_LIST)],
-       ht.TListOf(ht.TAnd(ht.TIsLength(2), _STRING_LIST))),
      constants.IALLOCATOR_MODE_NODE_EVAC:
       (_AddNodeEvacuate, [
         ("instances", _STRING_LIST),
@@ -12973,39 +13227,25 @@ class IAllocator(object):
                                (self._result_check, self.result),
                                errors.ECODE_INVAL)
 
-    if self.mode in (constants.IALLOCATOR_MODE_RELOC,
-                     constants.IALLOCATOR_MODE_MEVAC):
+    if self.mode == constants.IALLOCATOR_MODE_RELOC:
+      assert self.relocate_from is not None
+      assert self.required_nodes == 1
+
       node2group = dict((name, ndata["group"])
                         for (name, ndata) in self.in_data["nodes"].items())
 
       fn = compat.partial(self._NodesToGroups, node2group,
                           self.in_data["nodegroups"])
 
-      if self.mode == constants.IALLOCATOR_MODE_RELOC:
-        assert self.relocate_from is not None
-        assert self.required_nodes == 1
-
-        request_groups = fn(self.relocate_from)
-        result_groups = fn(rdict["result"])
-
-        if result_groups != request_groups:
-          raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
-                                   " differ from original groups (%s)" %
-                                   (utils.CommaJoin(result_groups),
-                                    utils.CommaJoin(request_groups)))
-      elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
-        request_groups = fn(self.evac_nodes)
-        for (instance_name, secnode) in self.result:
-          result_groups = fn([secnode])
-          if result_groups != request_groups:
-            raise errors.OpExecError("Iallocator returned new secondary node"
-                                     " '%s' (group '%s') for instance '%s'"
-                                     " which is not in original group '%s'" %
-                                     (secnode, utils.CommaJoin(result_groups),
-                                      instance_name,
-                                      utils.CommaJoin(request_groups)))
-      else:
-        raise errors.ProgrammerError("Unhandled mode '%s'" % self.mode)
+      instance = self.cfg.GetInstanceInfo(self.name)
+      request_groups = fn(self.relocate_from + [instance.primary_node])
+      result_groups = fn(rdict["result"] + [instance.primary_node])
+
+      if self.success and not set(result_groups).issubset(request_groups):
+        raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
+                                 " differ from original groups (%s)" %
+                                 (utils.CommaJoin(result_groups),
+                                  utils.CommaJoin(request_groups)))
 
     elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
       assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES
@@ -13087,11 +13327,8 @@ class LUTestAllocator(NoHooksLU):
     elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
       fname = _ExpandInstanceName(self.cfg, self.op.name)
       self.op.name = fname
-      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
-    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
-      if not hasattr(self.op, "evac_nodes"):
-        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
-                                   " opcode input", errors.ECODE_INVAL)
+      self.relocate_from = \
+          list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
     elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
                           constants.IALLOCATOR_MODE_NODE_EVAC):
       if not self.op.instances:
@@ -13132,10 +13369,6 @@ class LUTestAllocator(NoHooksLU):
                        name=self.op.name,
                        relocate_from=list(self.relocate_from),
                        )
-    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
-      ial = IAllocator(self.cfg, self.rpc,
-                       mode=self.op.mode,
-                       evac_nodes=self.op.evac_nodes)
     elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
       ial = IAllocator(self.cfg, self.rpc,
                        mode=self.op.mode,