Allow per-hypervisor optional files
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 3ca19b7..daec49d 100644
@@ -21,7 +21,7 @@
 
 """Module implementing the master-side code."""
 
-# pylint: disable-msg=W0201,C0302
+# pylint: disable=W0201,C0302
 
 # W0201 since most LU attributes are defined in CheckPrereq or similar
 # functions
@@ -60,7 +60,7 @@ from ganeti import qlang
 from ganeti import opcodes
 from ganeti import ht
 
-import ganeti.masterd.instance # pylint: disable-msg=W0611
+import ganeti.masterd.instance # pylint: disable=W0611
 
 
 class ResultWithJobs:
@@ -119,6 +119,8 @@ class LogicalUnit(object):
     self.op = op
     self.cfg = context.cfg
     self.glm = context.glm
+    # readability alias
+    self.owned_locks = context.glm.list_owned
     self.context = context
     self.rpc = rpc
     # Dicts used to declare locking needs to mcpu
@@ -129,10 +131,10 @@ class LogicalUnit(object):
     # Used to force good behavior when calling helper functions
     self.recalculate_locks = {}
     # logging
-    self.Log = processor.Log # pylint: disable-msg=C0103
-    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
-    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
-    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
+    self.Log = processor.Log # pylint: disable=C0103
+    self.LogWarning = processor.LogWarning # pylint: disable=C0103
+    self.LogInfo = processor.LogInfo # pylint: disable=C0103
+    self.LogStep = processor.LogStep # pylint: disable=C0103
     # support for dry-run
     self.dry_run_result = None
     # support for generic debug attribute
@@ -320,7 +322,7 @@ class LogicalUnit(object):
     """
     # API must be kept, thus we ignore the unused argument and could
     # be a function warnings
-    # pylint: disable-msg=W0613,R0201
+    # pylint: disable=W0613,R0201
     return lu_result
 
   def _ExpandAndLockInstance(self):
@@ -374,8 +376,8 @@ class LogicalUnit(object):
     # future we might want to have different behaviors depending on the value
     # of self.recalculate_locks[locking.LEVEL_NODE]
     wanted_nodes = []
-    for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
-      instance = self.context.cfg.GetInstanceInfo(instance_name)
+    locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
+    for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
       wanted_nodes.append(instance.primary_node)
       if not primary_only:
         wanted_nodes.extend(instance.secondary_nodes)
@@ -388,7 +390,7 @@ class LogicalUnit(object):
     del self.recalculate_locks[locking.LEVEL_NODE]
 
 
-class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
+class NoHooksLU(LogicalUnit): # pylint: disable=W0223
   """Simple LU which runs no hooks.
 
   This LU is intended as a parent for other LogicalUnits which will
@@ -488,7 +490,7 @@ class _QueryBase:
 
     """
     if self.do_locking:
-      names = lu.glm.list_owned(lock_level)
+      names = lu.owned_locks(lock_level)
     else:
       names = all_names
 
@@ -555,6 +557,56 @@ def _ShareAll():
   return dict.fromkeys(locking.LEVELS, 1)
 
 
+def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
+  """Checks if the owned node groups are still correct for an instance.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: The cluster configuration
+  @type instance_name: string
+  @param instance_name: Instance name
+  @type owned_groups: set or frozenset
+  @param owned_groups: List of currently owned node groups
+
+  """
+  inst_groups = cfg.GetInstanceNodeGroups(instance_name)
+
+  if not owned_groups.issuperset(inst_groups):
+    raise errors.OpPrereqError("Instance %s's node groups changed since"
+                               " locks were acquired, current groups are"
+                               " '%s', owning groups '%s'; retry the"
+                               " operation" %
+                               (instance_name,
+                                utils.CommaJoin(inst_groups),
+                                utils.CommaJoin(owned_groups)),
+                               errors.ECODE_STATE)
+
+  return inst_groups
+
+
+def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
+  """Checks if the instances in a node group are still correct.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: The cluster configuration
+  @type group_uuid: string
+  @param group_uuid: Node group UUID
+  @type owned_instances: set or frozenset
+  @param owned_instances: List of currently owned instances
+
+  """
+  wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
+  if owned_instances != wanted_instances:
+    raise errors.OpPrereqError("Instances in node group '%s' changed since"
+                               " locks were acquired, wanted '%s', have '%s';"
+                               " retry the operation" %
+                               (group_uuid,
+                                utils.CommaJoin(wanted_instances),
+                                utils.CommaJoin(owned_instances)),
+                               errors.ECODE_STATE)
+
+  return wanted_instances
+
+
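
The two helpers above factor out a re-check used by several LUs further down (LUGroupVerifyDisks, _InstanceQuery, LUInstanceReplaceDisks): locks are acquired optimistically from a possibly stale view of the configuration and then verified. A minimal standalone sketch of that pattern, with plain sets standing in for Ganeti's config and lock manager (all names below are illustrative):

    # Illustrative only: plain data stands in for ConfigWriter and the LU's
    # lock manager.
    class OpPrereqError(Exception):
      pass

    def CheckGroupInstances(wanted_instances, owned_instances):
      """Fail if the group's instances changed after the locks were taken."""
      if owned_instances != wanted_instances:
        raise OpPrereqError("Instances changed since locks were acquired;"
                            " retry the operation")
      return wanted_instances

    # Locks were acquired from an earlier view of the configuration...
    owned = frozenset(["inst1", "inst2"])
    # ...but the configuration gained an instance in the meantime.
    current = frozenset(["inst1", "inst2", "inst3"])
    try:
      CheckGroupInstances(current, owned)
    except OpPrereqError:
      pass  # the caller aborts and the operation is retried with fresh locks
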
 def _SupportsOob(cfg, node):
   """Tells if node supports OOB.
 
@@ -665,18 +717,18 @@ def _ReleaseLocks(lu, level, names=None, keep=None):
     release = []
 
     # Determine which locks to release
-    for name in lu.glm.list_owned(level):
+    for name in lu.owned_locks(level):
       if should_release(name):
         release.append(name)
       else:
         retain.append(name)
 
-    assert len(lu.glm.list_owned(level)) == (len(retain) + len(release))
+    assert len(lu.owned_locks(level)) == (len(retain) + len(release))
 
     # Release just some locks
     lu.glm.release(level, names=release)
 
-    assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
+    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
   else:
     # Release everything
     lu.glm.release(level)
@@ -705,7 +757,7 @@ def _RunPostHook(lu, node_name):
   try:
     hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
   except:
-    # pylint: disable-msg=W0702
+    # pylint: disable=W0702
     lu.LogWarning("Errors occurred running hooks on %s" % node_name)
 
 
@@ -1035,7 +1087,7 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
   }
   if override:
     args.update(override)
-  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
+  return _BuildInstanceHookEnv(**args) # pylint: disable=W0142
 
 
 def _AdjustCandidatePool(lu, exceptions):
@@ -1201,6 +1253,29 @@ def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
                                  " iallocator")
 
 
+def _GetDefaultIAllocator(cfg, iallocator):
+  """Decides on which iallocator to use.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration object
+  @type iallocator: string or None
+  @param iallocator: Iallocator specified in opcode
+  @rtype: string
+  @return: Iallocator name
+
+  """
+  if not iallocator:
+    # Use default iallocator
+    iallocator = cfg.GetDefaultIAllocator()
+
+  if not iallocator:
+    raise errors.OpPrereqError("No iallocator was specified, neither in the"
+                               " opcode nor as a cluster-wide default",
+                               errors.ECODE_INVAL)
+
+  return iallocator
+
+
 class LUClusterPostInit(LogicalUnit):
   """Logical unit for running hooks after cluster initialization.
 
@@ -1280,7 +1355,7 @@ class LUClusterDestroy(LogicalUnit):
     # Run post hooks on master node before it's removed
     _RunPostHook(self, master)
 
-    result = self.rpc.call_node_stop_master(master, False)
+    result = self.rpc.call_node_deactivate_master_ip(master)
     result.Raise("Could not disable the master role")
 
     return master
@@ -1296,7 +1371,7 @@ def _VerifyCertificate(filename):
   try:
     cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                            utils.ReadFile(filename))
-  except Exception, err: # pylint: disable-msg=W0703
+  except Exception, err: # pylint: disable=W0703
     return (LUClusterVerifyConfig.ETYPE_ERROR,
             "Failed to load X509 certificate %s: %s" % (filename, err))
 
@@ -1411,7 +1486,7 @@ class _VerifyErrors(object):
     if args:
       msg = msg % args
     # then format the whole message
-    if self.op.error_codes: # This is a mix-in. pylint: disable-msg=E1101
+    if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
       msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
     else:
       if item:
@@ -1420,14 +1495,14 @@ class _VerifyErrors(object):
         item = ""
       msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
     # and finally report it via the feedback_fn
-    self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable-msg=E1101
+    self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable=E1101
 
   def _ErrorIf(self, cond, *args, **kwargs):
     """Log an error message if the passed condition is True.
 
     """
     cond = (bool(cond)
-            or self.op.debug_simulate_errors) # pylint: disable-msg=E1101
+            or self.op.debug_simulate_errors) # pylint: disable=E1101
     if cond:
       self._Error(*args, **kwargs)
     # do not mark the operation as failed for WARN cases only
@@ -1435,6 +1510,47 @@ class _VerifyErrors(object):
       self.bad = self.bad or cond
 
 
+class LUClusterVerify(NoHooksLU):
+  """Submits all jobs necessary to verify the cluster.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {}
+
+  def Exec(self, feedback_fn):
+    jobs = []
+
+    if self.op.group_name:
+      groups = [self.op.group_name]
+      depends_fn = lambda: None
+    else:
+      groups = self.cfg.GetNodeGroupList()
+
+      # Verify global configuration
+      jobs.append([opcodes.OpClusterVerifyConfig()])
+
+      # Always depend on global verification
+      depends_fn = lambda: [(-len(jobs), [])]
+
+    jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
+                                              depends=depends_fn())]
+                for group in groups)
+
+    # Fix up all parameters
+    for op in itertools.chain(*jobs): # pylint: disable=W0142
+      op.debug_simulate_errors = self.op.debug_simulate_errors
+      op.verbose = self.op.verbose
+      op.error_codes = self.op.error_codes
+      try:
+        op.skip_checks = self.op.skip_checks
+      except AttributeError:
+        assert not isinstance(op, opcodes.OpClusterVerifyGroup)
+
+    return ResultWithJobs(jobs)
+
+
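
The group jobs are chained to the configuration check via depends_fn, which expresses the dependency as a job ID relative to the jobs assembled in this submission, so -len(jobs) always resolves to the OpClusterVerifyConfig job. A small sketch with strings standing in for opcode lists; the exact meaning of the empty status list is left to Ganeti's job-dependency machinery and is not modelled here:

    # Illustrative only: strings stand in for opcode lists.
    jobs = []
    jobs.append(["OpClusterVerifyConfig"])      # submitted first

    depends_fn = lambda: [(-len(jobs), [])]     # relative job dependency

    for group in ["default", "rack2", "rack3"]:
      jobs.append(["OpClusterVerifyGroup(%s, depends=%r)"
                   % (group, depends_fn())])

    # The first group job gets depends=[(-1, [])], the second [(-2, [])], and
    # so on; each resolves to the job submitted that many positions earlier,
    # which is always the configuration-verification job.
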
 class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
   """Verifies the cluster config.
 
@@ -1458,6 +1574,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
   def ExpandNames(self):
     # Information can be safely retrieved as the BGL is acquired in exclusive
     # mode
+    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
     self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
     self.all_node_info = self.cfg.GetAllNodesInfo()
     self.all_inst_info = self.cfg.GetAllInstancesInfo()
@@ -1519,7 +1636,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
                   "the following instances have a non-existing primary-node:"
                   " %s", utils.CommaJoin(no_node_instances))
 
-    return (not self.bad, [g.name for g in self.all_group_info.values()])
+    return not self.bad
 
 
 class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
@@ -1609,7 +1726,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       # volumes for these instances are healthy, we will need to do an
       # extra call to their secondaries. We ensure here those nodes will
       # be locked.
-      for inst in self.glm.list_owned(locking.LEVEL_INSTANCE):
+      for inst in self.owned_locks(locking.LEVEL_INSTANCE):
         # Important: access only the instances whose lock is owned
         if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
           nodes.update(all_inst_info[inst].secondary_nodes)
@@ -1617,14 +1734,17 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       self.needed_locks[locking.LEVEL_NODE] = nodes
 
   def CheckPrereq(self):
-    group_nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
+    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
+    self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
+
+    group_nodes = set(self.group_info.members)
     group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
 
     unlocked_nodes = \
-        group_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
+        group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
 
     unlocked_instances = \
-        group_instances.difference(self.glm.list_owned(locking.LEVEL_INSTANCE))
+        group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
 
     if unlocked_nodes:
       raise errors.OpPrereqError("Missing lock for nodes: %s" %
@@ -1658,7 +1778,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
             extra_lv_nodes.add(nname)
 
     unlocked_lv_nodes = \
-        extra_lv_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
+        extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
 
     if unlocked_lv_nodes:
       raise errors.OpPrereqError("these nodes could be locked: %s" %
@@ -1681,7 +1801,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     node = ninfo.name
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     # main result, nresult should be a non-empty dict
     test = not nresult or not isinstance(nresult, dict)
@@ -1750,7 +1870,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     node = ninfo.name
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     ntime = nresult.get(constants.NV_TIME, None)
     try:
@@ -1783,7 +1903,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       return
 
     node = ninfo.name
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     # checks vg existence and size > 20G
     vglist = nresult.get(constants.NV_VGLIST, None)
@@ -1820,7 +1940,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       return
 
     node = ninfo.name
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     missing = nresult.get(constants.NV_BRIDGES, None)
     test = not isinstance(missing, list)
@@ -1839,7 +1959,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     node = ninfo.name
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     test = constants.NV_NODELIST not in nresult
     _ErrorIf(test, self.ENODESSH, node,
@@ -1880,7 +2000,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     available on the instance's node.
 
     """
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
     node_current = instanceconfig.primary_node
 
     node_vol_should = {}
@@ -1979,7 +2099,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
   @classmethod
   def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
-                   (files_all, files_all_opt, files_mc, files_vm)):
+                   (files_all, files_opt, files_mc, files_vm)):
     """Verifies file checksums collected from all nodes.
 
     @param errorif: Callback for reporting errors
@@ -1988,26 +2108,33 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param all_nvinfo: RPC results
 
     """
-    node_names = frozenset(node.name for node in nodeinfo if not node.offline)
+    # Define functions determining which nodes to consider for a file
+    files2nodefn = [
+      (files_all, None),
+      (files_mc, lambda node: (node.master_candidate or
+                               node.name == master_node)),
+      (files_vm, lambda node: node.vm_capable),
+      ]
 
-    assert master_node in node_names
-    assert (len(files_all | files_all_opt | files_mc | files_vm) ==
-            sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
-           "Found file listed in more than one file list"
+    # Build mapping from filename to list of nodes which should have the file
+    nodefiles = {}
+    for (files, fn) in files2nodefn:
+      if fn is None:
+        filenodes = nodeinfo
+      else:
+        filenodes = filter(fn, nodeinfo)
+      nodefiles.update((filename,
+                        frozenset(map(operator.attrgetter("name"), filenodes)))
+                       for filename in files)
 
-    # Define functions determining which nodes to consider for a file
-    file2nodefn = dict([(filename, fn)
-      for (files, fn) in [(files_all, None),
-                          (files_all_opt, None),
-                          (files_mc, lambda node: (node.master_candidate or
-                                                   node.name == master_node)),
-                          (files_vm, lambda node: node.vm_capable)]
-      for filename in files])
+    assert set(nodefiles) == (files_all | files_mc | files_vm)
 
-    fileinfo = dict((filename, {}) for filename in file2nodefn.keys())
+    fileinfo = dict((filename, {}) for filename in nodefiles)
+    ignore_nodes = set()
 
     for node in nodeinfo:
       if node.offline:
+        ignore_nodes.add(node.name)
         continue
 
       nresult = all_nvinfo[node.name]
@@ -2021,13 +2148,13 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       errorif(test, cls.ENODEFILECHECK, node.name,
               "Node did not return file checksum data")
       if test:
+        ignore_nodes.add(node.name)
         continue
 
+      # Build per-checksum mapping from filename to nodes having it
       for (filename, checksum) in node_files.items():
-        # Check if the file should be considered for a node
-        fn = file2nodefn[filename]
-        if fn is None or fn(node):
-          fileinfo[filename].setdefault(checksum, set()).add(node.name)
+        assert filename in nodefiles
+        fileinfo[filename].setdefault(checksum, set()).add(node.name)
 
     for (filename, checksums) in fileinfo.items():
       assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
@@ -2035,23 +2162,33 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       # Nodes having the file
       with_file = frozenset(node_name
                             for nodes in fileinfo[filename].values()
-                            for node_name in nodes)
+                            for node_name in nodes) - ignore_nodes
+
+      expected_nodes = nodefiles[filename] - ignore_nodes
 
       # Nodes missing file
-      missing_file = node_names - with_file
+      missing_file = expected_nodes - with_file
 
-      if filename in files_all_opt:
+      if filename in files_opt:
         # All or no nodes
-        errorif(missing_file and missing_file != node_names,
+        errorif(missing_file and missing_file != expected_nodes,
                 cls.ECLUSTERFILECHECK, None,
                 "File %s is optional, but it must exist on all or no"
                 " nodes (not found on %s)",
                 filename, utils.CommaJoin(utils.NiceSort(missing_file)))
       else:
+        # Non-optional files
         errorif(missing_file, cls.ECLUSTERFILECHECK, None,
                 "File %s is missing from node(s) %s", filename,
                 utils.CommaJoin(utils.NiceSort(missing_file)))
 
+        # Warn if a node has a file it shouldn't
+        unexpected = with_file - expected_nodes
+        errorif(unexpected,
+                cls.ECLUSTERFILECHECK, None,
+                "File %s should not exist on node(s) %s",
+                filename, utils.CommaJoin(utils.NiceSort(unexpected)))
+
       # See if there are multiple versions of the file
       test = len(checksums) > 1
       if test:
@@ -2080,7 +2217,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     node = ninfo.name
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     if drbd_helper:
       helper_result = nresult.get(constants.NV_DRBDHELPER, None)
@@ -2139,7 +2276,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     node = ninfo.name
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     remote_os = nresult.get(constants.NV_OSLIST, None)
     test = (not isinstance(remote_os, list) or
@@ -2180,7 +2317,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     node = ninfo.name
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
 
@@ -2250,7 +2387,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     node = ninfo.name
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     nimg.lvm_fail = True
     lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
@@ -2298,7 +2435,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     node = ninfo.name
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     # try to read free memory (from the hypervisor)
     hv_info = nresult.get(constants.NV_HVINFO, None)
@@ -2340,7 +2477,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         list of tuples (success, payload)
 
     """
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     node_disks = {}
     node_disks_devonly = {}
@@ -2422,6 +2559,40 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     return instdisk
 
+  @staticmethod
+  def _SshNodeSelector(group_uuid, all_nodes):
+    """Create endless iterators for all potential SSH check hosts.
+
+    """
+    nodes = [node for node in all_nodes
+             if (node.group != group_uuid and
+                 not node.offline)]
+    keyfunc = operator.attrgetter("group")
+
+    return map(itertools.cycle,
+               [sorted(map(operator.attrgetter("name"), names))
+                for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
+                                                  keyfunc)])
+
+  @classmethod
+  def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes):
+    """Choose which nodes should talk to which other nodes.
+
+    We will make nodes contact all nodes in their group, and one node from
+    every other group.
+
+    @warning: This algorithm has a known issue if one node group is much
+      smaller than others (e.g. just one node). In such a case all other
+      nodes will talk to the single node.
+
+    """
+    online_nodes = sorted(node.name for node in group_nodes if not node.offline)
+    sel = cls._SshNodeSelector(group_uuid, all_nodes)
+
+    return (online_nodes,
+            dict((name, sorted([i.next() for i in sel]))
+                 for name in online_nodes))
+
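
These two methods replace the inline "one node from every other group" loop removed from _VerifyGroup below and produce the new NV_NODELIST payload: a pair of (online nodes in the group, mapping from each of them to one node per other group). A standalone sketch with made-up nodes; collections.namedtuple stands in for objects.Node and next(i) replaces the Python 2-only i.next():

    import itertools
    import operator
    from collections import namedtuple

    Node = namedtuple("Node", ["name", "group", "offline"])

    all_nodes = [
      Node("node1", "grp-a", False), Node("node2", "grp-a", False),
      Node("node3", "grp-b", False), Node("node4", "grp-b", True),
      ]
    group_uuid = "grp-a"
    group_nodes = [node for node in all_nodes if node.group == group_uuid]

    # One endless iterator per *other* group, cycling its non-offline members
    others = [node for node in all_nodes
              if node.group != group_uuid and not node.offline]
    keyfunc = operator.attrgetter("group")
    sel = [itertools.cycle(sorted(node.name for node in grp))
           for _, grp in itertools.groupby(sorted(others, key=keyfunc),
                                           keyfunc)]

    online = sorted(node.name for node in group_nodes if not node.offline)
    result = (online, dict((name, sorted(next(i) for i in sel))
                           for name in online))
    # result == (['node1', 'node2'], {'node1': ['node3'], 'node2': ['node3']})
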
   def BuildHooksEnv(self):
     """Build hooks env.
 
@@ -2448,7 +2619,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     """Verify integrity of the node group, performing various test on nodes.
 
     """
-    # This method has too many local variables. pylint: disable-msg=R0914
+    # This method has too many local variables. pylint: disable=R0914
+    feedback_fn("* Verifying group '%s'" % self.group_info.name)
 
     if not self.my_node_names:
       # empty node group
@@ -2456,7 +2628,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       return True
 
     self.bad = False
-    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
+    _ErrorIf = self._ErrorIf # pylint: disable=C0103
     verbose = self.op.verbose
     self._feedback_fn = feedback_fn
 
@@ -2484,25 +2656,14 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
 
-    # We will make nodes contact all nodes in their group, and one node from
-    # every other group.
-    # TODO: should it be a *random* node, different every time?
-    online_nodes = [node.name for node in node_data_list if not node.offline]
-    other_group_nodes = {}
-
-    for name in sorted(self.all_node_info):
-      node = self.all_node_info[name]
-      if (node.group not in other_group_nodes
-          and node.group != self.group_uuid
-          and not node.offline):
-        other_group_nodes[node.group] = node.name
-
     node_verify_param = {
       constants.NV_FILELIST:
         utils.UniqueSequence(filename
                              for files in filemap
                              for filename in files),
-      constants.NV_NODELIST: online_nodes + other_group_nodes.values(),
+      constants.NV_NODELIST:
+        self._SelectSshCheckNodes(node_data_list, self.group_uuid,
+                                  self.all_node_info.values()),
       constants.NV_HYPERVISOR: hypervisors,
       constants.NV_HVPARAMS:
         _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
@@ -2852,10 +3013,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         self._ErrorIf(test, self.ENODEHOOKS, node_name,
                       "Communication failure in hooks execution: %s", msg)
         if res.offline or msg:
-          # No need to investigate payload if node is offline or gave an error.
-          # override manually lu_result here as _ErrorIf only
-          # overrides self.bad
-          lu_result = 1
+          # No need to investigate payload if node is offline or gave
+          # an error.
           continue
         for script, hkr, output in res.payload:
           test = hkr == constants.HKR_FAIL
@@ -2864,7 +3023,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
           if test:
             output = self._HOOKS_INDENT_RE.sub("      ", output)
             feedback_fn("%s" % output)
-            lu_result = 0
+            lu_result = False
 
     return lu_result
 
@@ -2882,7 +3041,7 @@ class LUClusterVerifyDisks(NoHooksLU):
       }
 
   def Exec(self, feedback_fn):
-    group_names = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
 
     # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
     return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
@@ -2924,10 +3083,8 @@ class LUGroupVerifyDisks(NoHooksLU):
             # going via the node before it's locked, requiring verification
             # later on
             [group_uuid
-             for instance_name in
-               self.glm.list_owned(locking.LEVEL_INSTANCE)
-             for group_uuid in
-               self.cfg.GetInstanceNodeGroups(instance_name)])
+             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
+             for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
 
     elif level == locking.LEVEL_NODE:
       # This will only lock the nodes in the group to be verified which contain
@@ -2936,49 +3093,33 @@ class LUGroupVerifyDisks(NoHooksLU):
       self._LockInstancesNodes()
 
       # Lock all nodes in group to be verified
-      assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
       member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
       self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert self.group_uuid in owned_groups
 
     # Check if locked instances are still correct
-    wanted_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
-    if owned_instances != wanted_instances:
-      raise errors.OpPrereqError("Instances in node group %s changed since"
-                                 " locks were acquired, wanted %s, have %s;"
-                                 " retry the operation" %
-                                 (self.op.group_name,
-                                  utils.CommaJoin(wanted_instances),
-                                  utils.CommaJoin(owned_instances)),
-                                 errors.ECODE_STATE)
+    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     # Get instance information
-    self.instances = dict((name, self.cfg.GetInstanceInfo(name))
-                          for name in owned_instances)
+    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
     for (instance_name, inst) in self.instances.items():
-      assert self.group_uuid in self.cfg.GetInstanceNodeGroups(instance_name), \
-        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
       assert owned_nodes.issuperset(inst.all_nodes), \
         "Instance %s's nodes changed while we kept the lock" % instance_name
 
-      inst_groups = self.cfg.GetInstanceNodeGroups(instance_name)
-      if not owned_groups.issuperset(inst_groups):
-        raise errors.OpPrereqError("Instance %s's node groups changed since"
-                                   " locks were acquired, current groups are"
-                                   " are '%s', owning groups '%s'; retry the"
-                                   " operation" %
-                                   (instance_name,
-                                    utils.CommaJoin(inst_groups),
-                                    utils.CommaJoin(owned_groups)),
-                                   errors.ECODE_STATE)
+      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
+                                             owned_groups)
+
+      assert self.group_uuid in inst_groups, \
+        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
 
   def Exec(self, feedback_fn):
     """Verify integrity of cluster disks.
@@ -2998,7 +3139,7 @@ class LUGroupVerifyDisks(NoHooksLU):
                                         if inst.admin_up])
 
     if nv_dict:
-      nodes = utils.NiceSort(set(self.glm.list_owned(locking.LEVEL_NODE)) &
+      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                              set(self.cfg.GetVmCapableNodeList()))
 
       node_lvs = self.rpc.call_lv_list(nodes, [])
@@ -3059,10 +3200,10 @@ class LUClusterRepairDiskSizes(NoHooksLU):
 
     """
     if self.wanted_names is None:
-      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
+      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
 
-    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
-                             in self.wanted_names]
+    self.wanted_instances = \
+        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
 
   def _EnsureChildSizes(self, disk):
     """Ensure children of the disk have the needed disk size.
@@ -3194,7 +3335,7 @@ class LUClusterRename(LogicalUnit):
 
     # shutdown the master IP
     master = self.cfg.GetMasterNode()
-    result = self.rpc.call_node_stop_master(master, False)
+    result = self.rpc.call_node_deactivate_master_ip(master)
     result.Raise("Could not disable the master role")
 
     try:
@@ -3212,7 +3353,7 @@ class LUClusterRename(LogicalUnit):
         pass
       _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
     finally:
-      result = self.rpc.call_node_start_master(master, False, False)
+      result = self.rpc.call_node_activate_master_ip(master)
       msg = result.fail_msg
       if msg:
         self.LogWarning("Could not re-enable the master role on"
@@ -3284,7 +3425,7 @@ class LUClusterSetParams(LogicalUnit):
                                    " drbd-based instances exist",
                                    errors.ECODE_INVAL)
 
-    node_list = self.glm.list_owned(locking.LEVEL_NODE)
+    node_list = self.owned_locks(locking.LEVEL_NODE)
 
     # if vg_name not None, checks given volume group on all nodes
     if self.op.vg_name:
@@ -3545,7 +3686,7 @@ class LUClusterSetParams(LogicalUnit):
       master = self.cfg.GetMasterNode()
       feedback_fn("Shutting down master ip on the current netdev (%s)" %
                   self.cluster.master_netdev)
-      result = self.rpc.call_node_stop_master(master, False)
+      result = self.rpc.call_node_deactivate_master_ip(master)
       result.Raise("Could not disable the master ip")
       feedback_fn("Changing master_netdev from %s to %s" %
                   (self.cluster.master_netdev, self.op.master_netdev))
@@ -3556,7 +3697,7 @@ class LUClusterSetParams(LogicalUnit):
     if self.op.master_netdev:
       feedback_fn("Starting the master ip on the new master netdev (%s)" %
                   self.op.master_netdev)
-      result = self.rpc.call_node_start_master(master, False, False)
+      result = self.rpc.call_node_activate_master_ip(master)
       if result.fail_msg:
         self.LogWarning("Could not re-enable the master ip on"
                         " the master, please restart manually: %s",
@@ -3589,17 +3730,23 @@ def _ComputeAncillaryFiles(cluster, redist):
     constants.SSH_KNOWN_HOSTS_FILE,
     constants.CONFD_HMAC_KEY,
     constants.CLUSTER_DOMAIN_SECRET_FILE,
+    constants.RAPI_USERS_FILE,
     ])
 
   if not redist:
     files_all.update(constants.ALL_CERT_FILES)
     files_all.update(ssconf.SimpleStore().GetFileList())
+  else:
+    # we need to ship at least the RAPI certificate
+    files_all.add(constants.RAPI_CERT_FILE)
 
   if cluster.modify_etc_hosts:
     files_all.add(constants.ETC_HOSTS)
 
-  # Files which must either exist on all nodes or on none
-  files_all_opt = set([
+  # Files which are optional, these must:
+  # - be present in one other category as well
+  # - either exist or not exist on all nodes of that category (mc, vm all)
+  files_opt = set([
     constants.RAPI_USERS_FILE,
     ])
 
@@ -3611,14 +3758,23 @@ def _ComputeAncillaryFiles(cluster, redist):
   # Files which should only be on VM-capable nodes
   files_vm = set(filename
     for hv_name in cluster.enabled_hypervisors
-    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())
+    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
+
+  files_opt |= set(filename
+    for hv_name in cluster.enabled_hypervisors
+    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
 
-  # Filenames must be unique
-  assert (len(files_all | files_all_opt | files_mc | files_vm) ==
-          sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
+  # Filenames in each category must be unique
+  all_files_set = files_all | files_mc | files_vm
+  assert (len(all_files_set) ==
+          sum(map(len, [files_all, files_mc, files_vm]))), \
          "Found file listed in more than one file list"
 
-  return (files_all, files_all_opt, files_mc, files_vm)
+  # Optional files must be present in one other category
+  assert all_files_set.issuperset(files_opt), \
+         "Optional file not in a different required list"
+
+  return (files_all, files_opt, files_mc, files_vm)
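
This is where the per-hypervisor optional files from the commit title enter: GetAncillaryFiles() is now consumed as a pair, with [0] feeding files_vm and [1] feeding files_opt, and the assert above requires every optional file to also appear in a required category. A hedged sketch of what a hypervisor might return under that contract; the class and file names are invented for illustration, not taken from Ganeti's hypervisor modules:

    class ExampleHypervisor(object):
      # Illustrative only. Required ancillary files must exist on every
      # VM-capable node; optional ones may exist on all of them or on none.
      ANCILLARY_FILES = [
        "/etc/example-hv/main.conf",
        ]
      ANCILLARY_FILES_OPT = [
        "/etc/example-hv/extra.conf",
        ]

      @classmethod
      def GetAncillaryFiles(cls):
        """Returns (all ancillary files, the optional subset)."""
        # Optional files are also listed in the first element so that the
        # "optional files must be in another category" assert holds.
        return (cls.ANCILLARY_FILES + cls.ANCILLARY_FILES_OPT,
                cls.ANCILLARY_FILES_OPT)
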
 
 
 def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
@@ -3652,7 +3808,7 @@ def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
       nodelist.remove(master_info.name)
 
   # Gather file lists
-  (files_all, files_all_opt, files_mc, files_vm) = \
+  (files_all, _, files_mc, files_vm) = \
     _ComputeAncillaryFiles(cluster, True)
 
   # Never re-distribute configuration file from here
@@ -3662,7 +3818,6 @@ def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
 
   filemap = [
     (online_nodes, files_all),
-    (online_nodes, files_all_opt),
     (vm_nodes, files_vm),
     ]
 
@@ -3694,6 +3849,30 @@ class LUClusterRedistConf(NoHooksLU):
     _RedistributeAncillaryFiles(self)
 
 
+class LUClusterActivateMasterIp(NoHooksLU):
+  """Activate the master IP on the master node.
+
+  """
+  def Exec(self, feedback_fn):
+    """Activate the master IP.
+
+    """
+    master = self.cfg.GetMasterNode()
+    self.rpc.call_node_activate_master_ip(master)
+
+
+class LUClusterDeactivateMasterIp(NoHooksLU):
+  """Deactivate the master IP on the master node.
+
+  """
+  def Exec(self, feedback_fn):
+    """Deactivate the master IP.
+
+    """
+    master = self.cfg.GetMasterNode()
+    self.rpc.call_node_deactivate_master_ip(master)
+
+
 def _WaitForSync(lu, instance, disks=None, oneshot=False):
   """Sleep and poll for an instance's disk to sync.
 
@@ -3982,6 +4161,7 @@ class LUOobCommand(NoHooksLU):
       raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                                utils.CommaJoin(errs))
 
+
 class _OsQuery(_QueryBase):
   FIELDS = query.OS_FIELDS
 
@@ -4189,15 +4369,12 @@ class LUNodeRemove(LogicalUnit):
     node = self.cfg.GetNodeInfo(self.op.node_name)
     assert node is not None
 
-    instance_list = self.cfg.GetInstanceList()
-
     masternode = self.cfg.GetMasterNode()
     if node.name == masternode:
       raise errors.OpPrereqError("Node is the master node, failover to another"
                                  " node is required", errors.ECODE_INVAL)
 
-    for instance_name in instance_list:
-      instance = self.cfg.GetInstanceInfo(instance_name)
+    for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
       if node.name in instance.all_nodes:
         raise errors.OpPrereqError("Instance %s is still running on the node,"
                                    " please remove first" % instance_name,
@@ -4243,7 +4420,7 @@ class _NodeQuery(_QueryBase):
 
   def ExpandNames(self, lu):
     lu.needed_locks = {}
-    lu.share_locks[locking.LEVEL_NODE] = 1
+    lu.share_locks = _ShareAll()
 
     if self.names:
       self.wanted = _GetWantedNodes(lu, self.names)
@@ -4254,7 +4431,7 @@ class _NodeQuery(_QueryBase):
                        query.NQ_LIVE in self.requested_data)
 
     if self.do_locking:
-      # if we don't request only static fields, we need to lock the nodes
+      # If any non-static field is requested we need to lock the nodes
       lu.needed_locks[locking.LEVEL_NODE] = self.wanted
 
   def DeclareLocks(self, lu, level):
@@ -4318,7 +4495,7 @@ class LUNodeQuery(NoHooksLU):
   """Logical unit for querying nodes.
 
   """
-  # pylint: disable-msg=W0142
+  # pylint: disable=W0142
   REQ_BGL = False
 
   def CheckArguments(self):
@@ -4358,7 +4535,7 @@ class LUNodeQueryvols(NoHooksLU):
     """Computes the list of nodes and their attributes.
 
     """
-    nodenames = self.glm.list_owned(locking.LEVEL_NODE)
+    nodenames = self.owned_locks(locking.LEVEL_NODE)
     volumes = self.rpc.call_node_volumes(nodenames)
 
     ilist = self.cfg.GetAllInstancesInfo()
@@ -4427,7 +4604,7 @@ class LUNodeQueryStorage(NoHooksLU):
     """Computes the list of nodes and their attributes.
 
     """
-    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
+    self.nodes = self.owned_locks(locking.LEVEL_NODE)
 
     # Always get name to sort by
     if constants.SF_NAME in self.op.output_fields:
@@ -4516,30 +4693,19 @@ class _InstanceQuery(_QueryBase):
         # via the node before it's locked, requiring verification later on
         lu.needed_locks[locking.LEVEL_NODEGROUP] = \
           set(group_uuid
-              for instance_name in
-                lu.glm.list_owned(locking.LEVEL_INSTANCE)
-              for group_uuid in
-                lu.cfg.GetInstanceNodeGroups(instance_name))
+              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
       elif level == locking.LEVEL_NODE:
-        lu._LockInstancesNodes() # pylint: disable-msg=W0212
+        lu._LockInstancesNodes() # pylint: disable=W0212
 
   @staticmethod
   def _CheckGroupLocks(lu):
-    owned_instances = frozenset(lu.glm.list_owned(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(lu.glm.list_owned(locking.LEVEL_NODEGROUP))
+    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
 
     # Check if node groups for locked instances are still correct
     for instance_name in owned_instances:
-      inst_groups = lu.cfg.GetInstanceNodeGroups(instance_name)
-      if not owned_groups.issuperset(inst_groups):
-        raise errors.OpPrereqError("Instance %s's node groups changed since"
-                                   " locks were acquired, current groups are"
-                                   " are '%s', owning groups '%s'; retry the"
-                                   " operation" %
-                                   (instance_name,
-                                    utils.CommaJoin(inst_groups),
-                                    utils.CommaJoin(owned_groups)),
-                                   errors.ECODE_STATE)
+      _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
 
   def _GetQueryData(self, lu):
     """Computes the list of instances and their attributes.
@@ -4631,13 +4797,13 @@ class LUQuery(NoHooksLU):
   """Query for resources/items of a certain kind.
 
   """
-  # pylint: disable-msg=W0142
+  # pylint: disable=W0142
   REQ_BGL = False
 
   def CheckArguments(self):
     qcls = _GetQueryImplementation(self.op.what)
 
-    self.impl = qcls(self.op.filter, self.op.fields, False)
+    self.impl = qcls(self.op.filter, self.op.fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.impl.ExpandNames(self)
@@ -4653,7 +4819,7 @@ class LUQueryFields(NoHooksLU):
   """Query for resources/items of a certain kind.
 
   """
-  # pylint: disable-msg=W0142
+  # pylint: disable=W0142
   REQ_BGL = False
 
   def CheckArguments(self):
@@ -4900,7 +5066,7 @@ class LUNodeAdd(LogicalUnit):
     # later in the procedure; this also means that if the re-add
     # fails, we are left with a non-offlined, broken node
     if self.op.readd:
-      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
+      new_node.drained = new_node.offline = False # pylint: disable=W0201
       self.LogInfo("Readding a node, the offline/drained flags were reset")
       # if we demote the node, we do cleanup later in the procedure
       new_node.master_candidate = self.master_candidate
@@ -4946,7 +5112,7 @@ class LUNodeAdd(LogicalUnit):
 
     node_verify_list = [self.cfg.GetMasterNode()]
     node_verify_param = {
-      constants.NV_NODELIST: [node],
+      constants.NV_NODELIST: ([node], {}),
       # TODO: do a node-net-test as well?
     }
 
@@ -5048,8 +5214,8 @@ class LUNodeSetParams(LogicalUnit):
         instances_keep = []
 
         # Build list of instances to release
-        for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
-          instance = self.context.cfg.GetInstanceInfo(instance_name)
+        locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
+        for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i):
           if (instance.disk_template in constants.DTS_INT_MIRROR and
               self.op.node_name in instance.all_nodes):
             instances_keep.append(instance_name)
@@ -5057,7 +5223,7 @@ class LUNodeSetParams(LogicalUnit):
 
         _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
 
-        assert (set(self.glm.list_owned(locking.LEVEL_INSTANCE)) ==
+        assert (set(self.owned_locks(locking.LEVEL_INSTANCE)) ==
                 set(instances_keep))
 
   def BuildHooksEnv(self):
@@ -6460,7 +6626,7 @@ class LUInstanceQuery(NoHooksLU):
   """Logical unit for querying instances.
 
   """
-  # pylint: disable-msg=W0142
+  # pylint: disable=W0142
   REQ_BGL = False
 
   def CheckArguments(self):
@@ -6871,7 +7037,7 @@ class LUNodeMigrate(LogicalUnit):
     # running the iallocator and the actual migration, a good consistency model
     # will have to be found.
 
-    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
             frozenset([self.op.node_name]))
 
     return ResultWithJobs(jobs)
@@ -7253,6 +7419,21 @@ class TLMigrateInstance(Tasklet):
     target_node = self.target_node
     source_node = self.source_node
 
+    # Check for hypervisor version mismatch and warn the user.
+    nodeinfo = self.rpc.call_node_info([source_node, target_node],
+                                       None, self.instance.hypervisor)
+    src_info = nodeinfo[source_node]
+    dst_info = nodeinfo[target_node]
+
+    if ((constants.HV_NODEINFO_KEY_VERSION in src_info.payload) and
+        (constants.HV_NODEINFO_KEY_VERSION in dst_info.payload)):
+      src_version = src_info.payload[constants.HV_NODEINFO_KEY_VERSION]
+      dst_version = dst_info.payload[constants.HV_NODEINFO_KEY_VERSION]
+      if src_version != dst_version:
+        self.feedback_fn("* warning: hypervisor version mismatch between"
+                         " source (%s) and target (%s) node" %
+                         (src_version, dst_version))
+
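
The new check only warns about a hypervisor version mismatch and never aborts the migration. A tiny standalone sketch of the comparison, with plain dicts standing in for the call_node_info payloads; only HV_NODEINFO_KEY_VERSION comes from the hunk above, everything else is made up:

    HV_NODEINFO_KEY_VERSION = "hv_version"   # stand-in for constants.*

    src_payload = {"memory_total": 16384, HV_NODEINFO_KEY_VERSION: (2, 6, 32)}
    dst_payload = {"memory_total": 16384, HV_NODEINFO_KEY_VERSION: (3, 4, 0)}

    if (HV_NODEINFO_KEY_VERSION in src_payload and
        HV_NODEINFO_KEY_VERSION in dst_payload):
      src_version = src_payload[HV_NODEINFO_KEY_VERSION]
      dst_version = dst_payload[HV_NODEINFO_KEY_VERSION]
      if src_version != dst_version:
        print("warning: hypervisor version mismatch between source (%s) and"
              " target (%s) node" % (src_version, dst_version))
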
     self.feedback_fn("* checking disk consistency between source and target")
     for dev in instance.disks:
       if not _CheckDiskConsistency(self.lu, dev, target_node, False):
@@ -7861,7 +8042,7 @@ def _ComputeDiskSizePerVG(disk_template, disks):
 
   if disk_template not in req_size_dict:
     raise errors.ProgrammerError("Disk template '%s' size requirement"
-                                 " is unknown" %  disk_template)
+                                 " is unknown" % disk_template)
 
   return req_size_dict[disk_template]
 
@@ -7883,7 +8064,7 @@ def _ComputeDiskSize(disk_template, disks):
 
   if disk_template not in req_size_dict:
     raise errors.ProgrammerError("Disk template '%s' size requirement"
-                                 " is unknown" %  disk_template)
+                                 " is unknown" % disk_template)
 
   return req_size_dict[disk_template]
 
@@ -8163,8 +8344,8 @@ class LUInstanceCreate(LogicalUnit):
         self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
         self.op.src_node = None
         if os.path.isabs(src_path):
-          raise errors.OpPrereqError("Importing an instance from an absolute"
-                                     " path requires a source node option",
+          raise errors.OpPrereqError("Importing an instance from a path"
+                                     " requires a source node option",
                                      errors.ECODE_INVAL)
       else:
         self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
@@ -8267,7 +8448,7 @@ class LUInstanceCreate(LogicalUnit):
     src_path = self.op.src_path
 
     if src_node is None:
-      locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
       exp_list = self.rpc.call_export_list(locked_nodes)
       found = False
       for node in exp_list:
@@ -8425,7 +8606,7 @@ class LUInstanceCreate(LogicalUnit):
 
       joinargs.append(self.op.instance_name)
 
-      # pylint: disable-msg=W0142
+      # pylint: disable=W0142
       self.instance_file_storage_dir = utils.PathJoin(*joinargs)
 
   def CheckPrereq(self):
@@ -8797,7 +8978,7 @@ class LUInstanceCreate(LogicalUnit):
         # 'fake' LV disks with the old data, plus the new unique_id
         tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
         rename_to = []
-        for t_dsk, a_dsk in zip (tmp_disks, self.disks):
+        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
           rename_to.append(t_dsk.logical_id)
           t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
           self.cfg.SetDiskID(t_dsk, pnode_name)
@@ -8848,7 +9029,6 @@ class LUInstanceCreate(LogicalUnit):
       disk_abort = not _WaitForSync(self, iobj)
     elif iobj.disk_template in constants.DTS_INT_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
-      time.sleep(15)
       feedback_fn("* checking mirrors status")
       disk_abort = not _WaitForSync(self, iobj, oneshot=True)
     else:
@@ -8865,12 +9045,33 @@ class LUInstanceCreate(LogicalUnit):
     if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
       if self.op.mode == constants.INSTANCE_CREATE:
         if not self.op.no_install:
+          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
+                        not self.op.wait_for_sync)
+          if pause_sync:
+            feedback_fn("* pausing disk sync to install instance OS")
+            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
+                                                              iobj.disks, True)
+            for idx, success in enumerate(result.payload):
+              if not success:
+                logging.warn("pause-sync of instance %s for disk %d failed",
+                             instance, idx)
+
           feedback_fn("* running the instance OS create scripts...")
           # FIXME: pass debug option from opcode to backend
-          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
-                                                 self.op.debug_level)
-          result.Raise("Could not add os for instance %s"
-                       " on node %s" % (instance, pnode_name))
+          os_add_result = \
+            self.rpc.call_instance_os_add(pnode_name, iobj, False,
+                                          self.op.debug_level)
+          if pause_sync:
+            feedback_fn("* resuming disk sync")
+            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
+                                                              iobj.disks, False)
+            for idx, success in enumerate(result.payload):
+              if not success:
+                logging.warn("resume-sync of instance %s for disk %d failed",
+                             instance, idx)
+
+          os_add_result.Raise("Could not add os for instance %s"
+                              " on node %s" % (instance, pnode_name))
 
       elif self.op.mode == constants.INSTANCE_IMPORT:
         feedback_fn("* running the instance OS import scripts...")
@@ -9075,7 +9276,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
 
         # Lock member nodes of all locked groups
         self.needed_locks[locking.LEVEL_NODE] = [node_name
-          for group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+          for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
           for node_name in self.cfg.GetNodeGroup(group_uuid).members]
       else:
         self._LockInstancesNodes()
@@ -9115,16 +9316,9 @@ class LUInstanceReplaceDisks(LogicalUnit):
     assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
             self.op.iallocator is None)
 
-    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
     if owned_groups:
-      groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
-      if owned_groups != groups:
-        raise errors.OpExecError("Node groups used by instance '%s' changed"
-                                 " since lock was acquired, current list is %r,"
-                                 " used to be '%s'" %
-                                 (self.op.instance_name,
-                                  utils.CommaJoin(groups),
-                                  utils.CommaJoin(owned_groups)))
+      _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
 
     return LogicalUnit.CheckPrereq(self)
 
@@ -9213,6 +9407,9 @@ class TLReplaceDisks(Tasklet):
     return remote_node_name
 
   def _FindFaultyDisks(self, node_name):
+    """Wrapper for L{_FindFaultyInstanceDisks}.
+
+    """
     return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                     node_name, True)
 
@@ -9283,7 +9480,7 @@ class TLReplaceDisks(Tasklet):
     if remote_node is None:
       self.remote_node_info = None
     else:
-      assert remote_node in self.lu.glm.list_owned(locking.LEVEL_NODE), \
+      assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
              "Remote node '%s' is not locked" % remote_node
 
       self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
@@ -9403,13 +9600,13 @@ class TLReplaceDisks(Tasklet):
 
     if __debug__:
       # Verify owned locks before starting operation
-      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
-      assert set(owned_locks) == set(self.node_secondary_ip), \
+      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
+      assert set(owned_nodes) == set(self.node_secondary_ip), \
           ("Incorrect node locks, owning %s, expected %s" %
-           (owned_locks, self.node_secondary_ip.keys()))
+           (owned_nodes, self.node_secondary_ip.keys()))
 
-      owned_locks = self.lu.glm.list_owned(locking.LEVEL_INSTANCE)
-      assert list(owned_locks) == [self.instance_name], \
+      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
+      assert list(owned_instances) == [self.instance_name], \
           "Instance '%s' not locked" % self.instance_name
 
       assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
@@ -9444,12 +9641,12 @@ class TLReplaceDisks(Tasklet):
 
     if __debug__:
       # Verify owned locks
-      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
+      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
       nodes = frozenset(self.node_secondary_ip)
-      assert ((self.early_release and not owned_locks) or
-              (not self.early_release and not (set(owned_locks) - nodes))), \
+      assert ((self.early_release and not owned_nodes) or
+              (not self.early_release and not (set(owned_nodes) - nodes))), \
         ("Not owning the correct locks, early_release=%s, owned=%r,"
-         " nodes=%r" % (self.early_release, owned_locks, nodes))
+         " nodes=%r" % (self.early_release, owned_nodes, nodes))
 
     return result
 
@@ -9569,7 +9766,7 @@ class TLReplaceDisks(Tasklet):
           self.lu.LogWarning("Can't remove old LV: %s" % msg,
                              hint="remove unused LVs manually")
 
-  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable-msg=W0613
+  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
     """Replace a disk on the primary or secondary for DRBD 8.
 
     The algorithm for replace is quite complicated:
@@ -9726,6 +9923,8 @@ class TLReplaceDisks(Tasklet):
     """
     steps_total = 6
 
+    pnode = self.instance.primary_node
+
     # Step: check device activation
     self.lu.LogStep(1, steps_total, "Check device existence")
     self._CheckDisksExistence([self.instance.primary_node])
@@ -9800,10 +9999,8 @@ class TLReplaceDisks(Tasklet):
                                  " soon as possible"))
 
     self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
-    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
-                                               self.node_secondary_ip,
-                                               self.instance.disks)\
-                                              [self.instance.primary_node]
+    result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
+                                               self.instance.disks)[pnode]
 
     msg = result.fail_msg
     if msg:
@@ -10008,9 +10205,9 @@ class LUNodeEvacuate(NoHooksLU):
 
   def CheckPrereq(self):
     # Verify locks
-    owned_instances = self.glm.list_owned(locking.LEVEL_INSTANCE)
-    owned_nodes = self.glm.list_owned(locking.LEVEL_NODE)
-    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+    owned_nodes = self.owned_locks(locking.LEVEL_NODE)
+    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
 
     assert owned_nodes == self.lock_nodes
 
@@ -10301,10 +10498,10 @@ class LUInstanceQueryData(NoHooksLU):
     """
     if self.wanted_names is None:
       assert self.op.use_locking, "Locking was not used"
-      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
+      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
 
-    self.wanted_instances = [self.cfg.GetInstanceInfo(name)
-                             for name in self.wanted_names]
+    self.wanted_instances = \
+        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
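
As a side note on this refactoring, a hedged illustration: GetMultiInstanceInfo is used elsewhere in this patch as dict(...), i.e. it is assumed to yield (name, instance) pairs, and compat.snd is assumed to pick the second element of each pair, so the map() call keeps only the instance objects.

# Plain-tuple sketch of the assumed behaviour; the names are invented.
def snd(pair):
  return pair[1]

pairs = [("inst1", {"primary_node": "node1"}),
         ("inst2", {"primary_node": "node2"})]
wanted = list(map(snd, pairs))
assert wanted == [{"primary_node": "node1"}, {"primary_node": "node2"}]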
 
   def _ComputeBlockdevStatus(self, node, instance_name, dev):
     """Returns the status of a block device
@@ -10722,7 +10919,7 @@ class LUInstanceSetParams(LogicalUnit):
       if msg:
         # Assume the primary node is unreachable and go ahead
         self.warn.append("Can't get info from primary node %s: %s" %
-                         (pnode,  msg))
+                         (pnode, msg))
       elif not isinstance(pninfo.payload.get("memory_free", None), int):
         self.warn.append("Node data from primary node %s doesn't contain"
                          " free memory information" % pnode)
@@ -11097,6 +11294,147 @@ class LUInstanceSetParams(LogicalUnit):
     }
 
 
+class LUInstanceChangeGroup(LogicalUnit):
+  HPATH = "instance-change-group"
+  HTYPE = constants.HTYPE_INSTANCE
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.share_locks = _ShareAll()
+    self.needed_locks = {
+      locking.LEVEL_NODEGROUP: [],
+      locking.LEVEL_NODE: [],
+      }
+
+    self._ExpandAndLockInstance()
+
+    if self.op.target_groups:
+      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
+                                  self.op.target_groups)
+    else:
+      self.req_target_uuids = None
+
+    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODEGROUP:
+      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
+
+      if self.req_target_uuids:
+        lock_groups = set(self.req_target_uuids)
+
+        # Lock all groups used by instance optimistically; this requires going
+        # via the node before it's locked, requiring verification later on
+        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+        lock_groups.update(instance_groups)
+      else:
+        # No target groups, need to lock all of them
+        lock_groups = locking.ALL_SET
+
+      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
+
+    elif level == locking.LEVEL_NODE:
+      if self.req_target_uuids:
+        # Lock all nodes used by the instance
+        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+        self._LockInstancesNodes()
+
+        # Lock all nodes in all potential target groups
+        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
+                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
+        member_nodes = [node_name
+                        for group in lock_groups
+                        for node_name in self.cfg.GetNodeGroup(group).members]
+        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
+      else:
+        # Lock all nodes as all groups are potential targets
+        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+  def CheckPrereq(self):
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+
+    assert (self.req_target_uuids is None or
+            owned_groups.issuperset(self.req_target_uuids))
+    assert owned_instances == set([self.op.instance_name])
+
+    # Get instance information
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+
+    # Check if node groups for locked instance are still correct
+    assert owned_nodes.issuperset(self.instance.all_nodes), \
+      ("Instance %s's nodes changed while we kept the lock" %
+       self.op.instance_name)
+
+    inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
+                                           owned_groups)
+
+    if self.req_target_uuids:
+      # User requested specific target groups
+      self.target_uuids = self.req_target_uuids
+    else:
+      # All groups except those used by the instance are potential targets
+      self.target_uuids = owned_groups - inst_groups
+
+    conflicting_groups = self.target_uuids & inst_groups
+    if conflicting_groups:
+      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
+                                 " used by the instance '%s'" %
+                                 (utils.CommaJoin(conflicting_groups),
+                                  self.op.instance_name),
+                                 errors.ECODE_INVAL)
+
+    if not self.target_uuids:
+      raise errors.OpPrereqError("There are no possible target groups",
+                                 errors.ECODE_INVAL)
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    assert self.target_uuids
+
+    env = {
+      "TARGET_GROUPS": " ".join(self.target_uuids),
+      }
+
+    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+
+    return env
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    mn = self.cfg.GetMasterNode()
+    return ([mn], [mn])
+
+  def Exec(self, feedback_fn):
+    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
+
+    assert instances == [self.op.instance_name], "Instance not locked"
+
+    ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
+                     instances=instances, target_groups=list(self.target_uuids))
+
+    ial.Run(self.op.iallocator)
+
+    if not ial.success:
+      raise errors.OpPrereqError("Can't compute solution for changing group of"
+                                 " instance '%s' using iallocator '%s': %s" %
+                                 (self.op.instance_name, self.op.iallocator,
+                                  ial.info),
+                                 errors.ECODE_NORES)
+
+    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
+
+    self.LogInfo("Iallocator returned %s job(s) for changing group of"
+                 " instance '%s'", len(jobs), self.op.instance_name)
+
+    return ResultWithJobs(jobs)
+
+
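
The new LU above is only reachable through a matching opcode. Purely as a hypothetical usage sketch (not part of the patch), assuming an opcodes.OpInstanceChangeGroup whose fields mirror the attributes the LU reads (instance_name, target_groups, iallocator, early_release) and the standard cli.SubmitOpCode helper:

from ganeti import cli
from ganeti import opcodes

# Instance, group and allocator names are examples only.
op = opcodes.OpInstanceChangeGroup(instance_name="inst1.example.com",
                                   target_groups=["group2"],
                                   iallocator="hail",
                                   early_release=False)
cli.SubmitOpCode(op)  # the LU returns ResultWithJobs, i.e. follow-up jobs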
 class LUBackupQuery(NoHooksLU):
   """Query the exports list
 
@@ -11121,7 +11459,7 @@ class LUBackupQuery(NoHooksLU):
         that node.
 
     """
-    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
+    self.nodes = self.owned_locks(locking.LEVEL_NODE)
     rpcresult = self.rpc.call_export_list(self.nodes)
     result = {}
     for node in rpcresult:
@@ -11504,7 +11842,7 @@ class LUBackupRemove(NoHooksLU):
       fqdn_warn = True
       instance_name = self.op.instance_name
 
-    locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+    locked_nodes = self.owned_locks(locking.LEVEL_NODE)
     exportlist = self.rpc.call_export_list(locked_nodes)
     found = False
     for node in exportlist:
@@ -11624,12 +11962,12 @@ class LUGroupAssignNodes(NoHooksLU):
 
     """
     assert self.needed_locks[locking.LEVEL_NODEGROUP]
-    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
             frozenset(self.op.nodes))
 
     expected_locks = (set([self.group_uuid]) |
                       self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
-    actual_locks = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
     if actual_locks != expected_locks:
       raise errors.OpExecError("Nodes changed groups since locks were acquired,"
                                " current groups are '%s', used to be '%s'" %
@@ -11817,6 +12155,9 @@ class LUGroupQuery(NoHooksLU):
   def ExpandNames(self):
     self.gq.ExpandNames(self)
 
+  def DeclareLocks(self, level):
+    self.gq.DeclareLocks(self, level)
+
   def Exec(self, feedback_fn):
     return self.gq.OldStyleQuery(self)
 
@@ -11895,7 +12236,6 @@ class LUGroupSetParams(LogicalUnit):
     return result
 
 
-
 class LUGroupRemove(LogicalUnit):
   HPATH = "group-remove"
   HTYPE = constants.HTYPE_GROUP
@@ -12054,14 +12394,7 @@ class LUGroupEvacuate(LogicalUnit):
                                   utils.CommaJoin(self.req_target_uuids)),
                                  errors.ECODE_INVAL)
 
-    if not self.op.iallocator:
-      # Use default iallocator
-      self.op.iallocator = self.cfg.GetDefaultIAllocator()
-
-    if not self.op.iallocator:
-      raise errors.OpPrereqError("No iallocator was specified, neither in the"
-                                 " opcode nor as a cluster-wide default",
-                                 errors.ECODE_INVAL)
+    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
 
     self.share_locks = _ShareAll()
     self.needed_locks = {
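
The inlined fallback logic removed above is now centralised in _GetDefaultIAllocator. Based purely on the code it replaces, the helper presumably behaves along these lines (a sketch, not the definition from this patch, which lies outside the hunk):

from ganeti import errors

def _GetDefaultIAllocator(cfg, iallocator):
  # Reconstructed from the removed inline code: prefer the explicitly
  # requested allocator, fall back to the cluster-wide default, and refuse
  # to continue if neither is available.
  if not iallocator:
    iallocator = cfg.GetDefaultIAllocator()

  if not iallocator:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return iallocator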
@@ -12089,7 +12422,7 @@ class LUGroupEvacuate(LogicalUnit):
         # via the node before it's locked, requiring verification later on
         lock_groups.update(group_uuid
                            for instance_name in
-                             self.glm.list_owned(locking.LEVEL_INSTANCE)
+                             self.owned_locks(locking.LEVEL_INSTANCE)
                            for group_uuid in
                              self.cfg.GetInstanceNodeGroups(instance_name))
       else:
@@ -12104,52 +12437,39 @@ class LUGroupEvacuate(LogicalUnit):
       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
       self._LockInstancesNodes()
 
-      # Lock all nodes in group to be evacuated
-      assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
-      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
+      # Lock all nodes in group to be evacuated and target groups
+      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+      assert self.group_uuid in owned_groups
+      member_nodes = [node_name
+                      for group in owned_groups
+                      for node_name in self.cfg.GetNodeGroup(group).members]
       self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert owned_groups.issuperset(self.req_target_uuids)
     assert self.group_uuid in owned_groups
 
     # Check if locked instances are still correct
-    wanted_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
-    if owned_instances != wanted_instances:
-      raise errors.OpPrereqError("Instances in node group to be evacuated (%s)"
-                                 " changed since locks were acquired, wanted"
-                                 " %s, have %s; retry the operation" %
-                                 (self.group_uuid,
-                                  utils.CommaJoin(wanted_instances),
-                                  utils.CommaJoin(owned_instances)),
-                                 errors.ECODE_STATE)
+    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     # Get instance information
-    self.instances = dict((name, self.cfg.GetInstanceInfo(name))
-                          for name in owned_instances)
+    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
     for instance_name in owned_instances:
       inst = self.instances[instance_name]
-      assert self.group_uuid in self.cfg.GetInstanceNodeGroups(instance_name), \
-        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
       assert owned_nodes.issuperset(inst.all_nodes), \
         "Instance %s's nodes changed while we kept the lock" % instance_name
 
-      inst_groups = self.cfg.GetInstanceNodeGroups(instance_name)
-      if not owned_groups.issuperset(inst_groups):
-        raise errors.OpPrereqError("Instance %s's node groups changed since"
-                                   " locks were acquired, current groups"
-                                   " are '%s', owning groups '%s'; retry the"
-                                   " operation" %
-                                   (instance_name,
-                                    utils.CommaJoin(inst_groups),
-                                    utils.CommaJoin(owned_groups)),
-                                   errors.ECODE_STATE)
+      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
+                                             owned_groups)
+
+      assert self.group_uuid in inst_groups, \
+        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
 
     if self.req_target_uuids:
       # User requested specific target groups
@@ -12160,7 +12480,8 @@ class LUGroupEvacuate(LogicalUnit):
                            if group_uuid != self.group_uuid]
 
       if not self.target_uuids:
-        raise errors.OpExecError("There are no possible target groups")
+        raise errors.OpPrereqError("There are no possible target groups",
+                                   errors.ECODE_INVAL)
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -12177,14 +12498,14 @@ class LUGroupEvacuate(LogicalUnit):
     """
     mn = self.cfg.GetMasterNode()
 
-    assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
 
     run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
 
     return (run_nodes, run_nodes)
 
   def Exec(self, feedback_fn):
-    instances = list(self.glm.list_owned(locking.LEVEL_INSTANCE))
+    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
 
     assert self.group_uuid not in self.target_uuids
 
@@ -12207,7 +12528,7 @@ class LUGroupEvacuate(LogicalUnit):
     return ResultWithJobs(jobs)
 
 
-class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
+class TagsLU(NoHooksLU): # pylint: disable=W0223
   """Generic tags LU.
 
   This is an abstract class which is the parent of all the other tags LUs.
@@ -12467,7 +12788,7 @@ class LUTestJqueue(NoHooksLU):
     # Wait for client to close
     try:
       try:
-        # pylint: disable-msg=E1101
+        # pylint: disable=E1101
         # Instance of '_socketobject' has no ... member
         conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
         conn.recv(1)
@@ -12564,7 +12885,7 @@ class IAllocator(object):
       easy usage
 
   """
-  # pylint: disable-msg=R0902
+  # pylint: disable=R0902
   # lots of instance attributes
 
   def __init__(self, cfg, rpc, mode, **kwargs):
@@ -12579,7 +12900,6 @@ class IAllocator(object):
     self.hypervisor = None
     self.relocate_from = None
     self.name = None
-    self.evac_nodes = None
     self.instances = None
     self.evac_mode = None
     self.target_groups = []
@@ -12861,15 +13181,6 @@ class IAllocator(object):
       }
     return request
 
-  def _AddEvacuateNodes(self):
-    """Add evacuate nodes data to allocator structure.
-
-    """
-    request = {
-      "evac_nodes": self.evac_nodes
-      }
-    return request
-
   def _AddNodeEvacuate(self):
     """Get data for node-evacuate requests.
 
@@ -12911,7 +13222,7 @@ class IAllocator(object):
 
   _STRING_LIST = ht.TListOf(ht.TString)
   _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
-     # pylint: disable-msg=E1101
+     # pylint: disable=E1101
      # Class '...' has no 'OP_ID' member
      "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                           opcodes.OpInstanceMigrate.OP_ID,
@@ -12950,9 +13261,6 @@ class IAllocator(object):
       (_AddRelocateInstance,
        [("name", ht.TString), ("relocate_from", _STRING_LIST)],
        ht.TList),
-    constants.IALLOCATOR_MODE_MEVAC:
-      (_AddEvacuateNodes, [("evac_nodes", _STRING_LIST)],
-       ht.TListOf(ht.TAnd(ht.TIsLength(2), _STRING_LIST))),
      constants.IALLOCATOR_MODE_NODE_EVAC:
       (_AddNodeEvacuate, [
         ("instances", _STRING_LIST),
@@ -13011,39 +13319,25 @@ class IAllocator(object):
                                (self._result_check, self.result),
                                errors.ECODE_INVAL)
 
-    if self.mode in (constants.IALLOCATOR_MODE_RELOC,
-                     constants.IALLOCATOR_MODE_MEVAC):
+    if self.mode == constants.IALLOCATOR_MODE_RELOC:
+      assert self.relocate_from is not None
+      assert self.required_nodes == 1
+
       node2group = dict((name, ndata["group"])
                         for (name, ndata) in self.in_data["nodes"].items())
 
       fn = compat.partial(self._NodesToGroups, node2group,
                           self.in_data["nodegroups"])
 
-      if self.mode == constants.IALLOCATOR_MODE_RELOC:
-        assert self.relocate_from is not None
-        assert self.required_nodes == 1
-
-        request_groups = fn(self.relocate_from)
-        result_groups = fn(rdict["result"])
-
-        if result_groups != request_groups:
-          raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
-                                   " differ from original groups (%s)" %
-                                   (utils.CommaJoin(result_groups),
-                                    utils.CommaJoin(request_groups)))
-      elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
-        request_groups = fn(self.evac_nodes)
-        for (instance_name, secnode) in self.result:
-          result_groups = fn([secnode])
-          if result_groups != request_groups:
-            raise errors.OpExecError("Iallocator returned new secondary node"
-                                     " '%s' (group '%s') for instance '%s'"
-                                     " which is not in original group '%s'" %
-                                     (secnode, utils.CommaJoin(result_groups),
-                                      instance_name,
-                                      utils.CommaJoin(request_groups)))
-      else:
-        raise errors.ProgrammerError("Unhandled mode '%s'" % self.mode)
+      instance = self.cfg.GetInstanceInfo(self.name)
+      request_groups = fn(self.relocate_from + [instance.primary_node])
+      result_groups = fn(rdict["result"] + [instance.primary_node])
+
+      if self.success and not set(result_groups).issubset(request_groups):
+        raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
+                                 " differ from original groups (%s)" %
+                                 (utils.CommaJoin(result_groups),
+                                  utils.CommaJoin(request_groups)))
 
     elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
       assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES
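
To make the relaxed relocation check above concrete, a toy example (group names invented): the groups of the nodes returned by the iallocator, with the primary node added on both sides, must be a subset of the groups the instance was already using; exact equality is no longer required.

# Toy stand-in for the node-to-group mapping built via _NodesToGroups.
node2group = {
  "node1": "group-A",  # primary node of the instance
  "node2": "group-A",  # old secondary (relocate_from)
  "node3": "group-A",  # new secondary proposed by the iallocator
  "node4": "group-B",  # a node in an unrelated group
}

def groups_of(nodes):
  return set(node2group[name] for name in nodes)

request_groups = groups_of(["node2", "node1"])   # relocate_from + primary
result_groups = groups_of(["node3", "node1"])    # result + primary
assert result_groups.issubset(request_groups)    # accepted

bad_groups = groups_of(["node4", "node1"])       # crosses into group-B
assert not bad_groups.issubset(request_groups)   # would raise OpExecError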
@@ -13127,10 +13421,6 @@ class LUTestAllocator(NoHooksLU):
       self.op.name = fname
       self.relocate_from = \
           list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
-    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
-      if not hasattr(self.op, "evac_nodes"):
-        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
-                                   " opcode input", errors.ECODE_INVAL)
     elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
                           constants.IALLOCATOR_MODE_NODE_EVAC):
       if not self.op.instances:
@@ -13171,10 +13461,6 @@ class LUTestAllocator(NoHooksLU):
                        name=self.op.name,
                        relocate_from=list(self.relocate_from),
                        )
-    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
-      ial = IAllocator(self.cfg, self.rpc,
-                       mode=self.op.mode,
-                       evac_nodes=self.op.evac_nodes)
     elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
       ial = IAllocator(self.cfg, self.rpc,
                        mode=self.op.mode,