Merge branch 'devel-2.5'
[ganeti-local] lib/cmdlib.py
index 81f21ae..71076b9 100644
@@ -32,7 +32,6 @@ import os
 import os.path
 import time
 import re
-import platform
 import logging
 import copy
 import OpenSSL
@@ -60,6 +59,7 @@ from ganeti import qlang
 from ganeti import opcodes
 from ganeti import ht
 from ganeti import rpc
+from ganeti import runtime
 
 import ganeti.masterd.instance # pylint: disable=W0611
 
@@ -68,12 +68,15 @@ import ganeti.masterd.instance # pylint: disable=W0611
 DRBD_META_SIZE = 128
 
 # States of instance
-INSTANCE_UP = [constants.ADMINST_UP]
 INSTANCE_DOWN = [constants.ADMINST_DOWN]
-INSTANCE_OFFLINE = [constants.ADMINST_OFFLINE]
 INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
 INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
 
+#: Instance states in which an instance can be marked as offline/online
+CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
+  constants.ADMINST_OFFLINE,
+  ]))
+
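For context, a minimal sketch of how this set is meant to be consumed when validating an offline/online transition (the guard below is illustrative, not part of this patch):

  if instance.admin_state not in CAN_CHANGE_INSTANCE_OFFLINE:
    raise errors.OpPrereqError("Instance '%s' is in state '%s' and cannot be"
                               " marked offline/online" %
                               (instance.name, instance.admin_state),
                               errors.ECODE_STATE)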
 
 class ResultWithJobs:
   """Data container for LU results with jobs.
@@ -193,9 +196,15 @@ class LogicalUnit(object):
     as values. Rules:
 
       - use an empty dict if you don't need any lock
-      - if you don't need any lock at a particular level omit that level
+      - if you don't need any lock at a particular level omit that
+        level (note that in this case C{DeclareLocks} won't be called
+        at all for that level)
+      - if you need locks at a level, but you can't calculate it in
+        this function, initialise that level with an empty list and do
+        further processing in L{LogicalUnit.DeclareLocks} (see that
+        function's docstring)
       - don't put anything for the BGL level
-      - if you want all locks at a level use locking.ALL_SET as a value
+      - if you want all locks at a level use L{locking.ALL_SET} as a value
 
     If you need to share locks (rather than acquire them exclusively) at one
     level you can modify self.share_locks, setting a true value (usually 1) for
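Taken together, a minimal ExpandNames following the rules above might look as below (a sketch only; the instance/node levels mirror patterns used elsewhere in this file):

  def ExpandNames(self):
    # One instance lock is known up front; the node locks can only be
    # computed once the instance lock is held, so declare that level as an
    # empty list and fill it in DeclareLocks.
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [self.op.instance_name],
      locking.LEVEL_NODE: [],
      }
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE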
@@ -242,7 +251,7 @@ class LogicalUnit(object):
     self.needed_locks for the level.
 
     @param level: Locking level which is going to be locked
-    @type level: member of ganeti.locking.LEVELS
+    @type level: member of L{ganeti.locking.LEVELS}
 
     """
 
@@ -587,6 +596,32 @@ def _MakeLegacyNodeInfo(data):
     })
 
 
+def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
+                              cur_group_uuid):
+  """Checks if node groups for locked instances are still correct.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
+  @type instances: dict; string as key, L{objects.Instance} as value
+  @param instances: Dictionary, instance name as key, instance object as value
+  @type owned_groups: iterable of string
+  @param owned_groups: List of owned groups
+  @type owned_nodes: iterable of string
+  @param owned_nodes: List of owned nodes
+  @type cur_group_uuid: string or None
+  @param cur_group_uuid: Optional group UUID to check against instance's groups
+
+  """
+  for (name, inst) in instances.items():
+    assert owned_nodes.issuperset(inst.all_nodes), \
+      "Instance %s's nodes changed while we kept the lock" % name
+
+    inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
+
+    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
+      "Instance %s has no node in group %s" % (name, cur_group_uuid)
+
+
 def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
   """Checks if the owned node groups are still correct for an instance.
 
@@ -1084,7 +1119,7 @@ def _ComputeMinMaxSpec(name, ipolicy, value):
 
 
 def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
-                                 nic_count, disk_sizes,
+                                 nic_count, disk_sizes, spindle_use,
                                  _compute_fn=_ComputeMinMaxSpec):
   """Verifies ipolicy against provided specs.
 
@@ -1100,6 +1135,8 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   @param nic_count: Number of nics used
   @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disks (len must match C{disk_count})
+  @type spindle_use: int
+  @param spindle_use: The number of spindles this instance uses
   @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found
 
@@ -1111,6 +1148,7 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
     (constants.ISPEC_CPU_COUNT, cpu_count),
     (constants.ISPEC_DISK_COUNT, disk_count),
     (constants.ISPEC_NIC_COUNT, nic_count),
+    (constants.ISPEC_SPINDLE_USE, spindle_use),
     ] + map((lambda d: (constants.ISPEC_DISK_SIZE, d)), disk_sizes)
 
   return filter(None,
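With the new parameter, callers pass spindle usage alongside the other specs. A hedged example of checking one spec against a policy (all values invented):

  violations = _ComputeIPolicySpecViolation(ipolicy, 4096, 2, 2, 2,
                                            [10240, 20480], 2)
  if violations:
    raise errors.OpPrereqError("Instance specs violate policy: %s" %
                               utils.CommaJoin(violations),
                               errors.ECODE_INVAL)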
@@ -1132,12 +1170,13 @@ def _ComputeIPolicyInstanceViolation(ipolicy, instance,
   """
   mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
   cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
+  spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
   disk_count = len(instance.disks)
   disk_sizes = [disk.size for disk in instance.disks]
   nic_count = len(instance.nics)
 
   return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes)
+                     disk_sizes, spindle_use)
 
 
 def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
@@ -1157,9 +1196,10 @@ def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
   disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
   disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
   nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
+  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
 
   return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes)
+                     disk_sizes, spindle_use)
 
 
 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
@@ -1871,7 +1911,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
   """Verifies the cluster config.
 
   """
-  REQ_BGL = True
+  REQ_BGL = False
 
   def _VerifyHVP(self, hvp_data):
     """Verifies locally the syntax of the hypervisor parameters.
@@ -1888,13 +1928,17 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
         self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
 
   def ExpandNames(self):
-    # Information can be safely retrieved as the BGL is acquired in exclusive
-    # mode
-    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
+    self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
+    self.share_locks = _ShareAll()
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    """
+    # Retrieve all information
     self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
     self.all_node_info = self.cfg.GetAllNodesInfo()
     self.all_inst_info = self.cfg.GetAllInstancesInfo()
-    self.needed_locks = {}
 
   def Exec(self, feedback_fn):
     """Verify integrity of cluster, performing various test on nodes.
@@ -2022,7 +2066,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
 
     # Get instances in node group; this is unsafe and needs verification later
-    inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid)
+    inst_names = \
+      self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: inst_names,
@@ -2056,7 +2101,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
 
     group_nodes = set(self.group_info.members)
-    group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
+    group_instances = \
+      self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
 
     unlocked_nodes = \
         group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
@@ -2066,11 +2112,13 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     if unlocked_nodes:
       raise errors.OpPrereqError("Missing lock for nodes: %s" %
-                                 utils.CommaJoin(unlocked_nodes))
+                                 utils.CommaJoin(unlocked_nodes),
+                                 errors.ECODE_STATE)
 
     if unlocked_instances:
       raise errors.OpPrereqError("Missing lock for instances: %s" %
-                                 utils.CommaJoin(unlocked_instances))
+                                 utils.CommaJoin(unlocked_instances),
+                                 errors.ECODE_STATE)
 
     self.all_node_info = self.cfg.GetAllNodesInfo()
     self.all_inst_info = self.cfg.GetAllInstancesInfo()
@@ -2090,17 +2138,17 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     for inst in self.my_inst_info.values():
       if inst.disk_template in constants.DTS_INT_MIRROR:
-        group = self.my_node_info[inst.primary_node].group
-        for nname in inst.secondary_nodes:
-          if self.all_node_info[nname].group != group:
+        for nname in inst.all_nodes:
+          if self.all_node_info[nname].group != self.group_uuid:
             extra_lv_nodes.add(nname)
 
     unlocked_lv_nodes = \
         extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
 
     if unlocked_lv_nodes:
-      raise errors.OpPrereqError("these nodes could be locked: %s" %
-                                 utils.CommaJoin(unlocked_lv_nodes))
+      raise errors.OpPrereqError("Missing node locks for LV check: %s" %
+                                 utils.CommaJoin(unlocked_lv_nodes),
+                                 errors.ECODE_STATE)
     self.extra_lv_nodes = list(extra_lv_nodes)
 
   def _VerifyNode(self, ninfo, nresult):
@@ -2396,7 +2444,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     for node, n_img in node_image.items():
-      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
+      if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
+          self.all_node_info[node].group != self.group_uuid):
         # skip non-healthy nodes
         continue
       for volume in n_img.volumes:
@@ -2423,11 +2472,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       # WARNING: we currently take into account down instances as well
       # as up ones, considering that even if they're down someone
       # might want to start them even in the event of a node failure.
-      if n_img.offline:
-        # we're skipping offline nodes from the N+1 warning, since
-        # most likely we don't have good memory infromation from them;
-        # we already list instances living on such nodes, and that's
-        # enough warning
+      if n_img.offline or self.all_node_info[node].group != self.group_uuid:
+        # we're skipping nodes marked offline and nodes in other groups from
+        # the N+1 warning, since most likely we don't have good memory
+        # information from them; we already list instances living on such
+        # nodes, and that's enough warning
         continue
       #TODO(dynmem): also consider ballooning out other instances
       for prinode, instances in n_img.sbp.items():
@@ -3480,15 +3529,8 @@ class LUGroupVerifyDisks(NoHooksLU):
     self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
-    for (instance_name, inst) in self.instances.items():
-      assert owned_nodes.issuperset(inst.all_nodes), \
-        "Instance %s's nodes changed while we kept the lock" % instance_name
-
-      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
-                                             owned_groups)
-
-      assert self.group_uuid in inst_groups, \
-        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
+    _CheckInstancesNodeGroups(self.cfg, self.instances,
+                              owned_groups, owned_nodes, self.group_uuid)
 
   def Exec(self, feedback_fn):
     """Verify integrity of cluster disks.
@@ -4493,7 +4535,7 @@ class LUOobCommand(NoHooksLU):
   """Logical unit for OOB handling.
 
   """
-  REG_BGL = False
+  REQ_BGL = False
   _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
 
   def ExpandNames(self):
@@ -4840,9 +4882,6 @@ class LUNodeRemove(LogicalUnit):
   def BuildHooksEnv(self):
     """Build hooks env.
 
-    This doesn't run on the target node in the pre phase as a failed
-    node would then be impossible to remove.
-
     """
     return {
       "OP_TARGET": self.op.node_name,
@@ -4852,13 +4891,15 @@ class LUNodeRemove(LogicalUnit):
   def BuildHooksNodes(self):
     """Build hooks nodes.
 
+    This doesn't run on the target node in the pre phase as a failed
+    node would then be impossible to remove.
+
     """
     all_nodes = self.cfg.GetNodeList()
     try:
       all_nodes.remove(self.op.node_name)
     except ValueError:
-      logging.warning("Node '%s', which is about to be removed, was not found"
-                      " in the list of all nodes", self.op.node_name)
+      pass
     return (all_nodes, all_nodes)
 
   def CheckPrereq(self):
@@ -5571,6 +5612,19 @@ class LUNodeAdd(LogicalUnit):
     if self.op.disk_state:
       self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
 
+    # TODO: If we need to have multiple DnsOnlyRunner we probably should make
+    #       it a property on the base class.
+    result = rpc.DnsOnlyRunner().call_version([node])[node]
+    result.Raise("Can't get version information from node %s" % node)
+    if constants.PROTOCOL_VERSION == result.payload:
+      logging.info("Communication to node %s fine, sw version %s match",
+                   node, result.payload)
+    else:
+      raise errors.OpPrereqError("Version mismatch master version %s,"
+                                 " node version %s" %
+                                 (constants.PROTOCOL_VERSION, result.payload),
+                                 errors.ECODE_ENVIRON)
+
   def Exec(self, feedback_fn):
     """Adds the new node to the cluster.
 
@@ -5615,17 +5669,6 @@ class LUNodeAdd(LogicalUnit):
     if self.op.disk_state:
       new_node.disk_state_static = self.new_disk_state
 
-    # check connectivity
-    result = self.rpc.call_version([node])[node]
-    result.Raise("Can't get version information from node %s" % node)
-    if constants.PROTOCOL_VERSION == result.payload:
-      logging.info("Communication to node %s fine, sw version %s match",
-                   node, result.payload)
-    else:
-      raise errors.OpExecError("Version mismatch master version %s,"
-                               " node version %s" %
-                               (constants.PROTOCOL_VERSION, result.payload))
-
     # Add node to our /etc/hosts, and add key to known_hosts
     if self.cfg.GetClusterInfo().modify_etc_hosts:
       master_node = self.cfg.GetMasterNode()
@@ -5891,9 +5934,7 @@ class LUNodeSetParams(LogicalUnit):
 
     if old_role == self._ROLE_OFFLINE and new_role != old_role:
       # Trying to transition out of offline status
-      # TODO: Use standard RPC runner, but make sure it works when the node is
-      # still marked offline
-      result = rpc.BootstrapRunner().call_version([node.name])[node.name]
+      result = self.rpc.call_version([node.name])[node.name]
       if result.fail_msg:
         raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                    " to report its version: %s" %
@@ -6078,7 +6119,7 @@ class LUClusterQuery(NoHooksLU):
       "config_version": constants.CONFIG_VERSION,
       "os_api_version": max(constants.OS_API_VERSIONS),
       "export_version": constants.EXPORT_VERSION,
-      "architecture": (platform.architecture()[0], platform.machine()),
+      "architecture": runtime.GetArchInfo(),
       "name": cluster.cluster_name,
       "master": cluster.master_node,
       "default_hypervisor": cluster.primary_hypervisor,
@@ -7301,7 +7342,7 @@ def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
   """
   logging.info("Removing block devices for instance %s", instance.name)
 
-  if not _RemoveDisks(lu, instance):
+  if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
     if not ignore_failures:
       raise errors.OpExecError("Can't remove instance's disks")
     feedback_fn("Warning: can't remove instance's disks")
@@ -7995,9 +8036,7 @@ class TLMigrateInstance(Tasklet):
     ial = IAllocator(self.cfg, self.rpc,
                      mode=constants.IALLOCATOR_MODE_RELOC,
                      name=self.instance_name,
-                     # TODO See why hail breaks with a single node below
-                     relocate_from=[self.instance.primary_node,
-                                    self.instance.primary_node],
+                     relocate_from=[self.instance.primary_node],
                      )
 
     ial.Run(self.lu.op.iallocator)
@@ -8226,11 +8265,11 @@ class TLMigrateInstance(Tasklet):
                          (src_version, dst_version))
 
     self.feedback_fn("* checking disk consistency between source and target")
-    for dev in instance.disks:
+    for (idx, dev) in enumerate(instance.disks):
       if not _CheckDiskConsistency(self.lu, dev, target_node, False):
         raise errors.OpExecError("Disk %s is degraded or not fully"
                                  " synchronized on target node,"
-                                 " aborting migration" % dev.iv_name)
+                                 " aborting migration" % idx)
 
     if self.current_mem > self.tgt_free_mem:
       if not self.allow_runtime_changes:
@@ -8388,16 +8427,16 @@ class TLMigrateInstance(Tasklet):
 
     if instance.admin_state == constants.ADMINST_UP:
       self.feedback_fn("* checking disk consistency between source and target")
-      for dev in instance.disks:
+      for (idx, dev) in enumerate(instance.disks):
         # for drbd, these are drbd over lvm
         if not _CheckDiskConsistency(self.lu, dev, target_node, False):
           if primary_node.offline:
             self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
                              " target node %s" %
-                             (primary_node.name, dev.iv_name, target_node))
+                             (primary_node.name, idx, target_node))
           elif not self.ignore_consistency:
             raise errors.OpExecError("Disk %s is degraded on target node,"
-                                     " aborting failover" % dev.iv_name)
+                                     " aborting failover" % idx)
     else:
       self.feedback_fn("* not checking disk consistency as instance is not"
                        " running")
@@ -8675,11 +8714,26 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
   return drbd_dev
 
 
-def _GenerateDiskTemplate(lu, template_name,
-                          instance_name, primary_node,
-                          secondary_nodes, disk_info,
-                          file_storage_dir, file_driver,
-                          base_index, feedback_fn, disk_params):
+_DISK_TEMPLATE_NAME_PREFIX = {
+  constants.DT_PLAIN: "",
+  constants.DT_RBD: ".rbd",
+  }
+
+
+_DISK_TEMPLATE_DEVICE_TYPE = {
+  constants.DT_PLAIN: constants.LD_LV,
+  constants.DT_FILE: constants.LD_FILE,
+  constants.DT_SHARED_FILE: constants.LD_FILE,
+  constants.DT_BLOCK: constants.LD_BLOCKDEV,
+  constants.DT_RBD: constants.LD_RBD,
+  }
+
+
+def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
+    secondary_nodes, disk_info, file_storage_dir, file_driver, base_index,
+    feedback_fn, disk_params,
+    _req_file_storage=opcodes.RequireFileStorage,
+    _req_shr_file_storage=opcodes.RequireSharedFileStorage):
   """Generate the entire disk layout for a given template type.
 
   """
@@ -8689,25 +8743,9 @@ def _GenerateDiskTemplate(lu, template_name,
   disk_count = len(disk_info)
   disks = []
   ld_params = _ComputeLDParams(template_name, disk_params)
+
   if template_name == constants.DT_DISKLESS:
     pass
-  elif template_name == constants.DT_PLAIN:
-    if secondary_nodes:
-      raise errors.ProgrammerError("Wrong template configuration")
-
-    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
-                                      for i in range(disk_count)])
-    for idx, disk in enumerate(disk_info):
-      disk_index = idx + base_index
-      vg = disk.get(constants.IDISK_VG, vgname)
-      feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
-      disk_dev = objects.Disk(dev_type=constants.LD_LV,
-                              size=disk[constants.IDISK_SIZE],
-                              logical_id=(vg, names[idx]),
-                              iv_name="disk/%d" % disk_index,
-                              mode=disk[constants.IDISK_MODE],
-                              params=ld_params[0])
-      disks.append(disk_dev)
   elif template_name == constants.DT_DRBD8:
     drbd_params, data_params, meta_params = ld_params
     if len(secondary_nodes) != 1:
@@ -8735,73 +8773,54 @@ def _GenerateDiskTemplate(lu, template_name,
                                       drbd_params, data_params, meta_params)
       disk_dev.mode = disk[constants.IDISK_MODE]
       disks.append(disk_dev)
-  elif template_name == constants.DT_FILE:
-    if secondary_nodes:
-      raise errors.ProgrammerError("Wrong template configuration")
-
-    opcodes.RequireFileStorage()
-
-    for idx, disk in enumerate(disk_info):
-      disk_index = idx + base_index
-      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
-                              size=disk[constants.IDISK_SIZE],
-                              iv_name="disk/%d" % disk_index,
-                              logical_id=(file_driver,
-                                          "%s/disk%d" % (file_storage_dir,
-                                                         disk_index)),
-                              mode=disk[constants.IDISK_MODE],
-                              params=ld_params[0])
-      disks.append(disk_dev)
-  elif template_name == constants.DT_SHARED_FILE:
-    if secondary_nodes:
-      raise errors.ProgrammerError("Wrong template configuration")
-
-    opcodes.RequireSharedFileStorage()
-
-    for idx, disk in enumerate(disk_info):
-      disk_index = idx + base_index
-      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
-                              size=disk[constants.IDISK_SIZE],
-                              iv_name="disk/%d" % disk_index,
-                              logical_id=(file_driver,
-                                          "%s/disk%d" % (file_storage_dir,
-                                                         disk_index)),
-                              mode=disk[constants.IDISK_MODE],
-                              params=ld_params[0])
-      disks.append(disk_dev)
-  elif template_name == constants.DT_BLOCK:
+  else:
     if secondary_nodes:
       raise errors.ProgrammerError("Wrong template configuration")
 
-    for idx, disk in enumerate(disk_info):
-      disk_index = idx + base_index
-      disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV,
-                              size=disk[constants.IDISK_SIZE],
-                              logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
-                                          disk[constants.IDISK_ADOPT]),
-                              iv_name="disk/%d" % disk_index,
-                              mode=disk[constants.IDISK_MODE],
-                              params=ld_params[0])
-      disks.append(disk_dev)
-  elif template_name == constants.DT_RBD:
-    if secondary_nodes:
-      raise errors.ProgrammerError("Wrong template configuration")
+    if template_name == constants.DT_FILE:
+      _req_file_storage()
+    elif template_name == constants.DT_SHARED_FILE:
+      _req_shr_file_storage()
 
-    names = _GenerateUniqueNames(lu, [".rbd.disk%d" % (base_index + i)
-                                      for i in range(disk_count)])
+    name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
+    if name_prefix is None:
+      names = None
+    else:
+      names = _GenerateUniqueNames(lu, ["%s.disk%s" %
+                                        (name_prefix, base_index + i)
+                                        for i in range(disk_count)])
+
+    dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
+
+    if template_name == constants.DT_PLAIN:
+      def logical_id_fn(idx, _, disk):
+        vg = disk.get(constants.IDISK_VG, vgname)
+        return (vg, names[idx])
+    elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
+      logical_id_fn = \
+        lambda _, disk_index, disk: (file_driver,
+                                     "%s/disk%d" % (file_storage_dir,
+                                                    disk_index))
+    elif template_name == constants.DT_BLOCK:
+      logical_id_fn = \
+        lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
+                                       disk[constants.IDISK_ADOPT])
+    elif template_name == constants.DT_RBD:
+      logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
+    else:
+      raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
 
     for idx, disk in enumerate(disk_info):
       disk_index = idx + base_index
-      disk_dev = objects.Disk(dev_type=constants.LD_RBD,
-                              size=disk[constants.IDISK_SIZE],
-                              logical_id=("rbd", names[idx]),
-                              iv_name="disk/%d" % disk_index,
-                              mode=disk[constants.IDISK_MODE],
-                              params=ld_params[0])
-      disks.append(disk_dev)
+      size = disk[constants.IDISK_SIZE]
+      feedback_fn("* disk %s, size %s" %
+                  (disk_index, utils.FormatUnit(size, "h")))
+      disks.append(objects.Disk(dev_type=dev_type, size=size,
+                                logical_id=logical_id_fn(idx, disk_index, disk),
+                                iv_name="disk/%d" % disk_index,
+                                mode=disk[constants.IDISK_MODE],
+                                params=ld_params[0]))
 
-  else:
-    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
   return disks
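The rewrite replaces five near-identical template branches with two lookup tables plus a per-template logical_id function, leaving one shared construction loop. A hedged, self-contained illustration of the dispatch-table pattern (generic Python, not Ganeti API):

  # Per-case data lives in tables, per-case behaviour in small functions,
  # and a single loop builds the result; adding a new case means adding
  # table entries rather than copying a whole branch.
  DEVICE_TYPE = {
    "plain": "lv",
    "rbd": "rbd",
    }

  def make_devices(template, sizes):
    dev_type = DEVICE_TYPE[template]  # KeyError flags an unknown template
    return [(dev_type, idx, size) for (idx, size) in enumerate(sizes)]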
 
 
@@ -8931,15 +8950,14 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
   for idx, device in enumerate(instance.disks):
     if to_skip and idx in to_skip:
       continue
-    logging.info("Creating volume %s for instance %s",
-                 device.iv_name, instance.name)
+    logging.info("Creating disk %s for instance '%s'", idx, instance.name)
     #HARDCODE
     for node in all_nodes:
       f_create = node == pnode
       _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
 
 
-def _RemoveDisks(lu, instance, target_node=None):
+def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
   """Remove all disks for an instance.
 
   This abstracts away some work from `AddInstance()` and
@@ -8960,7 +8978,8 @@ def _RemoveDisks(lu, instance, target_node=None):
   logging.info("Removing block devices for instance %s", instance.name)
 
   all_result = True
-  for device in instance.disks:
+  ports_to_release = set()
+  for (idx, device) in enumerate(instance.disks):
     if target_node:
       edata = [(target_node, device)]
     else:
@@ -8969,14 +8988,17 @@ def _RemoveDisks(lu, instance, target_node=None):
       lu.cfg.SetDiskID(disk, node)
       msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
       if msg:
-        lu.LogWarning("Could not remove block device %s on node %s,"
-                      " continuing anyway: %s", device.iv_name, node, msg)
+        lu.LogWarning("Could not remove disk %s on node %s,"
+                      " continuing anyway: %s", idx, node, msg)
         all_result = False
 
     # if this is a DRBD disk, return its port to the pool
     if device.dev_type in constants.LDS_DRBD:
-      tcp_port = device.logical_id[2]
-      lu.cfg.AddTcpUdpPort(tcp_port)
+      ports_to_release.add(device.logical_id[2])
+
+  if all_result or ignore_failures:
+    for port in ports_to_release:
+      lu.cfg.AddTcpUdpPort(port)
 
   if instance.disk_template == constants.DT_FILE:
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
@@ -9357,6 +9379,7 @@ class LUInstanceCreate(LogicalUnit):
                      os=self.op.os_type,
                      vcpus=self.be_full[constants.BE_VCPUS],
                      memory=self.be_full[constants.BE_MAXMEM],
+                     spindle_use=self.be_full[constants.BE_SPINDLE_USE],
                      disks=self.disks,
                      nics=nics,
                      hypervisor=self.op.hypervisor,
@@ -9852,12 +9875,14 @@ class LUInstanceCreate(LogicalUnit):
     nodenames = [pnode.name] + self.secondaries
 
     # Verify instance specs
+    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
     ispec = {
       constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
       constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
       constants.ISPEC_DISK_COUNT: len(self.disks),
       constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
       constants.ISPEC_NIC_COUNT: len(self.nics),
+      constants.ISPEC_SPINDLE_USE: spindle_use,
       }
 
     group_info = self.cfg.GetNodeGroup(pnode.group)
@@ -11658,12 +11683,25 @@ class LUInstanceQueryData(NoHooksLU):
       else:
         self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
 
+      self.needed_locks[locking.LEVEL_NODEGROUP] = []
       self.needed_locks[locking.LEVEL_NODE] = []
       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
-    if self.op.use_locking and level == locking.LEVEL_NODE:
-      self._LockInstancesNodes()
+    if self.op.use_locking:
+      if level == locking.LEVEL_NODEGROUP:
+        owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+
+        # Lock all groups used by instances optimistically; this requires going
+        # via the node before it's locked, requiring verification later on
+        self.needed_locks[locking.LEVEL_NODEGROUP] = \
+          frozenset(group_uuid
+                    for instance_name in owned_instances
+                    for group_uuid in
+                      self.cfg.GetInstanceNodeGroups(instance_name))
+
+      elif level == locking.LEVEL_NODE:
+        self._LockInstancesNodes()
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -11671,12 +11709,23 @@ class LUInstanceQueryData(NoHooksLU):
     This only checks the optional instance list against the existing names.
 
     """
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+
     if self.wanted_names is None:
       assert self.op.use_locking, "Locking was not used"
-      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
+      self.wanted_names = owned_instances
 
-    self.wanted_instances = \
-        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
+    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
+
+    if self.op.use_locking:
+      _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
+                                None)
+    else:
+      assert not (owned_instances or owned_groups or owned_nodes)
+
+    self.wanted_instances = instances.values()
 
   def _ComputeBlockdevStatus(self, node, instance_name, dev):
     """Returns the status of a block device
@@ -11741,9 +11790,17 @@ class LUInstanceQueryData(NoHooksLU):
 
     cluster = self.cfg.GetClusterInfo()
 
-    pri_nodes = self.cfg.GetMultiNodeInfo(i.primary_node
-                                          for i in self.wanted_instances)
-    for instance, (_, pnode) in zip(self.wanted_instances, pri_nodes):
+    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
+    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
+
+    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
+                                                 for node in nodes.values()))
+
+    group2name_fn = lambda uuid: groups[uuid].name
+
+    for instance in self.wanted_instances:
+      pnode = nodes[instance.primary_node]
+
       if self.op.static or pnode.offline:
         remote_state = None
         if pnode.offline:
@@ -11767,12 +11824,19 @@ class LUInstanceQueryData(NoHooksLU):
       disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
                   instance.disks)
 
+      snodes_group_uuids = [nodes[snode_name].group
+                            for snode_name in instance.secondary_nodes]
+
       result[instance.name] = {
         "name": instance.name,
         "config_state": instance.admin_state,
         "run_state": remote_state,
         "pnode": instance.primary_node,
+        "pnode_group_uuid": pnode.group,
+        "pnode_group_name": group2name_fn(pnode.group),
         "snodes": instance.secondary_nodes,
+        "snodes_group_uuids": snodes_group_uuids,
+        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
         "os": instance.os,
         # this happens to be the same format used for hooks
         "nics": _NICListToTuple(self, instance.nics),
@@ -11795,6 +11859,144 @@ class LUInstanceQueryData(NoHooksLU):
     return result
 
 
+def PrepareContainerMods(mods, private_fn):
+  """Prepares a list of container modifications by adding a private data field.
+
+  @type mods: list of tuples; (operation, index, parameters)
+  @param mods: List of modifications
+  @type private_fn: callable or None
+  @param private_fn: Callable for constructing a private data field for a
+    modification
+  @rtype: list
+
+  """
+  if private_fn is None:
+    fn = lambda: None
+  else:
+    fn = private_fn
+
+  return [(op, idx, params, fn()) for (op, idx, params) in mods]
+
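The transformation is purely structural; a hedged example of the input and output shapes (values invented):

  mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}),
          (constants.DDM_MODIFY, 0, {constants.IDISK_MODE: "ro"})]
  # With private_fn=None every entry simply gains a None private slot:
  #   [(DDM_ADD, -1, {...}, None), (DDM_MODIFY, 0, {...}, None)]
  prepared = PrepareContainerMods(mods, None)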
+
+#: Type description for changes as returned by L{ApplyContainerMods}'s
+#: callbacks
+_TApplyContModsCbChanges = \
+  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
+    ht.TNonEmptyString,
+    ht.TAny,
+    ])))
+
+
+def ApplyContainerMods(kind, container, chgdesc, mods,
+                       create_fn, modify_fn, remove_fn):
+  """Applies descriptions in C{mods} to C{container}.
+
+  @type kind: string
+  @param kind: One-word item description
+  @type container: list
+  @param container: Container to modify
+  @type chgdesc: None or list
+  @param chgdesc: List of applied changes
+  @type mods: list
+  @param mods: Modifications as returned by L{PrepareContainerMods}
+  @type create_fn: callable
+  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
+    receives absolute item index, parameters and private data object as added
+    by L{PrepareContainerMods}, returns tuple containing new item and changes
+    as list
+  @type modify_fn: callable
+  @param modify_fn: Callback for modifying an existing item
+    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
+    and private data object as added by L{PrepareContainerMods}, returns
+    changes as list
+  @type remove_fn: callable
+  @param remove_fn: Callback on removing item; receives absolute item index,
+    item and private data object as added by L{PrepareContainerMods}
+
+  """
+  for (op, idx, params, private) in mods:
+    if idx == -1:
+      # Append
+      absidx = len(container) - 1
+    elif idx < 0:
+      raise IndexError("Not accepting negative indices other than -1")
+    elif idx > len(container):
+      raise IndexError("Got %s index %s, but there are only %s" %
+                       (kind, idx, len(container)))
+    else:
+      absidx = idx
+
+    changes = None
+
+    if op == constants.DDM_ADD:
+      # Calculate where item will be added
+      if idx == -1:
+        addidx = len(container)
+      else:
+        addidx = idx
+
+      if create_fn is None:
+        item = params
+      else:
+        (item, changes) = create_fn(addidx, params, private)
+
+      if idx == -1:
+        container.append(item)
+      else:
+        assert idx >= 0
+        assert idx <= len(container)
+        # list.insert does so before the specified index
+        container.insert(idx, item)
+    else:
+      # Retrieve existing item
+      try:
+        item = container[absidx]
+      except IndexError:
+        raise IndexError("Invalid %s index %s" % (kind, idx))
+
+      if op == constants.DDM_REMOVE:
+        assert not params
+
+        if remove_fn is not None:
+          remove_fn(absidx, item, private)
+
+        changes = [("%s/%s" % (kind, absidx), "remove")]
+
+        assert container[absidx] == item
+        del container[absidx]
+      elif op == constants.DDM_MODIFY:
+        if modify_fn is not None:
+          changes = modify_fn(absidx, item, params, private)
+      else:
+        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
+
+    assert _TApplyContModsCbChanges(changes)
+
+    if not (chgdesc is None or changes is None):
+      chgdesc.extend(changes)
+
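A minimal, hedged demonstration with plain values and no callbacks (items are then taken verbatim from the parameters; names invented):

  container = ["a", "b"]
  chgdesc = []
  mods = PrepareContainerMods([(constants.DDM_ADD, -1, "c"),
                               (constants.DDM_REMOVE, 0, None)], None)
  ApplyContainerMods("demo", container, chgdesc, mods, None, None, None)
  # container is now ["b", "c"]; chgdesc records [("demo/0", "remove")]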
+
+def _UpdateIvNames(base_index, disks):
+  """Updates the C{iv_name} attribute of disks.
+
+  @type disks: list of L{objects.Disk}
+
+  """
+  for (idx, disk) in enumerate(disks):
+    disk.iv_name = "disk/%s" % (base_index + idx, )
+
+
+class _InstNicModPrivate:
+  """Data structure for network interface modifications.
+
+  Used by L{LUInstanceSetParams}.
+
+  """
+  def __init__(self):
+    self.params = None
+    self.filled = None
+
+
 class LUInstanceSetParams(LogicalUnit):
   """Modifies an instances's parameters.
 
@@ -11803,56 +12005,140 @@ class LUInstanceSetParams(LogicalUnit):
   HTYPE = constants.HTYPE_INSTANCE
   REQ_BGL = False
 
+  @staticmethod
+  def _UpgradeDiskNicMods(kind, mods, verify_fn):
+    assert ht.TList(mods)
+    assert not mods or len(mods[0]) in (2, 3)
+
+    if mods and len(mods[0]) == 2:
+      result = []
+
+      addremove = 0
+      for op, params in mods:
+        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
+          result.append((op, -1, params))
+          addremove += 1
+
+          if addremove > 1:
+            raise errors.OpPrereqError("Only one %s add or remove operation is"
+                                       " supported at a time" % kind,
+                                       errors.ECODE_INVAL)
+        else:
+          result.append((constants.DDM_MODIFY, op, params))
+
+      assert verify_fn(result)
+    else:
+      result = mods
+
+    return result
+
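A hedged illustration of the upgrade performed on legacy two-element arguments (values invented):

  # Legacy format: (operation-or-index, params); upgraded format:
  # (operation, index, params), with -1 meaning "append".
  legacy = [(constants.DDM_ADD, {constants.IDISK_SIZE: 1024}),
            (0, {constants.IDISK_MODE: "ro"})]
  # _UpgradeDiskNicMods("disk", legacy, ...) would return:
  #   [(DDM_ADD, -1, {...}), (DDM_MODIFY, 0, {...})]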
+  @staticmethod
+  def _CheckMods(kind, mods, key_types, item_fn):
+    """Ensures requested disk/NIC modifications are valid.
+
+    """
+    for (op, _, params) in mods:
+      assert ht.TDict(params)
+
+      utils.ForceDictType(params, key_types)
+
+      if op == constants.DDM_REMOVE:
+        if params:
+          raise errors.OpPrereqError("No settings should be passed when"
+                                     " removing a %s" % kind,
+                                     errors.ECODE_INVAL)
+      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
+        item_fn(op, params)
+      else:
+        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
+
+  @staticmethod
+  def _VerifyDiskModification(op, params):
+    """Verifies a disk modification.
+
+    """
+    if op == constants.DDM_ADD:
+      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
+      if mode not in constants.DISK_ACCESS_SET:
+        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
+                                   errors.ECODE_INVAL)
+
+      size = params.get(constants.IDISK_SIZE, None)
+      if size is None:
+        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
+                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
+
+      try:
+        size = int(size)
+      except (TypeError, ValueError), err:
+        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
+                                   errors.ECODE_INVAL)
+
+      params[constants.IDISK_SIZE] = size
+
+    elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
+      raise errors.OpPrereqError("Disk size change not possible, use"
+                                 " grow-disk", errors.ECODE_INVAL)
+
+  @staticmethod
+  def _VerifyNicModification(op, params):
+    """Verifies a network interface modification.
+
+    """
+    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
+      ip = params.get(constants.INIC_IP, None)
+      if ip is None:
+        pass
+      elif ip.lower() == constants.VALUE_NONE:
+        params[constants.INIC_IP] = None
+      elif not netutils.IPAddress.IsValid(ip):
+        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
+                                   errors.ECODE_INVAL)
+
+      bridge = params.get("bridge", None)
+      link = params.get(constants.INIC_LINK, None)
+      if bridge and link:
+        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
+                                   " at the same time", errors.ECODE_INVAL)
+      elif bridge and bridge.lower() == constants.VALUE_NONE:
+        params["bridge"] = None
+      elif link and link.lower() == constants.VALUE_NONE:
+        params[constants.INIC_LINK] = None
+
+      if op == constants.DDM_ADD:
+        macaddr = params.get(constants.INIC_MAC, None)
+        if macaddr is None:
+          params[constants.INIC_MAC] = constants.VALUE_AUTO
+
+      if constants.INIC_MAC in params:
+        macaddr = params[constants.INIC_MAC]
+        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+          macaddr = utils.NormalizeAndValidateMac(macaddr)
+
+        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
+          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
+                                     " modifying an existing NIC",
+                                     errors.ECODE_INVAL)
+
   def CheckArguments(self):
     if not (self.op.nics or self.op.disks or self.op.disk_template or
             self.op.hvparams or self.op.beparams or self.op.os_name or
-            self.op.online_inst or self.op.offline_inst or
-            self.op.runtime_mem):
+            self.op.offline is not None or self.op.runtime_mem):
       raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
 
     if self.op.hvparams:
       _CheckGlobalHvParams(self.op.hvparams)
 
-    # Disk validation
-    disk_addremove = 0
-    for disk_op, disk_dict in self.op.disks:
-      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
-      if disk_op == constants.DDM_REMOVE:
-        disk_addremove += 1
-        continue
-      elif disk_op == constants.DDM_ADD:
-        disk_addremove += 1
-      else:
-        if not isinstance(disk_op, int):
-          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
-        if not isinstance(disk_dict, dict):
-          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
-          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-
-      if disk_op == constants.DDM_ADD:
-        mode = disk_dict.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
-        if mode not in constants.DISK_ACCESS_SET:
-          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
-                                     errors.ECODE_INVAL)
-        size = disk_dict.get(constants.IDISK_SIZE, None)
-        if size is None:
-          raise errors.OpPrereqError("Required disk parameter size missing",
-                                     errors.ECODE_INVAL)
-        try:
-          size = int(size)
-        except (TypeError, ValueError), err:
-          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
-                                     str(err), errors.ECODE_INVAL)
-        disk_dict[constants.IDISK_SIZE] = size
-      else:
-        # modification of disk
-        if constants.IDISK_SIZE in disk_dict:
-          raise errors.OpPrereqError("Disk size change not possible, use"
-                                     " grow-disk", errors.ECODE_INVAL)
+    self.op.disks = \
+      self._UpgradeDiskNicMods("disk", self.op.disks,
+        opcodes.OpInstanceSetParams.TestDiskModifications)
+    self.op.nics = \
+      self._UpgradeDiskNicMods("NIC", self.op.nics,
+        opcodes.OpInstanceSetParams.TestNicModifications)
 
-    if disk_addremove > 1:
-      raise errors.OpPrereqError("Only one disk add or remove operation"
-                                 " supported at a time", errors.ECODE_INVAL)
+    # Check disk modifications
+    self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
+                    self._VerifyDiskModification)
 
     if self.op.disks and self.op.disk_template is not None:
       raise errors.OpPrereqError("Disk template conversion and other disk"
@@ -11866,60 +12152,9 @@ class LUInstanceSetParams(LogicalUnit):
                                  " one requires specifying a secondary node",
                                  errors.ECODE_INVAL)
 
-    # NIC validation
-    nic_addremove = 0
-    for nic_op, nic_dict in self.op.nics:
-      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
-      if nic_op == constants.DDM_REMOVE:
-        nic_addremove += 1
-        continue
-      elif nic_op == constants.DDM_ADD:
-        nic_addremove += 1
-      else:
-        if not isinstance(nic_op, int):
-          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
-        if not isinstance(nic_dict, dict):
-          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
-          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-
-      # nic_dict should be a dict
-      nic_ip = nic_dict.get(constants.INIC_IP, None)
-      if nic_ip is not None:
-        if nic_ip.lower() == constants.VALUE_NONE:
-          nic_dict[constants.INIC_IP] = None
-        else:
-          if not netutils.IPAddress.IsValid(nic_ip):
-            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
-                                       errors.ECODE_INVAL)
-
-      nic_bridge = nic_dict.get("bridge", None)
-      nic_link = nic_dict.get(constants.INIC_LINK, None)
-      if nic_bridge and nic_link:
-        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
-                                   " at the same time", errors.ECODE_INVAL)
-      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
-        nic_dict["bridge"] = None
-      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
-        nic_dict[constants.INIC_LINK] = None
-
-      if nic_op == constants.DDM_ADD:
-        nic_mac = nic_dict.get(constants.INIC_MAC, None)
-        if nic_mac is None:
-          nic_dict[constants.INIC_MAC] = constants.VALUE_AUTO
-
-      if constants.INIC_MAC in nic_dict:
-        nic_mac = nic_dict[constants.INIC_MAC]
-        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
-
-        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
-          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
-                                     " modifying an existing nic",
-                                     errors.ECODE_INVAL)
-
-    if nic_addremove > 1:
-      raise errors.OpPrereqError("Only one NIC add or remove operation"
-                                 " supported at a time", errors.ECODE_INVAL)
+    # Check NIC modifications
+    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
+                    self._VerifyNicModification)
 
   def ExpandNames(self):
     self._ExpandAndLockInstance()
@@ -11930,6 +12165,7 @@ class LUInstanceSetParams(LogicalUnit):
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
+    # TODO: Acquire group lock in shared mode (disk parameters)
     if level == locking.LEVEL_NODE:
       self._LockInstancesNodes()
       if self.op.disk_template and self.op.remote_node:
@@ -11955,38 +12191,17 @@ class LUInstanceSetParams(LogicalUnit):
       args["vcpus"] = self.be_new[constants.BE_VCPUS]
     # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
     # information at all.
-    if self.op.nics:
-      args["nics"] = []
-      nic_override = dict(self.op.nics)
-      for idx, nic in enumerate(self.instance.nics):
-        if idx in nic_override:
-          this_nic_override = nic_override[idx]
-        else:
-          this_nic_override = {}
-        if constants.INIC_IP in this_nic_override:
-          ip = this_nic_override[constants.INIC_IP]
-        else:
-          ip = nic.ip
-        if constants.INIC_MAC in this_nic_override:
-          mac = this_nic_override[constants.INIC_MAC]
-        else:
-          mac = nic.mac
-        if idx in self.nic_pnew:
-          nicparams = self.nic_pnew[idx]
-        else:
-          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
-        mode = nicparams[constants.NIC_MODE]
-        link = nicparams[constants.NIC_LINK]
-        args["nics"].append((ip, mac, mode, link))
-      if constants.DDM_ADD in nic_override:
-        ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None)
-        mac = nic_override[constants.DDM_ADD][constants.INIC_MAC]
-        nicparams = self.nic_pnew[constants.DDM_ADD]
+
+    if self._new_nics is not None:
+      nics = []
+
+      for nic in self._new_nics:
+        nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
         mode = nicparams[constants.NIC_MODE]
         link = nicparams[constants.NIC_LINK]
-        args["nics"].append((ip, mac, mode, link))
-      elif constants.DDM_REMOVE in nic_override:
-        del args["nics"][-1]
+        nics.append((nic.ip, nic.mac, mode, link))
+
+      args["nics"] = nics
 
     env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
     if self.op.disk_template:
@@ -12003,6 +12218,61 @@ class LUInstanceSetParams(LogicalUnit):
     nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
     return (nl, nl)
 
+  def _PrepareNicModification(self, params, private, old_ip, old_params,
+                              cluster, pnode):
+    update_params_dict = dict([(key, params[key])
+                               for key in constants.NICS_PARAMETERS
+                               if key in params])
+
+    if "bridge" in params:
+      update_params_dict[constants.NIC_LINK] = params["bridge"]
+
+    new_params = _GetUpdatedParams(old_params, update_params_dict)
+    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
+
+    new_filled_params = cluster.SimpleFillNIC(new_params)
+    objects.NIC.CheckParameterSyntax(new_filled_params)
+
+    new_mode = new_filled_params[constants.NIC_MODE]
+    if new_mode == constants.NIC_MODE_BRIDGED:
+      bridge = new_filled_params[constants.NIC_LINK]
+      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
+      if msg:
+        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
+        if self.op.force:
+          self.warn.append(msg)
+        else:
+          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
+
+    elif new_mode == constants.NIC_MODE_ROUTED:
+      ip = params.get(constants.INIC_IP, old_ip)
+      if ip is None:
+        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
+                                   " on a routed NIC", errors.ECODE_INVAL)
+
+    if constants.INIC_MAC in params:
+      mac = params[constants.INIC_MAC]
+      if mac is None:
+        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
+                                   errors.ECODE_INVAL)
+      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+        # otherwise generate the MAC address
+        params[constants.INIC_MAC] = \
+          self.cfg.GenerateMAC(self.proc.GetECId())
+      else:
+        # or validate/reserve the current one
+        try:
+          self.cfg.ReserveMAC(mac, self.proc.GetECId())
+        except errors.ReservationError:
+          raise errors.OpPrereqError("MAC address '%s' already in use"
+                                     " in cluster" % mac,
+                                     errors.ECODE_NOTUNIQUE)
+
+    private.params = new_params
+    private.filled = new_filled_params
+
+    return (None, None)
+
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -12020,6 +12290,10 @@ class LUInstanceSetParams(LogicalUnit):
     pnode_info = self.cfg.GetNodeInfo(pnode)
     self.diskparams = self.cfg.GetNodeGroup(pnode_info.group).diskparams
 
+    # Prepare disk/NIC modifications
+    self.diskmod = PrepareContainerMods(self.op.disks, None)
+    self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
+
     # OS change
     if self.op.os_name and not self.op.force:
       _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
@@ -12028,6 +12302,9 @@ class LUInstanceSetParams(LogicalUnit):
     else:
       instance_os = instance.os
 
+    assert not (self.op.disk_template and self.op.disks), \
+      "Can't modify disk template and apply disk changes at the same time"
+
     if self.op.disk_template:
       if instance.disk_template == self.op.disk_template:
         raise errors.OpPrereqError("Instance already has disk template %s" %
@@ -12224,118 +12501,53 @@ class LUInstanceSetParams(LogicalUnit):
                              self.op.memory - current_memory,
                              instance.hypervisor)
 
-    # NIC processing
-    self.nic_pnew = {}
-    self.nic_pinst = {}
-    for nic_op, nic_dict in self.op.nics:
-      if nic_op == constants.DDM_REMOVE:
-        if not instance.nics:
-          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
-                                     errors.ECODE_INVAL)
-        continue
-      if nic_op != constants.DDM_ADD:
-        # an existing nic
-        if not instance.nics:
-          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
-                                     " no NICs" % nic_op,
-                                     errors.ECODE_INVAL)
-        if nic_op < 0 or nic_op >= len(instance.nics):
-          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
-                                     " are 0 to %d" %
-                                     (nic_op, len(instance.nics) - 1),
-                                     errors.ECODE_INVAL)
-        old_nic_params = instance.nics[nic_op].nicparams
-        old_nic_ip = instance.nics[nic_op].ip
-      else:
-        old_nic_params = {}
-        old_nic_ip = None
-
-      update_params_dict = dict([(key, nic_dict[key])
-                                 for key in constants.NICS_PARAMETERS
-                                 if key in nic_dict])
-
-      if "bridge" in nic_dict:
-        update_params_dict[constants.NIC_LINK] = nic_dict["bridge"]
-
-      new_nic_params = _GetUpdatedParams(old_nic_params,
-                                         update_params_dict)
-      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
-      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
-      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
-      self.nic_pinst[nic_op] = new_nic_params
-      self.nic_pnew[nic_op] = new_filled_nic_params
-      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
-
-      if new_nic_mode == constants.NIC_MODE_BRIDGED:
-        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
-        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
-        if msg:
-          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
-          if self.op.force:
-            self.warn.append(msg)
-          else:
-            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
-      if new_nic_mode == constants.NIC_MODE_ROUTED:
-        if constants.INIC_IP in nic_dict:
-          nic_ip = nic_dict[constants.INIC_IP]
-        else:
-          nic_ip = old_nic_ip
-        if nic_ip is None:
-          raise errors.OpPrereqError("Cannot set the nic ip to None"
-                                     " on a routed nic", errors.ECODE_INVAL)
-      if constants.INIC_MAC in nic_dict:
-        nic_mac = nic_dict[constants.INIC_MAC]
-        if nic_mac is None:
-          raise errors.OpPrereqError("Cannot set the nic mac to None",
-                                     errors.ECODE_INVAL)
-        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-          # otherwise generate the mac
-          nic_dict[constants.INIC_MAC] = \
-            self.cfg.GenerateMAC(self.proc.GetECId())
-        else:
-          # or validate/reserve the current one
-          try:
-            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
-          except errors.ReservationError:
-            raise errors.OpPrereqError("MAC address %s already in use"
-                                       " in cluster" % nic_mac,
-                                       errors.ECODE_NOTUNIQUE)
-
-    # DISK processing
     if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Disk operations not supported for"
                                  " diskless instances",
                                  errors.ECODE_INVAL)
-    for disk_op, _ in self.op.disks:
-      if disk_op == constants.DDM_REMOVE:
-        if len(instance.disks) == 1:
-          raise errors.OpPrereqError("Cannot remove the last disk of"
-                                     " an instance", errors.ECODE_INVAL)
-        _CheckInstanceState(self, instance, INSTANCE_DOWN,
-                            msg="cannot remove disks")
-
-      if (disk_op == constants.DDM_ADD and
-          len(instance.disks) >= constants.MAX_DISKS):
-        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
-                                   " add more" % constants.MAX_DISKS,
-                                   errors.ECODE_STATE)
-      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
-        # an existing disk
-        if disk_op < 0 or disk_op >= len(instance.disks):
-          raise errors.OpPrereqError("Invalid disk index %s, valid values"
-                                     " are 0 to %d" %
-                                     (disk_op, len(instance.disks)),
-                                     errors.ECODE_INVAL)
 
-    # disabling the instance
-    if self.op.offline_inst:
-      _CheckInstanceState(self, instance, INSTANCE_DOWN,
-                          msg="cannot change instance state to offline")
+    def _PrepareNicCreate(_, params, private):
+      return self._PrepareNicModification(params, private, None, {},
+                                          cluster, pnode)
+
+    def _PrepareNicMod(_, nic, params, private):
+      return self._PrepareNicModification(params, private, nic.ip,
+                                          nic.nicparams, cluster, pnode)
+
+    # Verify NIC changes (operating on a copy)
+    nics = instance.nics[:]
+    ApplyContainerMods("NIC", nics, None, self.nicmod,
+                       _PrepareNicCreate, _PrepareNicMod, None)
+    if len(nics) > constants.MAX_NICS:
+      raise errors.OpPrereqError("Instance has too many network interfaces"
+                                 " (%d), cannot add more" % constants.MAX_NICS,
+                                 errors.ECODE_STATE)
 
-    # enabling the instance
-    if self.op.online_inst:
-      _CheckInstanceState(self, instance, INSTANCE_OFFLINE,
-                          msg="cannot make instance go online")
+    # Verify disk changes (operating on a copy)
+    disks = instance.disks[:]
+    ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
+    if len(disks) > constants.MAX_DISKS:
+      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
+                                 " more" % constants.MAX_DISKS,
+                                 errors.ECODE_STATE)
+
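Both verification calls above run the full modification list against throwaway copies, so bad indices and limit violations surface while still in CheckPrereq and the instance object stays pristine. Roughly, using the hypothetical helper sketched earlier (names invented):

    MAX_DISKS = 16                            # limit; value invented
    instance_disks = ["disk0", "disk1"]       # stand-ins for objects.Disk
    disk_mods = [(ADD, -1, "disk2")]          # one append, no callbacks

    disks = list(instance_disks)              # shallow copy of the list
    apply_container_mods("disk", disks, None, disk_mods, None, None, None)
    if len(disks) > MAX_DISKS:
      raise RuntimeError("too many disks")
    assert instance_disks == ["disk0", "disk1"]  # original list untouched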
+    if self.op.offline is not None:
+      if self.op.offline:
+        msg = "can't change to offline"
+      else:
+        msg = "can't change to online"
+      _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE, msg=msg)
+
+    # Pre-compute NIC changes (necessary to use result in hooks)
+    self._nic_chgdesc = []
+    if self.nicmod:
+      # Operate on copies as this is still in prereq
+      nics = [nic.Copy() for nic in instance.nics]
+      ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
+                         self._CreateNewNic, self._ApplyNicMods, None)
+      self._new_nics = nics
+    else:
+      self._new_nics = None
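The precompute step exists because the post-execution hooks must see the final NIC configuration, yet CheckPrereq may not modify anything. A runnable sketch of the idiom, reusing the helper from the earlier sketch and a stand-in for NIC objects with a Copy() method:

    class FakeNic:
      def __init__(self, mac):
        self.mac = mac
      def Copy(self):
        return FakeNic(self.mac)

    def modify_nic_fn(idx, nic, params):
      nic.mac = params["mac"]
      return [("nic.mac/%d" % idx, nic.mac)]

    instance_nics = [FakeNic("aa:00:00:00:00:01")]
    nic_mods = [(MODIFY, 0, {"mac": "aa:00:00:00:00:02"})]

    chgdesc = []
    new_nics = [nic.Copy() for nic in instance_nics]  # deep copies
    apply_container_mods("NIC", new_nics, chgdesc, nic_mods,
                         None, modify_nic_fn, None)
    # Exec() later swaps the prepared list in with one assignment:
    instance_nics = new_nics
    assert instance_nics[0].mac == "aa:00:00:00:00:02"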
 
   def _ConvertPlainToDrbd(self, feedback_fn):
     """Converts an instance from plain to drbd.
@@ -12357,7 +12569,7 @@ class LUInstanceSetParams(LogicalUnit):
                                       disk_info, None, None, 0, feedback_fn,
                                       self.diskparams)
     info = _GetInstanceInfoText(instance)
-    feedback_fn("Creating aditional volumes...")
+    feedback_fn("Creating additional volumes...")
     # first, create the missing data and meta devices
     for disk in new_disks:
       # unfortunately this is... not too nice
@@ -12418,6 +12630,12 @@ class LUInstanceSetParams(LogicalUnit):
       child.size = parent.size
       child.mode = parent.mode
 
+    # these are DRBD disks; return their TCP ports to the pool
+    # NOTE: this must be done right before the call to cfg.Update!
+    for disk in old_disks:
+      tcp_port = disk.logical_id[2]
+      self.cfg.AddTcpUdpPort(tcp_port)
+
     # update instance structure
     instance.disks = new_disks
     instance.disk_template = constants.DT_PLAIN
@@ -12443,12 +12661,104 @@ class LUInstanceSetParams(LogicalUnit):
         self.LogWarning("Could not remove metadata for disk %d on node %s,"
                         " continuing anyway: %s", idx, pnode, msg)
 
-    # this is a DRBD disk, return its port to the pool
-    for disk in old_disks:
-      tcp_port = disk.logical_id[2]
-      self.cfg.AddTcpUdpPort(tcp_port)
+  def _CreateNewDisk(self, idx, params, _):
+    """Creates a new disk.
 
-    # Node resource locks will be released by caller
+    """
+    instance = self.instance
+
+    # add a new disk
+    if instance.disk_template in constants.DTS_FILEBASED:
+      (file_driver, file_path) = instance.disks[0].logical_id
+      file_path = os.path.dirname(file_path)
+    else:
+      file_driver = file_path = None
+
+    disk = \
+      _GenerateDiskTemplate(self, instance.disk_template, instance.name,
+                            instance.primary_node, instance.secondary_nodes,
+                            [params], file_path, file_driver, idx,
+                            self.Log, self.diskparams)[0]
+
+    info = _GetInstanceInfoText(instance)
+
+    logging.info("Creating volume %s for instance %s",
+                 disk.iv_name, instance.name)
+    # Note: this needs to be kept in sync with _CreateDisks
+    #HARDCODE
+    for node in instance.all_nodes:
+      f_create = (node == instance.primary_node)
+      try:
+        _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
+      except errors.OpExecError, err:
+        self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
+                        disk.iv_name, disk, node, err)
+
+    return (disk, [
+      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
+      ])
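The (label, value) pairs returned here flow into the job's change list and, through the result, into hook environments. For a 1024 MiB "rw" disk added at index 2, the entry would look as follows (values invented):

    idx, size, mode = 2, 1024, "rw"       # example values
    entry = ("disk/%d" % idx, "add:size=%s,mode=%s" % (size, mode))
    assert entry == ("disk/2", "add:size=1024,mode=rw")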
+
+  @staticmethod
+  def _ModifyDisk(idx, disk, params, _):
+    """Modifies a disk.
+
+    """
+    disk.mode = params[constants.IDISK_MODE]
+
+    return [
+      ("disk.mode/%d" % idx, disk.mode),
+      ]
+
+  def _RemoveDisk(self, idx, root, _):
+    """Removes a disk.
+
+    """
+    for node, disk in root.ComputeNodeTree(self.instance.primary_node):
+      self.cfg.SetDiskID(disk, node)
+      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
+      if msg:
+        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
+                        " continuing anyway", idx, node, msg)
+
+    # if this is a DRBD disk, return its port to the pool
+    if root.dev_type in constants.LDS_DRBD:
+      self.cfg.AddTcpUdpPort(root.logical_id[2])
+
+  @staticmethod
+  def _CreateNewNic(idx, params, private):
+    """Creates data structure for a new network interface.
+
+    """
+    mac = params[constants.INIC_MAC]
+    ip = params.get(constants.INIC_IP, None)
+    nicparams = private.params
+
+    return (objects.NIC(mac=mac, ip=ip, nicparams=nicparams), [
+      ("nic.%d" % idx,
+       "add:mac=%s,ip=%s,mode=%s,link=%s" %
+       (mac, ip, private.filled[constants.NIC_MODE],
+       private.filled[constants.NIC_LINK])),
+      ])
+
+  @staticmethod
+  def _ApplyNicMods(idx, nic, params, private):
+    """Modifies a network interface.
+
+    """
+    changes = []
+
+    for key in [constants.INIC_MAC, constants.INIC_IP]:
+      if key in params:
+        changes.append(("nic.%s/%d" % (key, idx), params[key]))
+        setattr(nic, key, params[key])
+
+    if private.params:
+      nic.nicparams = private.params
+
+      # report the newly applied NIC parameters rather than the raw
+      # request, which would duplicate the MAC/IP entries added above
+      for (key, val) in private.params.items():
+        changes.append(("nic.%s/%d" % (key, idx), val))
+
+    return changes
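Analogously to the disk callbacks, the pairs built here describe each field that changed. Assuming constants.INIC_MAC is the string "mac", a MAC change on the first NIC would contribute an entry like this (address invented):

    idx = 0                               # first NIC
    key = "mac"                           # i.e. constants.INIC_MAC, assumed
    entry = ("nic.%s/%d" % (key, idx), "aa:00:00:35:d2:7e")
    assert entry == ("nic.mac/0", "aa:00:00:35:d2:7e")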
 
   def Exec(self, feedback_fn):
     """Modifies an instance.
@@ -12458,6 +12768,7 @@ class LUInstanceSetParams(LogicalUnit):
     """
     # Process here the warnings from CheckPrereq, as we don't have a
     # feedback_fn there.
+    # TODO: Replace with self.LogWarning
     for warn in self.warn:
       feedback_fn("WARNING: %s" % warn)
 
@@ -12476,66 +12787,10 @@ class LUInstanceSetParams(LogicalUnit):
       rpcres.Raise("Cannot modify instance runtime memory")
       result.append(("runtime_memory", self.op.runtime_mem))
 
-    # disk changes
-    for disk_op, disk_dict in self.op.disks:
-      if disk_op == constants.DDM_REMOVE:
-        # remove the last disk
-        device = instance.disks.pop()
-        device_idx = len(instance.disks)
-        for node, disk in device.ComputeNodeTree(instance.primary_node):
-          self.cfg.SetDiskID(disk, node)
-          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
-          if msg:
-            self.LogWarning("Could not remove disk/%d on node %s: %s,"
-                            " continuing anyway", device_idx, node, msg)
-        result.append(("disk/%d" % device_idx, "remove"))
-
-        # if this is a DRBD disk, return its port to the pool
-        if device.dev_type in constants.LDS_DRBD:
-          tcp_port = device.logical_id[2]
-          self.cfg.AddTcpUdpPort(tcp_port)
-      elif disk_op == constants.DDM_ADD:
-        # add a new disk
-        if instance.disk_template in (constants.DT_FILE,
-                                        constants.DT_SHARED_FILE):
-          file_driver, file_path = instance.disks[0].logical_id
-          file_path = os.path.dirname(file_path)
-        else:
-          file_driver = file_path = None
-        disk_idx_base = len(instance.disks)
-        new_disk = _GenerateDiskTemplate(self,
-                                         instance.disk_template,
-                                         instance.name, instance.primary_node,
-                                         instance.secondary_nodes,
-                                         [disk_dict],
-                                         file_path,
-                                         file_driver,
-                                         disk_idx_base,
-                                         feedback_fn,
-                                         self.diskparams)[0]
-        instance.disks.append(new_disk)
-        info = _GetInstanceInfoText(instance)
-
-        logging.info("Creating volume %s for instance %s",
-                     new_disk.iv_name, instance.name)
-        # Note: this needs to be kept in sync with _CreateDisks
-        #HARDCODE
-        for node in instance.all_nodes:
-          f_create = node == instance.primary_node
-          try:
-            _CreateBlockDev(self, node, instance, new_disk,
-                            f_create, info, f_create)
-          except errors.OpExecError, err:
-            self.LogWarning("Failed to create volume %s (%s) on"
-                            " node %s: %s",
-                            new_disk.iv_name, new_disk, node, err)
-        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
-                       (new_disk.size, new_disk.mode)))
-      else:
-        # change a given disk
-        instance.disks[disk_op].mode = disk_dict[constants.IDISK_MODE]
-        result.append(("disk.mode/%d" % disk_op,
-                       disk_dict[constants.IDISK_MODE]))
+    # Apply disk changes
+    ApplyContainerMods("disk", instance.disks, result, self.diskmod,
+                       self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
+    _UpdateIvNames(0, instance.disks)
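Renumbering is needed because iv_name encodes the disk's index; after an add or remove the names would otherwise no longer match positions. A sketch of the assumed behaviour of _UpdateIvNames, inferred from the call site above rather than copied from its definition:

    def update_iv_names(base, disks):
      # assumed semantics: iv_name is "disk/<index>", counted from base
      for (idx, disk) in enumerate(disks):
        disk.iv_name = "disk/%d" % (base + idx)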
 
     if self.op.disk_template:
       if __debug__:
@@ -12569,33 +12824,10 @@ class LUInstanceSetParams(LogicalUnit):
     _ReleaseLocks(self, locking.LEVEL_NODE)
     _ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
-    # NIC changes
-    for nic_op, nic_dict in self.op.nics:
-      if nic_op == constants.DDM_REMOVE:
-        # remove the last nic
-        del instance.nics[-1]
-        result.append(("nic.%d" % len(instance.nics), "remove"))
-      elif nic_op == constants.DDM_ADD:
-        # mac and bridge should be set, by now
-        mac = nic_dict[constants.INIC_MAC]
-        ip = nic_dict.get(constants.INIC_IP, None)
-        nicparams = self.nic_pinst[constants.DDM_ADD]
-        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
-        instance.nics.append(new_nic)
-        result.append(("nic.%d" % (len(instance.nics) - 1),
-                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
-                       (new_nic.mac, new_nic.ip,
-                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
-                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
-                       )))
-      else:
-        for key in (constants.INIC_MAC, constants.INIC_IP):
-          if key in nic_dict:
-            setattr(instance.nics[nic_op], key, nic_dict[key])
-        if nic_op in self.nic_pinst:
-          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
-        for key, val in nic_dict.iteritems():
-          result.append(("nic.%s/%d" % (key, nic_op), val))
+    # Apply NIC changes
+    if self._new_nics is not None:
+      instance.nics = self._new_nics
+      result.extend(self._nic_chgdesc)
 
     # hvparams changes
     if self.op.hvparams:
@@ -12619,13 +12851,17 @@ class LUInstanceSetParams(LogicalUnit):
       for key, val in self.op.osparams.iteritems():
         result.append(("os/%s" % key, val))
 
-    # online/offline instance
-    if self.op.online_inst:
-      self.cfg.MarkInstanceDown(instance.name)
-      result.append(("admin_state", constants.ADMINST_DOWN))
-    if self.op.offline_inst:
+    if self.op.offline is None:
+      # Ignore
+      pass
+    elif self.op.offline:
+      # Mark instance as offline
       self.cfg.MarkInstanceOffline(instance.name)
       result.append(("admin_state", constants.ADMINST_OFFLINE))
+    else:
+      # Mark instance as online, but stopped
+      self.cfg.MarkInstanceDown(instance.name)
+      result.append(("admin_state", constants.ADMINST_DOWN))
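The offline parameter is deliberately tri-state: None leaves the admin state alone, True marks the instance offline, and False brings it back under cluster control without starting it. A compact sketch of that mapping, with plain strings standing in for the ADMINST_* constants:

    def new_admin_state(offline, current):
      if offline is None:
        return current        # parameter not given: no change
      elif offline:
        return "offline"      # constants.ADMINST_OFFLINE
      else:
        return "down"         # constants.ADMINST_DOWN: managed, but stopped

    assert new_admin_state(None, "up") == "up"
    assert new_admin_state(True, "down") == "offline"
    assert new_admin_state(False, "offline") == "down"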
 
     self.cfg.Update(instance, feedback_fn)
 
@@ -12719,7 +12955,7 @@ class LUInstanceChangeGroup(LogicalUnit):
 
     if self.req_target_uuids:
       # User requested specific target groups
-      self.target_uuids = self.req_target_uuids
+      self.target_uuids = frozenset(self.req_target_uuids)
     else:
       # All groups except those used by the instance are potential targets
       self.target_uuids = owned_groups - inst_groups
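Wrapping the user-supplied list in a frozenset both deduplicates it and gives self.target_uuids the same set semantics in either branch, so later code can rely on set operations regardless of which branch produced the value. For instance:

    req_target_uuids = ["uuid-a", "uuid-b", "uuid-a"]  # input may repeat
    target_uuids = frozenset(req_target_uuids)
    assert target_uuids == frozenset(["uuid-a", "uuid-b"])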
@@ -13910,16 +14146,8 @@ class LUGroupEvacuate(LogicalUnit):
     self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
-    for instance_name in owned_instances:
-      inst = self.instances[instance_name]
-      assert owned_nodes.issuperset(inst.all_nodes), \
-        "Instance %s's nodes changed while we kept the lock" % instance_name
-
-      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
-                                             owned_groups)
-
-      assert self.group_uuid in inst_groups, \
-        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
+    _CheckInstancesNodeGroups(self.cfg, self.instances,
+                              owned_groups, owned_nodes, self.group_uuid)
 
     if self.req_target_uuids:
       # User requested specific target groups
@@ -14345,7 +14573,7 @@ class IAllocator(object):
     self.in_text = self.out_text = self.in_data = self.out_data = None
     # init all input fields so that pylint is happy
     self.mode = mode
-    self.memory = self.disks = self.disk_template = None
+    self.memory = self.disks = self.disk_template = self.spindle_use = None
     self.os = self.tags = self.nics = self.vcpus = None
     self.hypervisor = None
     self.relocate_from = None
@@ -14416,7 +14644,7 @@ class IAllocator(object):
 
     data["nodegroups"] = self._ComputeNodeGroupData(cfg)
 
-    config_ndata = self._ComputeBasicNodeData(ninfo)
+    config_ndata = self._ComputeBasicNodeData(cfg, ninfo)
     data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                  i_list, config_ndata)
     assert len(data["nodes"]) == len(ninfo), \
@@ -14442,7 +14670,7 @@ class IAllocator(object):
     return ng
 
   @staticmethod
-  def _ComputeBasicNodeData(node_cfg):
+  def _ComputeBasicNodeData(cfg, node_cfg):
     """Compute global node data.
 
     @rtype: dict
@@ -14460,6 +14688,7 @@ class IAllocator(object):
       "group": ninfo.group,
       "master_capable": ninfo.master_capable,
       "vm_capable": ninfo.vm_capable,
+      "ndparams": cfg.GetNdParams(ninfo),
       })
       for ninfo in node_cfg.values())
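With this change every node entry handed to the allocator also carries its fully evaluated node parameters. Schematically, one entry of the returned mapping now looks like this; only the fields visible in this hunk are shown, the values are invented, and "spindle_count" is assumed to be a valid node parameter:

    # Partial shape of one node entry; earlier fields elided.
    node_entry = {
      "group": "f4e06e0f-0000-0000-0000-000000000000",  # node group UUID
      "master_capable": True,
      "vm_capable": True,
      "ndparams": {"spindle_count": 1},   # from cfg.GetNdParams(ninfo)
      }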
 
@@ -14550,6 +14779,7 @@ class IAllocator(object):
         "admin_state": iinfo.admin_state,
         "vcpus": beinfo[constants.BE_VCPUS],
         "memory": beinfo[constants.BE_MAXMEM],
+        "spindle_use": beinfo[constants.BE_SPINDLE_USE],
         "os": iinfo.os,
         "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
         "nics": nic_data,
@@ -14589,6 +14819,7 @@ class IAllocator(object):
       "os": self.os,
       "vcpus": self.vcpus,
       "memory": self.memory,
+      "spindle_use": self.spindle_use,
       "disks": self.disks,
       "disk_space_total": disk_space,
       "nics": self.nics,
@@ -14702,6 +14933,7 @@ class IAllocator(object):
        [
         ("name", ht.TString),
         ("memory", ht.TInt),
+        ("spindle_use", ht.TInt),
         ("disks", ht.TListOf(ht.TDict)),
         ("disk_template", ht.TString),
         ("os", ht.TString),