gnt-cluster modify/init: deprecate --no-lvm-storage
[ganeti-local] / lib / cmdlib.py
index b8c2318..bba8277 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -813,8 +813,22 @@ def _GetUpdatedParams(old_params, update_dict,
   return params_copy
 
 
+def _UpdateMinMaxISpecs(ipolicy, new_minmax, group_policy):
+  use_none = use_default = group_policy
+  minmax = ipolicy.setdefault(constants.ISPECS_MINMAX, {})
+  for (key, value) in new_minmax.items():
+    if key not in constants.ISPECS_MINMAX_KEYS:
+      raise errors.OpPrereqError("Invalid key in new ipolicy/%s: %s" %
+                                 (constants.ISPECS_MINMAX, key),
+                                 errors.ECODE_INVAL)
+    old_spec = minmax.get(key, {})
+    minmax[key] = _GetUpdatedParams(old_spec, value, use_none=use_none,
+                                    use_default=use_default)
+    utils.ForceDictType(minmax[key], constants.ISPECS_PARAMETER_TYPES)
+
+
 def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
-  """Return the new version of a instance policy.
+  """Return the new version of an instance policy.
 
   @param group_policy: whether this policy applies to a group and thus
     we should support removal of policy entries
@@ -826,11 +840,13 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
     if key not in constants.IPOLICY_ALL_KEYS:
       raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                  errors.ECODE_INVAL)
-    if key in constants.IPOLICY_ISPECS:
-      utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
+    if key == constants.ISPECS_MINMAX:
+      _UpdateMinMaxISpecs(ipolicy, value, group_policy)
+    elif key == constants.ISPECS_STD:
       ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=use_none,
                                        use_default=use_default)
+      utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
     else:
       if (not value or value == [constants.VALUE_DEFAULT] or
           value == constants.VALUE_DEFAULT):
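
For illustration, a minimal standalone sketch (not Ganeti code) of the ipolicy
layout this hunk introduces: the min/max ispecs move under a nested "minmax"
key while the "std" spec stays at the top level. The literal key strings are
assumptions standing in for the constants.ISPECS_* values, and the merge
helper mimics _UpdateMinMaxISpecs without the use_none/use_default handling
of _GetUpdatedParams:

    ISPECS_MINMAX = "minmax"  # assumed values of the constants.ISPECS_* names
    ISPECS_MIN = "min"
    ISPECS_MAX = "max"
    ISPECS_STD = "std"

    ipolicy = {
      ISPECS_MINMAX: {
        ISPECS_MIN: {"memory-size": 128, "cpu-count": 1},
        ISPECS_MAX: {"memory-size": 32768, "cpu-count": 8},
        },
      ISPECS_STD: {"memory-size": 2048, "cpu-count": 2},
      }

    def update_minmax_ispecs(ipolicy, new_minmax):
      # Simplified merge: validate the key, then overlay the new values on
      # top of the old min/max dict.
      minmax = ipolicy.setdefault(ISPECS_MINMAX, {})
      for (key, value) in new_minmax.items():
        if key not in (ISPECS_MIN, ISPECS_MAX):
          raise ValueError("Invalid key in new ipolicy/minmax: %s" % key)
        merged = dict(minmax.get(key, {}))
        merged.update(value)
        minmax[key] = merged

    update_minmax_ispecs(ipolicy, {ISPECS_MAX: {"cpu-count": 16}})
    assert ipolicy[ISPECS_MINMAX][ISPECS_MAX]["cpu-count"] == 16
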
@@ -1203,22 +1219,21 @@ def _CheckInstanceState(lu, instance, req_states, msg=None):
                      " is down")
 
 
-def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
+def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
   """Computes if value is in the desired range.
 
   @param name: name of the parameter for which we perform the check
   @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
       not just 'disk')
-  @param ipolicy: dictionary containing min, max and std values
+  @param ispecs: dictionary containing min and max values
   @param value: actual value that we want to use
-  @return: None or element not meeting the criteria
-
+  @return: None or an error string
 
   """
   if value in [None, constants.VALUE_AUTO]:
     return None
-  max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
-  min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
+  max_v = ispecs[constants.ISPECS_MAX].get(name, value)
+  min_v = ispecs[constants.ISPECS_MIN].get(name, value)
   if value > max_v or min_v > value:
     if qualifier:
       fqn = "%s/%s" % (name, qualifier)
@@ -1231,6 +1246,7 @@ def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
 
 def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                  nic_count, disk_sizes, spindle_use,
+                                 disk_template,
                                  _compute_fn=_ComputeMinMaxSpec):
   """Verifies ipolicy against provided specs.
 
@@ -1248,6 +1264,8 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
   @type spindle_use: int
   @param spindle_use: The number of spindles this instance uses
+  @type disk_template: string
+  @param disk_template: The disk template of the instance
   @param _compute_fn: The compute function (unittest only)
   @return: A list of violations, or an empty list if no violations are found
 
@@ -1257,18 +1275,26 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   test_settings = [
     (constants.ISPEC_MEM_SIZE, "", mem_size),
     (constants.ISPEC_CPU_COUNT, "", cpu_count),
-    (constants.ISPEC_DISK_COUNT, "", disk_count),
     (constants.ISPEC_NIC_COUNT, "", nic_count),
     (constants.ISPEC_SPINDLE_USE, "", spindle_use),
     ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
          for idx, d in enumerate(disk_sizes)]
-
-  return filter(None,
-                (_compute_fn(name, qualifier, ipolicy, value)
-                 for (name, qualifier, value) in test_settings))
-
-
-def _ComputeIPolicyInstanceViolation(ipolicy, instance,
+  if disk_template != constants.DT_DISKLESS:
+    # This check doesn't make sense for diskless instances
+    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
+  ret = []
+  allowed_dts = ipolicy[constants.IPOLICY_DTS]
+  if disk_template not in allowed_dts:
+    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
+               (disk_template, utils.CommaJoin(allowed_dts)))
+
+  minmax = ipolicy[constants.ISPECS_MINMAX]
+  return ret + filter(None,
+                      (_compute_fn(name, qualifier, minmax, value)
+                       for (name, qualifier, value) in test_settings))
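
The two template-related additions above, reduced to a standalone sketch
(plain strings stand in for constants.IPOLICY_DTS and the template names):
the template must be in the policy's allowed set, and the disk-count test is
simply skipped for diskless instances.

    def template_violations(ipolicy, disk_template):
      ret = []
      allowed = ipolicy["disk-templates"]  # stand-in for IPOLICY_DTS
      if disk_template not in allowed:
        ret.append("Disk template %s is not allowed (allowed templates: %s)" %
                   (disk_template, ", ".join(allowed)))
      return ret

    policy = {"disk-templates": ["plain", "drbd"]}
    assert template_violations(policy, "drbd") == []
    assert template_violations(policy, "rbd")  # one violation reported
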
+
+
+def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                      _compute_fn=_ComputeIPolicySpecViolation):
   """Compute if instance meets the specs of ipolicy.
 
@@ -1276,29 +1302,36 @@ def _ComputeIPolicyInstanceViolation(ipolicy, instance,
   @param ipolicy: The ipolicy to verify against
   @type instance: L{objects.Instance}
   @param instance: The instance to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
   """
-  mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
-  cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
-  spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
+  be_full = cfg.GetClusterInfo().FillBE(instance)
+  mem_size = be_full[constants.BE_MAXMEM]
+  cpu_count = be_full[constants.BE_VCPUS]
+  spindle_use = be_full[constants.BE_SPINDLE_USE]
   disk_count = len(instance.disks)
   disk_sizes = [disk.size for disk in instance.disks]
   nic_count = len(instance.nics)
+  disk_template = instance.disk_template
 
   return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes, spindle_use)
+                     disk_sizes, spindle_use, disk_template)
 
 
 def _ComputeIPolicyInstanceSpecViolation(
-  ipolicy, instance_spec, _compute_fn=_ComputeIPolicySpecViolation):
+  ipolicy, instance_spec, disk_template,
+  _compute_fn=_ComputeIPolicySpecViolation):
   """Compute if instance specs meets the specs of ipolicy.
 
   @type ipolicy: dict
   @param ipolicy: The ipolicy to verify against
   @type instance_spec: dict
   @param instance_spec: The instance spec to verify
+  @type disk_template: string
+  @param disk_template: the disk template of the instance
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
@@ -1311,11 +1344,11 @@ def _ComputeIPolicyInstanceSpecViolation(
   spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
 
   return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes, spindle_use)
+                     disk_sizes, spindle_use, disk_template)
 
 
 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
-                                 target_group,
+                                 target_group, cfg,
                                  _compute_fn=_ComputeIPolicyInstanceViolation):
   """Compute if instance meets the specs of the new target group.
 
@@ -1323,6 +1356,8 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   @param instance: The instance object to verify
   @param current_group: The current group of the instance
   @param target_group: The new group of the instance
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
@@ -1330,23 +1365,25 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   if current_group == target_group:
     return []
   else:
-    return _compute_fn(ipolicy, instance)
+    return _compute_fn(ipolicy, instance, cfg)
 
 
-def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
+def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                             _compute_fn=_ComputeIPolicyNodeViolation):
   """Checks that the target node is correct in terms of instance policy.
 
   @param ipolicy: The ipolicy to verify
   @param instance: The instance object to verify
   @param node: The new node to relocate
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param ignore: Ignore violations of the ipolicy
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
   """
   primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
-  res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
+  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)
 
   if res:
     msg = ("Instance does not meet target node group's (%s) instance"
@@ -1357,18 +1394,20 @@ def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
 
-def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
+def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
   """Computes a set of any instances that would violate the new ipolicy.
 
   @param old_ipolicy: The current (still in-place) ipolicy
   @param new_ipolicy: The new (to become) ipolicy
   @param instances: List of instances to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @return: A list of instances which violate the new ipolicy but
       did not before
 
   """
-  return (_ComputeViolatingInstances(new_ipolicy, instances) -
-          _ComputeViolatingInstances(old_ipolicy, instances))
+  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
+          _ComputeViolatingInstances(old_ipolicy, instances, cfg))
 
 
 def _ExpandItemName(fn, name, kind):
@@ -1463,12 +1502,12 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   @type vcpus: string
   @param vcpus: the count of VCPUs the instance has
   @type nics: list
-  @param nics: list of tuples (ip, mac, mode, link, net, netinfo) representing
-      the NICs the instance has
+  @param nics: list of tuples (name, uuid, ip, mac, mode, link, net, netinfo)
+      representing the NICs the instance has
   @type disk_template: string
   @param disk_template: the disk template of the instance
   @type disks: list
-  @param disks: the list of (size, mode) pairs
+  @param disks: list of tuples (name, uuid, size, mode)
   @type bep: dict
   @param bep: the backend parameters for the instance
   @type hvp: dict
@@ -1490,7 +1529,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     "INSTANCE_STATUS": status,
     "INSTANCE_MINMEM": minmem,
     "INSTANCE_MAXMEM": maxmem,
-    # TODO(2.7) remove deprecated "memory" value
+    # TODO(2.9) remove deprecated "memory" value
     "INSTANCE_MEMORY": maxmem,
     "INSTANCE_VCPUS": vcpus,
     "INSTANCE_DISK_TEMPLATE": disk_template,
@@ -1498,9 +1537,10 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   }
   if nics:
     nic_count = len(nics)
-    for idx, (ip, mac, mode, link, net, netinfo) in enumerate(nics):
+    for idx, (name, _, ip, mac, mode, link, net, netinfo) in enumerate(nics):
       if ip is None:
         ip = ""
+      env["INSTANCE_NIC%d_NAME" % idx] = name
       env["INSTANCE_NIC%d_IP" % idx] = ip
       env["INSTANCE_NIC%d_MAC" % idx] = mac
       env["INSTANCE_NIC%d_MODE" % idx] = mode
@@ -1522,7 +1562,8 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   if disks:
     disk_count = len(disks)
-    for idx, (size, mode) in enumerate(disks):
+    for idx, (name, size, mode) in enumerate(disks):
+      env["INSTANCE_DISK%d_NAME" % idx] = name
       env["INSTANCE_DISK%d_SIZE" % idx] = size
       env["INSTANCE_DISK%d_MODE" % idx] = mode
   else:
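
Putting the loop together, the hook environment for an instance with two
named disks would look roughly like this (values are made up):

    disks = [("root", 10240, "rw"), ("data", 51200, "rw")]  # (name, size, mode)
    env = {"INSTANCE_DISK_COUNT": len(disks)}
    for idx, (name, size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_NAME" % idx] = name
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
    assert env["INSTANCE_DISK1_NAME"] == "data"
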
@@ -1559,7 +1600,7 @@ def _NICToTuple(lu, nic):
   if nic.network:
     nobj = lu.cfg.GetNetwork(nic.network)
     netinfo = objects.Network.ToDict(nobj)
-  return (nic.ip, nic.mac, mode, link, nic.network, netinfo)
+  return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, nic.network, netinfo)
 
 
 def _NICListToTuple(lu, nics):
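
The tuple consumed by the NIC loop in _BuildInstanceHookEnv now carries eight
elements; a hypothetical example (all values invented):

    # (name, uuid, ip, mac, mode, link, network, netinfo)
    nic_tuple = ("eth0", "d6a136a2-0000-0000-0000-000000000000", "192.0.2.10",
                 "aa:00:00:35:d0:0f", "bridged", "br0", None, None)
    (name, _, ip, mac, mode, link, net, netinfo) = nic_tuple
    assert name == "eth0"
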
@@ -1609,7 +1650,8 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
     "vcpus": bep[constants.BE_VCPUS],
     "nics": _NICListToTuple(lu, instance.nics),
     "disk_template": instance.disk_template,
-    "disks": [(disk.size, disk.mode) for disk in instance.disks],
+    "disks": [(disk.name, disk.size, disk.mode)
+              for disk in instance.disks],
     "bep": bep,
     "hvp": hvp,
     "hypervisor_name": instance.hypervisor,
@@ -1647,17 +1689,19 @@ def _DecideSelfPromotion(lu, exceptions=None):
   return mc_now < mc_should
 
 
-def _ComputeViolatingInstances(ipolicy, instances):
+def _ComputeViolatingInstances(ipolicy, instances, cfg):
   """Computes a set of instances who violates given ipolicy.
 
   @param ipolicy: The ipolicy to verify
-  @type instances: object.Instance
+  @type instances: list of L{objects.Instance}
   @param instances: List of instances to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @return: A frozenset of instance names violating the ipolicy
 
   """
   return frozenset([inst.name for inst in instances
-                    if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
+                    if _ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
 
 
 def _CheckNicsBridgesExist(lu, target_nics, target_node):
@@ -2626,7 +2670,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     cluster = self.cfg.GetClusterInfo()
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                             self.group_info)
-    err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config)
+    err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
     _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
              code=self.ETYPE_WARNING)
 
@@ -4168,11 +4212,10 @@ class LUClusterSetParams(LogicalUnit):
     mn = self.cfg.GetMasterNode()
     return ([mn], [mn])
 
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks whether the given params don't conflict and
-    if the given volume group is valid.
+  def _CheckVgName(self, node_list, enabled_disk_templates,
+                   new_enabled_disk_templates):
+    """Check the consistency of the vg name on all nodes and in case it gets
+       unset whether there are instances still using it.
 
     """
     if self.op.vg_name is not None and not self.op.vg_name:
@@ -4180,6 +4223,55 @@ class LUClusterSetParams(LogicalUnit):
         raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                    " instances exist", errors.ECODE_INVAL)
 
+    if (self.op.vg_name is not None and
+        utils.IsLvmEnabled(enabled_disk_templates)) or \
+           (self.cfg.GetVGName() is not None and
+            utils.LvmGetsEnabled(enabled_disk_templates,
+                                 new_enabled_disk_templates)):
+      self._CheckVgNameOnNodes(node_list)
+
+  def _CheckVgNameOnNodes(self, node_list):
+    """Check the status of the volume group on each node.
+
+    """
+    vglist = self.rpc.call_vg_list(node_list)
+    for node in node_list:
+      msg = vglist[node].fail_msg
+      if msg:
+        # ignoring down node
+        self.LogWarning("Error while gathering data on node %s"
+                        " (ignoring node): %s", node, msg)
+        continue
+      vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
+                                            self.op.vg_name,
+                                            constants.MIN_VG_SIZE)
+      if vgstatus:
+        raise errors.OpPrereqError("Error on node '%s': %s" %
+                                   (node, vgstatus), errors.ECODE_ENVIRON)
+
+  def _GetEnabledDiskTemplates(self, cluster):
+    """Determines the enabled disk templates and the subset of disk templates
+       that are newly enabled by this operation.
+
+    """
+    enabled_disk_templates = None
+    new_enabled_disk_templates = []
+    if self.op.enabled_disk_templates:
+      enabled_disk_templates = self.op.enabled_disk_templates
+      new_enabled_disk_templates = \
+        list(set(enabled_disk_templates)
+             - set(cluster.enabled_disk_templates))
+    else:
+      enabled_disk_templates = cluster.enabled_disk_templates
+    return (enabled_disk_templates, new_enabled_disk_templates)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks whether the given params don't conflict and
+    if the given volume group is valid.
+
+    """
     if self.op.drbd_helper is not None and not self.op.drbd_helper:
       if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
         raise errors.OpPrereqError("Cannot disable drbd helper while"
@@ -4187,23 +4279,13 @@ class LUClusterSetParams(LogicalUnit):
                                    errors.ECODE_INVAL)
 
     node_list = self.owned_locks(locking.LEVEL_NODE)
+    self.cluster = cluster = self.cfg.GetClusterInfo()
 
-    # if vg_name not None, checks given volume group on all nodes
-    if self.op.vg_name:
-      vglist = self.rpc.call_vg_list(node_list)
-      for node in node_list:
-        msg = vglist[node].fail_msg
-        if msg:
-          # ignoring down node
-          self.LogWarning("Error while gathering data on node %s"
-                          " (ignoring node): %s", node, msg)
-          continue
-        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
-                                              self.op.vg_name,
-                                              constants.MIN_VG_SIZE)
-        if vgstatus:
-          raise errors.OpPrereqError("Error on node '%s': %s" %
-                                     (node, vgstatus), errors.ECODE_ENVIRON)
+    (enabled_disk_templates, new_enabled_disk_templates) = \
+      self._GetEnabledDiskTemplates(cluster)
+
+    self._CheckVgName(node_list, enabled_disk_templates,
+                      new_enabled_disk_templates)
 
     if self.op.drbd_helper:
       # checks given drbd helper on all nodes
@@ -4222,7 +4304,6 @@ class LUClusterSetParams(LogicalUnit):
           raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                      (node, node_helper), errors.ECODE_ENVIRON)
 
-    self.cluster = cluster = self.cfg.GetClusterInfo()
     # validate params changes
     if self.op.beparams:
       objects.UpgradeBeParams(self.op.beparams)
@@ -4266,7 +4347,7 @@ class LUClusterSetParams(LogicalUnit):
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group)
         new = _ComputeNewInstanceViolations(ipol,
-                                            new_ipolicy, instances)
+                                            new_ipolicy, instances, self.cfg)
         if new:
           violations.update(new)
 
@@ -4383,6 +4464,8 @@ class LUClusterSetParams(LogicalUnit):
           hv_class.CheckParameterSyntax(hv_params)
           _CheckHVParams(self, node_list, hv_name, hv_params)
 
+    self._CheckDiskTemplateConsistency()
+
     if self.op.os_hvp:
       # no need to check any newly-enabled hypervisors, since the
       # defaults have already been checked in the above code-block
@@ -4405,20 +4488,64 @@ class LUClusterSetParams(LogicalUnit):
                                    " specified" % self.op.default_iallocator,
                                    errors.ECODE_INVAL)
 
-  def Exec(self, feedback_fn):
-    """Change the parameters of the cluster.
+  def _CheckDiskTemplateConsistency(self):
+    """Check whether the disk templates that are going to be disabled
+       are still in use by some instances.
+
+    """
+    if self.op.enabled_disk_templates:
+      cluster = self.cfg.GetClusterInfo()
+      instances = self.cfg.GetAllInstancesInfo()
+
+      disk_templates_to_remove = set(cluster.enabled_disk_templates) \
+        - set(self.op.enabled_disk_templates)
+      for instance in instances.itervalues():
+        if instance.disk_template in disk_templates_to_remove:
+          raise errors.OpPrereqError("Cannot disable disk template '%s',"
+                                     " because instance '%s' is using it." %
+                                     (instance.disk_template, instance.name))
+
+
+  def _SetVgName(self, feedback_fn):
+    """Determines and sets the new volume group name.
 
     """
     if self.op.vg_name is not None:
+      if self.op.vg_name and not \
+           utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
+        feedback_fn("Note that you specified a volume group, but did not"
+                    " enable any lvm disk template.")
       new_volume = self.op.vg_name
       if not new_volume:
+        if utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
+          raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
+                                     " disk templates are enabled.")
         new_volume = None
       if new_volume != self.cfg.GetVGName():
         self.cfg.SetVGName(new_volume)
       else:
         feedback_fn("Cluster LVM configuration already in desired"
                     " state, not changing")
+    else:
+      if utils.IsLvmEnabled(self.cluster.enabled_disk_templates) and \
+          not self.cfg.GetVGName():
+        raise errors.OpPrereqError("Please specify a volume group when"
+                                   " enabling lvm-based disk-templates.")
+
+  def Exec(self, feedback_fn):
+    """Change the parameters of the cluster.
+
+    """
+    if self.op.enabled_disk_templates:
+      self.cluster.enabled_disk_templates = \
+        list(set(self.op.enabled_disk_templates))
+
+    self._SetVgName(feedback_fn)
+
     if self.op.drbd_helper is not None:
+      if constants.DT_DRBD8 not in self.cluster.enabled_disk_templates:
+        feedback_fn("Note that you specified a drbd user helper, but did"
+                    " not enable the drbd disk template.")
       new_helper = self.op.drbd_helper
       if not new_helper:
         new_helper = None
@@ -7390,8 +7517,7 @@ class LUInstanceReboot(LogicalUnit):
         self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
                                              reboot_type,
-                                             self.op.shutdown_timeout,
-                                             reason)
+                                             self.op.shutdown_timeout, reason)
       result.Raise("Could not reboot instance")
     else:
       if instance_running:
@@ -7599,6 +7725,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     constants.IDISK_VG,
     constants.IDISK_METAVG,
     constants.IDISK_PROVIDER,
+    constants.IDISK_NAME,
     ]))
 
   def _RunAllocator(self):
@@ -8139,8 +8266,8 @@ def _ExpandNamesForMigration(lu):
   lu.needed_locks[locking.LEVEL_NODE_RES] = []
   lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
 
-  # The node allocation lock is actually only needed for replicated instances
-  # (e.g. DRBD8) and if an iallocator is used.
+  # The node allocation lock is actually only needed for externally replicated
+  # instances (e.g. sharedfile or RBD) and if an iallocator is used.
   lu.needed_locks[locking.LEVEL_NODE_ALLOC] = []
 
 
@@ -8365,6 +8492,10 @@ class LUInstanceMove(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
+    if instance.disk_template not in constants.DTS_COPYABLE:
+      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
+                                 instance.disk_template, errors.ECODE_STATE)
+
     node = self.cfg.GetNodeInfo(self.op.target_node)
     assert node is not None, \
       "Cannot retrieve locked node %s" % self.op.target_node
@@ -8389,7 +8520,7 @@ class LUInstanceMove(LogicalUnit):
     cluster = self.cfg.GetClusterInfo()
     group_info = self.cfg.GetNodeGroup(node.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
+    _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                             ignore=self.op.ignore_ipolicy)
 
     if instance.admin_state == constants.ADMINST_UP:
@@ -8440,12 +8571,9 @@ class LUInstanceMove(LogicalUnit):
     try:
       _CreateDisks(self, instance, target_node=target_node)
     except errors.OpExecError:
-      self.LogWarning("Device creation failed, reverting...")
-      try:
-        _RemoveDisks(self, instance, target_node=target_node)
-      finally:
-        self.cfg.ReleaseDRBDMinors(instance.name)
-        raise
+      self.LogWarning("Device creation failed")
+      self.cfg.ReleaseDRBDMinors(instance.name)
+      raise
 
     cluster_name = self.cfg.GetClusterInfo().cluster_name
 
@@ -8648,11 +8776,10 @@ class TLMigrateInstance(Tasklet):
                                  errors.ECODE_STATE)
 
     if instance.disk_template in constants.DTS_EXT_MIRROR:
-      assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
-
       _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
 
       if self.lu.op.iallocator:
+        assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
         self._RunAllocator()
       else:
         # We set set self.target_node as it is required by
@@ -8664,7 +8791,7 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                               ignore=self.ignore_ipolicy)
 
       # self.target_node is already populated, either directly or by the
@@ -8708,7 +8835,7 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                               ignore=self.ignore_ipolicy)
 
     i_be = cluster.FillBE(instance)
@@ -9392,16 +9519,19 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
   dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                           logical_id=(vgnames[0], names[0]),
                           params={})
+  dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   dev_meta = objects.Disk(dev_type=constants.LD_LV,
                           size=constants.DRBD_META_SIZE,
                           logical_id=(vgnames[1], names[1]),
                           params={})
+  dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                           logical_id=(primary, secondary, port,
                                       p_minor, s_minor,
                                       shared_secret),
                           children=[dev_data, dev_meta],
                           iv_name=iv_name, params={})
+  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   return drbd_dev
 
 
@@ -9463,6 +9593,7 @@ def _GenerateDiskTemplate(
                                       "disk/%d" % disk_index,
                                       minors[idx * 2], minors[idx * 2 + 1])
       disk_dev.mode = disk[constants.IDISK_MODE]
+      disk_dev.name = disk.get(constants.IDISK_NAME, None)
       disks.append(disk_dev)
   else:
     if secondary_nodes:
@@ -9523,11 +9654,14 @@ def _GenerateDiskTemplate(
       size = disk[constants.IDISK_SIZE]
       feedback_fn("* disk %s, size %s" %
                   (disk_index, utils.FormatUnit(size, "h")))
-      disks.append(objects.Disk(dev_type=dev_type, size=size,
-                                logical_id=logical_id_fn(idx, disk_index, disk),
-                                iv_name="disk/%d" % disk_index,
-                                mode=disk[constants.IDISK_MODE],
-                                params=params))
+      disk_dev = objects.Disk(dev_type=dev_type, size=size,
+                              logical_id=logical_id_fn(idx, disk_index, disk),
+                              iv_name="disk/%d" % disk_index,
+                              mode=disk[constants.IDISK_MODE],
+                              params=params)
+      disk_dev.name = disk.get(constants.IDISK_NAME, None)
+      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
+      disks.append(disk_dev)
 
   return disks
 
@@ -9679,6 +9813,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     result.Raise("Failed to create directory '%s' on"
                  " node %s" % (file_storage_dir, pnode))
 
+  disks_created = []
   # Note: this needs to be kept in sync with adding of disks in
   # LUInstanceSetParams
   for idx, device in enumerate(instance.disks):
@@ -9688,7 +9823,19 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     #HARDCODE
     for node in all_nodes:
       f_create = node == pnode
-      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+      try:
+        _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+        disks_created.append((node, device))
+      except errors.OpExecError:
+        logging.warning("Creating disk %s for instance '%s' failed",
+                        idx, instance.name)
+        for (node, disk) in disks_created:
+          lu.cfg.SetDiskID(disk, node)
+          result = lu.rpc.call_blockdev_remove(node, disk)
+          if result.fail_msg:
+            logging.warning("Failed to remove newly-created disk %s on node %s:"
+                            " %s", device, node, result.fail_msg)
+        raise
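
The rollback logic above in miniature, with generic callables standing in
for _CreateBlockDev and the blockdev_remove RPC:

    def create_all(devices, create_fn, remove_fn):
      # Remember what has been created so far; on failure, best-effort
      # removal of everything already created, then re-raise.
      created = []
      for dev in devices:
        try:
          create_fn(dev)
          created.append(dev)
        except Exception:
          for done in created:
            try:
              remove_fn(done)
            except Exception:
              pass  # cleanup is best-effort, mirroring the logged warning
          raise
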
 
 
 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
@@ -9696,8 +9843,7 @@ def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
 
   This abstracts away some work from `AddInstance()` and
   `RemoveInstance()`. Note that in case some of the devices couldn't
-  be removed, the removal will continue with the other ones (compare
-  with `_CreateDisks()`).
+  be removed, the removal will continue with the other ones.
 
   @type lu: L{LogicalUnit}
   @param lu: the logical unit on whose behalf we execute
@@ -9968,8 +10114,13 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
     check_params = cluster.SimpleFillNIC(nicparams)
     objects.NIC.CheckParameterSyntax(check_params)
     net_uuid = cfg.LookupNetwork(net)
-    nics.append(objects.NIC(mac=mac, ip=nic_ip,
-                            network=net_uuid, nicparams=nicparams))
+    name = nic.get(constants.INIC_NAME, None)
+    if name is not None and name.lower() == constants.VALUE_NONE:
+      name = None
+    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
+                          network=net_uuid, nicparams=nicparams)
+    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
+    nics.append(nic_obj)
 
   return nics
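
The name normalization used here (and for disks below) in isolation: the
literal string "none", in any case, becomes a real None.

    VALUE_NONE = "none"  # assumed value of constants.VALUE_NONE

    def normalize_device_name(params):
      name = params.get("name", None)
      if name is not None and name.lower() == VALUE_NONE:
        name = None
      return name

    assert normalize_device_name({"name": "NONE"}) is None
    assert normalize_device_name({"name": "eth0"}) == "eth0"
    assert normalize_device_name({}) is None
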
 
@@ -10006,10 +10157,14 @@ def _ComputeDisks(op, default_vg):
                                  op.disk_template), errors.ECODE_INVAL)
 
     data_vg = disk.get(constants.IDISK_VG, default_vg)
+    name = disk.get(constants.IDISK_NAME, None)
+    if name is not None and name.lower() == constants.VALUE_NONE:
+      name = None
     new_disk = {
       constants.IDISK_SIZE: size,
       constants.IDISK_MODE: mode,
       constants.IDISK_VG: data_vg,
+      constants.IDISK_NAME: name,
       }
 
     if constants.IDISK_METAVG in disk:
@@ -10091,6 +10246,19 @@ class LUInstanceCreate(LogicalUnit):
     # check nics' parameter names
     for nic in self.op.nics:
       utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
+    # check that NIC names are unique and valid
+    utils.ValidateDeviceNames("NIC", self.op.nics)
+
+    # check that disk's names are unique and valid
+    utils.ValidateDeviceNames("disk", self.op.disks)
+
+    cluster = self.cfg.GetClusterInfo()
+    if not self.op.disk_template in cluster.enabled_disk_templates:
+      raise errors.OpPrereqError("Cannot create an instance with disk template"
+                                 " '%s', because it is not enabled in the"
+                                 " cluster. Enabled disk templates are: %s." %
+                                 (self.op.disk_template,
+                                  ",".join(cluster.enabled_disk_templates)))
 
     # check disks. parameter names and consistent adopt/no-adopt strategy
     has_adopt = has_no_adopt = False
@@ -10356,8 +10524,8 @@ class LUInstanceCreate(LogicalUnit):
       vcpus=self.be_full[constants.BE_VCPUS],
       nics=_NICListToTuple(self, self.nics),
       disk_template=self.op.disk_template,
-      disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
-             for d in self.disks],
+      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
+             d[constants.IDISK_MODE]) for d in self.disks],
       bep=self.be_full,
       hvp=self.hv_full,
       hypervisor_name=self.op.hypervisor,
@@ -10769,25 +10937,6 @@ class LUInstanceCreate(LogicalUnit):
 
     nodenames = [pnode.name] + self.secondaries
 
-    # Verify instance specs
-    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
-    ispec = {
-      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
-      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
-      constants.ISPEC_DISK_COUNT: len(self.disks),
-      constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
-      constants.ISPEC_NIC_COUNT: len(self.nics),
-      constants.ISPEC_SPINDLE_USE: spindle_use,
-      }
-
-    group_info = self.cfg.GetNodeGroup(pnode.group)
-    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
-    if not self.op.ignore_ipolicy and res:
-      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
-             (pnode.group, group_info.name, utils.CommaJoin(res)))
-      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-
     if not self.adopt_disks:
       if self.op.disk_template == constants.DT_RBD:
         # _CheckRADOSFreeSpace() is just a placeholder.
@@ -10886,12 +11035,12 @@ class LUInstanceCreate(LogicalUnit):
 
     group_info = self.cfg.GetNodeGroup(pnode.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
+    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
+                                               self.op.disk_template)
     if not self.op.ignore_ipolicy and res:
-      raise errors.OpPrereqError(("Instance allocation to group %s violates"
-                                  " policy: %s") % (pnode.group,
-                                                    utils.CommaJoin(res)),
-                                  errors.ECODE_INVAL)
+      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
+             (pnode.group, group_info.name, utils.CommaJoin(res)))
+      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
     _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
 
@@ -10982,12 +11131,9 @@ class LUInstanceCreate(LogicalUnit):
       try:
         _CreateDisks(self, iobj)
       except errors.OpExecError:
-        self.LogWarning("Device creation failed, reverting...")
-        try:
-          _RemoveDisks(self, iobj)
-        finally:
-          self.cfg.ReleaseDRBDMinors(instance)
-          raise
+        self.LogWarning("Device creation failed")
+        self.cfg.ReleaseDRBDMinors(instance)
+        raise
 
     feedback_fn("adding instance %s to cluster config" % instance)
 
@@ -11738,7 +11884,7 @@ class TLReplaceDisks(Tasklet):
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               new_group_info)
       _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
-                              ignore=self.ignore_ipolicy)
+                              self.cfg, ignore=self.ignore_ipolicy)
 
     for node in check_nodes:
       _CheckNodeOnline(self.lu, node)
@@ -12951,6 +13097,8 @@ class LUInstanceQueryData(NoHooksLU):
       "children": dev_children,
       "mode": dev.mode,
       "size": dev.size,
+      "name": dev.name,
+      "uuid": dev.uuid,
       }
 
   def Exec(self, feedback_fn):
@@ -13046,6 +13194,42 @@ def PrepareContainerMods(mods, private_fn):
   return [(op, idx, params, fn()) for (op, idx, params) in mods]
 
 
+def GetItemFromContainer(identifier, kind, container):
+  """Return the item refered by the identifier.
+
+  @type identifier: string
+  @param identifier: Item index or name or UUID
+  @type kind: string
+  @param kind: One-word item description
+  @type container: list
+  @param container: Container to get the item from
+
+  """
+  # Index
+  try:
+    idx = int(identifier)
+    if idx == -1:
+      # Append
+      absidx = len(container) - 1
+    elif idx < 0:
+      raise IndexError("Not accepting negative indices other than -1")
+    elif idx > len(container):
+      raise IndexError("Got %s index %s, but there are only %s" %
+                       (kind, idx, len(container)))
+    else:
+      absidx = idx
+    return (absidx, container[idx])
+  except ValueError:
+    pass
+
+  for idx, item in enumerate(container):
+    if item.uuid == identifier or item.name == identifier:
+      return (idx, item)
+
+  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
+                             (kind, identifier), errors.ECODE_NOENT)
+
+
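
Illustrative lookups through the new helper (minimal stand-in objects; names
and UUIDs are invented):

    class FakeItem(object):
      def __init__(self, name, uuid):
        self.name = name
        self.uuid = uuid

    nics = [FakeItem("eth0", "aa-bb"), FakeItem("eth1", "cc-dd")]
    # GetItemFromContainer("1", "nic", nics)     -> (1, nics[1])  by index
    # GetItemFromContainer("eth0", "nic", nics)  -> (0, nics[0])  by name
    # GetItemFromContainer("cc-dd", "nic", nics) -> (1, nics[1])  by UUID
    # GetItemFromContainer("-1", "nic", nics)    -> (1, nics[1])  last item
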
 #: Type description for changes as returned by L{ApplyContainerMods}'s
 #: callbacks
 _TApplyContModsCbChanges = \
@@ -13082,25 +13266,26 @@ def ApplyContainerMods(kind, container, chgdesc, mods,
     item and private data object as added by L{PrepareContainerMods}
 
   """
-  for (op, idx, params, private) in mods:
-    if idx == -1:
-      # Append
-      absidx = len(container) - 1
-    elif idx < 0:
-      raise IndexError("Not accepting negative indices other than -1")
-    elif idx > len(container):
-      raise IndexError("Got %s index %s, but there are only %s" %
-                       (kind, idx, len(container)))
-    else:
-      absidx = idx
-
+  for (op, identifier, params, private) in mods:
     changes = None
 
     if op == constants.DDM_ADD:
       # Calculate where item will be added
+      # When adding an item, identifier can only be an index
+      try:
+        idx = int(identifier)
+      except ValueError:
+        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
+                                   " identifier for %s" % constants.DDM_ADD,
+                                   errors.ECODE_INVAL)
       if idx == -1:
         addidx = len(container)
       else:
+        if idx < 0:
+          raise IndexError("Not accepting negative indices other than -1")
+        elif idx > len(container):
+          raise IndexError("Got %s index %s, but there are only %s" %
+                           (kind, idx, len(container)))
         addidx = idx
 
       if create_fn is None:
@@ -13117,10 +13302,7 @@ def ApplyContainerMods(kind, container, chgdesc, mods,
         container.insert(idx, item)
     else:
       # Retrieve existing item
-      try:
-        item = container[absidx]
-      except IndexError:
-        raise IndexError("Invalid %s index %s" % (kind, idx))
+      (absidx, item) = GetItemFromContainer(identifier, kind, container)
 
       if op == constants.DDM_REMOVE:
         assert not params
@@ -13246,19 +13428,21 @@ class LUInstanceSetParams(LogicalUnit):
                                    errors.ECODE_INVAL)
 
       params[constants.IDISK_SIZE] = size
+      name = params.get(constants.IDISK_NAME, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.IDISK_NAME] = None
 
     elif op == constants.DDM_MODIFY:
       if constants.IDISK_SIZE in params:
         raise errors.OpPrereqError("Disk size change not possible, use"
                                    " grow-disk", errors.ECODE_INVAL)
-      if constants.IDISK_MODE not in params:
-        raise errors.OpPrereqError("Disk 'mode' is the only kind of"
-                                   " modification supported, but missing",
-                                   errors.ECODE_NOENT)
-      if len(params) > 1:
+      if len(params) > 2:
         raise errors.OpPrereqError("Disk modification doesn't support"
                                    " additional arbitrary parameters",
                                    errors.ECODE_INVAL)
+      name = params.get(constants.IDISK_NAME, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.IDISK_NAME] = None
 
   @staticmethod
   def _VerifyNicModification(op, params):
@@ -13267,9 +13451,12 @@ class LUInstanceSetParams(LogicalUnit):
     """
     if op in (constants.DDM_ADD, constants.DDM_MODIFY):
       ip = params.get(constants.INIC_IP, None)
+      name = params.get(constants.INIC_NAME, None)
       req_net = params.get(constants.INIC_NETWORK, None)
       link = params.get(constants.NIC_LINK, None)
       mode = params.get(constants.NIC_MODE, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.INIC_NAME] = None
       if req_net is not None:
         if req_net.lower() == constants.VALUE_NONE:
           params[constants.INIC_NETWORK] = None
@@ -13311,7 +13498,8 @@ class LUInstanceSetParams(LogicalUnit):
   def CheckArguments(self):
     if not (self.op.nics or self.op.disks or self.op.disk_template or
             self.op.hvparams or self.op.beparams or self.op.os_name or
-            self.op.offline is not None or self.op.runtime_mem):
+            self.op.offline is not None or self.op.runtime_mem or
+            self.op.pnode):
       raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
 
     if self.op.hvparams:
@@ -13339,6 +13527,9 @@ class LUInstanceSetParams(LogicalUnit):
     self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                     self._VerifyNicModification)
 
+    if self.op.pnode:
+      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODEGROUP] = []
@@ -13497,23 +13688,28 @@ class LUInstanceSetParams(LogicalUnit):
         params[constants.INIC_MAC] = \
           self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
 
-    #if there is a change in nic's ip/network configuration
+    # if there is a change in the (ip, network) tuple
     new_ip = params.get(constants.INIC_IP, old_ip)
     if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
       if new_ip:
+        # if IP is pool then require a network and generate one IP
         if new_ip.lower() == constants.NIC_IP_POOL:
-          if not new_net_uuid:
+          if new_net_uuid:
+            try:
+              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
+            except errors.ReservationError:
+              raise errors.OpPrereqError("Unable to get a free IP"
+                                         " from the address pool",
+                                         errors.ECODE_STATE)
+            self.LogInfo("Chose IP %s from network %s",
+                         new_ip,
+                         new_net_obj.name)
+            params[constants.INIC_IP] = new_ip
+          else:
             raise errors.OpPrereqError("ip=pool, but no network found",
                                        errors.ECODE_INVAL)
-          try:
-            new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
-          except errors.ReservationError:
-            raise errors.OpPrereqError("Unable to get a free IP"
-                                       " from the address pool",
-                                       errors.ECODE_STATE)
-          self.LogInfo("Chose IP %s from network %s", new_ip, new_net_obj.name)
-          params[constants.INIC_IP] = new_ip
-        elif new_ip != old_ip or new_net_uuid != old_net_uuid:
+        # Reserve the new IP in the new network, if any
+        elif new_net_uuid:
           try:
             self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
             self.LogInfo("Reserving IP %s in network %s",
@@ -13522,19 +13718,19 @@ class LUInstanceSetParams(LogicalUnit):
             raise errors.OpPrereqError("IP %s not available in network %s" %
                                        (new_ip, new_net_obj.name),
                                        errors.ECODE_NOTUNIQUE)
-
-        # new net is None
-        elif not new_net_uuid and self.op.conflicts_check:
+        # new network is None so check if new IP is a conflicting IP
+        elif self.op.conflicts_check:
           _CheckForConflictingIp(self, new_ip, pnode)
 
-      if old_ip:
+      # release old IP if old network is not None
+      if old_ip and old_net_uuid:
         try:
           self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
         except errors.AddressPoolError:
           logging.warning("Release IP %s not contained in network %s",
                           old_ip, old_net_obj.name)
 
-    # there are no changes in (net, ip) tuple
+    # there are no changes in the (ip, network) tuple and old network is set
     elif (old_net_uuid is not None and
           (req_link is not None or req_mode is not None)):
       raise errors.OpPrereqError("Not allowed to change link or mode of"
@@ -13581,7 +13777,7 @@ class LUInstanceSetParams(LogicalUnit):
       snode_group = self.cfg.GetNodeGroup(snode_info.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               snode_group)
-      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
+      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                               ignore=self.op.ignore_ipolicy)
       if pnode_info.group != snode_info.group:
         self.LogWarning("The primary and secondary nodes are in two"
@@ -13616,6 +13812,21 @@ class LUInstanceSetParams(LogicalUnit):
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
     pnode = instance.primary_node
+
+    self.warn = []
+
+    if (self.op.pnode is not None and self.op.pnode != pnode and
+        not self.op.force):
+      # verify that the instance is not up
+      instance_info = self.rpc.call_instance_info(pnode, instance.name,
+                                                  instance.hypervisor)
+      if instance_info.fail_msg:
+        self.warn.append("Can't get instance runtime information: %s" %
+                         instance_info.fail_msg)
+      elif instance_info.payload:
+        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
+                                   errors.ECODE_STATE)
+
     assert pnode in self.owned_locks(locking.LEVEL_NODE)
     nodelist = list(instance.all_nodes)
     pnode_info = self.cfg.GetNodeInfo(pnode)
@@ -13748,8 +13959,6 @@ class LUInstanceSetParams(LogicalUnit):
     else:
       self.os_inst = {}
 
-    self.warn = []
-
     #TODO(dynmem): do the appropriate check involving MINMEM
     if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
         be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
@@ -13867,9 +14076,14 @@ class LUInstanceSetParams(LogicalUnit):
                                  " (%d), cannot add more" % constants.MAX_NICS,
                                  errors.ECODE_STATE)
 
+    def _PrepareDiskMod(_, disk, params, __):
+      disk.name = params.get(constants.IDISK_NAME, None)
+
     # Verify disk changes (operating on a copy)
-    disks = instance.disks[:]
-    ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
+    disks = copy.deepcopy(instance.disks)
+    ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
+                       None)
+    utils.ValidateDeviceNames("disk", disks)
     if len(disks) > constants.MAX_DISKS:
       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                  " more" % constants.MAX_DISKS,
@@ -13891,6 +14105,8 @@ class LUInstanceSetParams(LogicalUnit):
       nics = [nic.Copy() for nic in instance.nics]
       ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
+      # Verify that NIC names are unique and valid
+      utils.ValidateDeviceNames("NIC", nics)
       self._new_nics = nics
       ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
     else:
@@ -13908,14 +14124,20 @@ class LUInstanceSetParams(LogicalUnit):
                                                          None)
 
       # Copy ispec to verify parameters with min/max values separately
+      if self.op.disk_template:
+        new_disk_template = self.op.disk_template
+      else:
+        new_disk_template = instance.disk_template
       ispec_max = ispec.copy()
       ispec_max[constants.ISPEC_MEM_SIZE] = \
         self.be_new.get(constants.BE_MAXMEM, None)
-      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max)
+      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
+                                                     new_disk_template)
       ispec_min = ispec.copy()
       ispec_min[constants.ISPEC_MEM_SIZE] = \
         self.be_new.get(constants.BE_MINMEM, None)
-      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min)
+      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
+                                                     new_disk_template)
 
       if (res_max or res_min):
         # FIXME: Improve error message by including information about whether
@@ -13938,7 +14160,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # create a fake disk info for _GenerateDiskTemplate
     disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
-                  constants.IDISK_VG: d.logical_id[0]}
+                  constants.IDISK_VG: d.logical_id[0],
+                  constants.IDISK_NAME: d.name}
                  for d in instance.disks]
     new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                       instance.name, pnode, [snode],
@@ -14007,10 +14230,11 @@ class LUInstanceSetParams(LogicalUnit):
     old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
     new_disks = [d.children[0] for d in instance.disks]
 
-    # copy over size and mode
+    # copy over size, mode and name
     for parent, child in zip(old_disks, new_disks):
       child.size = parent.size
       child.mode = parent.mode
+      child.name = parent.name
 
     # this is a DRBD disk, return its port to the pool
     # NOTE: this must be done right before the call to cfg.Update!
@@ -14021,6 +14245,7 @@ class LUInstanceSetParams(LogicalUnit):
     # update instance structure
     instance.disks = new_disks
     instance.disk_template = constants.DT_PLAIN
+    _UpdateIvNames(0, instance.disks)
     self.cfg.Update(instance, feedback_fn)
 
     # Release locks in case removing disks takes a while
@@ -14090,11 +14315,17 @@ class LUInstanceSetParams(LogicalUnit):
     """Modifies a disk.
 
     """
-    disk.mode = params[constants.IDISK_MODE]
+    changes = []
+    mode = params.get(constants.IDISK_MODE, None)
+    if mode:
+      disk.mode = mode
+      changes.append(("disk.mode/%d" % idx, disk.mode))
 
-    return [
-      ("disk.mode/%d" % idx, disk.mode),
-      ]
+    name = params.get(constants.IDISK_NAME, None)
+    disk.name = name
+    changes.append(("disk.name/%d" % idx, disk.name))
+
+    return changes
 
   def _RemoveDisk(self, idx, root, _):
     """Removes a disk.
@@ -14112,18 +14343,22 @@ class LUInstanceSetParams(LogicalUnit):
     if root.dev_type in constants.LDS_DRBD:
       self.cfg.AddTcpUdpPort(root.logical_id[2])
 
-  @staticmethod
-  def _CreateNewNic(idx, params, private):
+  def _CreateNewNic(self, idx, params, private):
     """Creates data structure for a new network interface.
 
     """
     mac = params[constants.INIC_MAC]
     ip = params.get(constants.INIC_IP, None)
     net = params.get(constants.INIC_NETWORK, None)
+    name = params.get(constants.INIC_NAME, None)
+    net_uuid = self.cfg.LookupNetwork(net)
     #TODO: not private.filled?? can a nic have no nicparams??
     nicparams = private.filled
+    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
+                       nicparams=nicparams)
+    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
 
-    return (objects.NIC(mac=mac, ip=ip, network=net, nicparams=nicparams), [
+    return (nobj, [
       ("nic.%d" % idx,
        "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
        (mac, ip, private.filled[constants.NIC_MODE],
@@ -14131,18 +14366,23 @@ class LUInstanceSetParams(LogicalUnit):
        net)),
       ])
 
-  @staticmethod
-  def _ApplyNicMods(idx, nic, params, private):
+  def _ApplyNicMods(self, idx, nic, params, private):
     """Modifies a network interface.
 
     """
     changes = []
 
-    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NETWORK]:
+    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
       if key in params:
         changes.append(("nic.%s/%d" % (key, idx), params[key]))
         setattr(nic, key, params[key])
 
+    new_net = params.get(constants.INIC_NETWORK, nic.network)
+    new_net_uuid = self.cfg.LookupNetwork(new_net)
+    if new_net_uuid != nic.network:
+      changes.append(("nic.network/%d" % idx, new_net))
+      nic.network = new_net_uuid
+
     if private.filled:
       nic.nicparams = private.filled
 
@@ -14170,6 +14410,10 @@ class LUInstanceSetParams(LogicalUnit):
     result = []
     instance = self.instance
 
+    # New primary node
+    if self.op.pnode:
+      instance.primary_node = self.op.pnode
+
     # runtime memory
     if self.op.runtime_mem:
       rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
@@ -15346,7 +15590,7 @@ class LUGroupSetParams(LogicalUnit):
       violations = \
           _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                                   self.group),
-                                        new_ipolicy, instances)
+                                        new_ipolicy, instances, self.cfg)
 
       if violations:
         self.LogWarning("After the ipolicy change the following instances"
@@ -16166,7 +16410,8 @@ class LUTestAllocator(NoHooksLU):
                                           nics=self.op.nics,
                                           vcpus=self.op.vcpus,
                                           spindle_use=self.op.spindle_use,
-                                          hypervisor=self.op.hypervisor)
+                                          hypervisor=self.op.hypervisor,
+                                          node_whitelist=None)
     elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
       req = iallocator.IAReqRelocate(name=self.op.name,
                                      relocate_from=list(self.relocate_from))
@@ -16702,6 +16947,11 @@ class LUNetworkConnect(LogicalUnit):
 
     assert self.group_uuid in owned_groups
 
+    # Check if locked instances are still correct
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    if self.op.conflicts_check:
+      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+
     self.netparams = {
       constants.NIC_MODE: self.network_mode,
       constants.NIC_LINK: self.network_link,
@@ -16716,23 +16966,22 @@ class LUNetworkConnect(LogicalUnit):
       self.LogWarning("Network '%s' is already mapped to group '%s'" %
                       (self.network_name, self.group.name))
       self.connected = True
-      return
 
-    if self.op.conflicts_check:
+    # check only if not already connected
+    elif self.op.conflicts_check:
       pool = network.AddressPool(self.cfg.GetNetwork(self.network_uuid))
 
       _NetworkConflictCheck(self, lambda nic: pool.Contains(nic.ip),
-                            "connect to")
+                            "connect to", owned_instances)
 
   def Exec(self, feedback_fn):
-    if self.connected:
-      return
-
-    self.group.networks[self.network_uuid] = self.netparams
-    self.cfg.Update(self.group, feedback_fn)
+    # Connect the network and update the group only if not already connected
+    if not self.connected:
+      self.group.networks[self.network_uuid] = self.netparams
+      self.cfg.Update(self.group, feedback_fn)
 
 
-def _NetworkConflictCheck(lu, check_fn, action):
+def _NetworkConflictCheck(lu, check_fn, action, instances):
   """Checks for network interface conflicts with a network.
 
   @type lu: L{LogicalUnit}
@@ -16744,13 +16993,9 @@ def _NetworkConflictCheck(lu, check_fn, action):
   @raise errors.OpPrereqError: If conflicting IP addresses are found.
 
   """
-  # Check if locked instances are still correct
-  owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
-  _CheckNodeGroupInstances(lu.cfg, lu.group_uuid, owned_instances)
-
   conflicts = []
 
-  for (_, instance) in lu.cfg.GetMultiInstanceInfo(owned_instances):
+  for (_, instance) in lu.cfg.GetMultiInstanceInfo(instances):
     instconflicts = [(idx, nic.ip)
                      for (idx, nic) in enumerate(instance.nics)
                      if check_fn(nic)]
@@ -16824,23 +17069,27 @@ class LUNetworkDisconnect(LogicalUnit):
 
     assert self.group_uuid in owned_groups
 
+    # Check if locked instances are still correct
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True
     if self.network_uuid not in self.group.networks:
       self.LogWarning("Network '%s' is not mapped to group '%s'",
                       self.network_name, self.group.name)
       self.connected = False
-      return
 
-    _NetworkConflictCheck(self, lambda nic: nic.network == self.network_uuid,
-                          "disconnect from")
+    # We need this check only if the network is still connected
+    else:
+      _NetworkConflictCheck(self, lambda nic: nic.network == self.network_uuid,
+                            "disconnect from", owned_instances)
 
   def Exec(self, feedback_fn):
-    if not self.connected:
-      return
-
-    del self.group.networks[self.network_uuid]
-    self.cfg.Update(self.group, feedback_fn)
+    # Disconnect the network and update the group only if network is connected
+    if self.connected:
+      del self.group.networks[self.network_uuid]
+      self.cfg.Update(self.group, feedback_fn)
 
 
 #: Query type implementations