Merge remote-tracking branch 'origin/stable-2.8'
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 7722680..82f7b2e 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -814,45 +814,51 @@ def _GetUpdatedParams(old_params, update_dict,
 
 
 def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
-  """Return the new version of a instance policy.
+  """Return the new version of an instance policy.
 
   @param group_policy: whether this policy applies to a group and thus
     we should support removal of policy entries
 
   """
-  use_none = use_default = group_policy
   ipolicy = copy.deepcopy(old_ipolicy)
   for key, value in new_ipolicy.items():
     if key not in constants.IPOLICY_ALL_KEYS:
       raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                  errors.ECODE_INVAL)
-    if key in constants.IPOLICY_ISPECS:
-      utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
-      ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
-                                       use_none=use_none,
-                                       use_default=use_default)
+    if (not value or value == [constants.VALUE_DEFAULT] or
+        value == constants.VALUE_DEFAULT):
+      if group_policy:
+        if key in ipolicy:
+          del ipolicy[key]
+      else:
+        raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
+                                   " on the cluster" % key,
+                                   errors.ECODE_INVAL)
     else:
-      if (not value or value == [constants.VALUE_DEFAULT] or
-          value == constants.VALUE_DEFAULT):
+      if key in constants.IPOLICY_PARAMETERS:
+        # FIXME: we assume all such values are float
+        try:
+          ipolicy[key] = float(value)
+        except (TypeError, ValueError), err:
+          raise errors.OpPrereqError("Invalid value for attribute"
+                                     " '%s': '%s', error: %s" %
+                                     (key, value, err), errors.ECODE_INVAL)
+      elif key == constants.ISPECS_MINMAX:
+        for minmax in value:
+          for k in minmax.keys():
+            utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
+        ipolicy[key] = value
+      elif key == constants.ISPECS_STD:
         if group_policy:
-          del ipolicy[key]
-        else:
-          raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
-                                     " on the cluster'" % key,
-                                     errors.ECODE_INVAL)
+          msg = "%s cannot appear in group instance specs" % key
+          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+        ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
+                                         use_none=False, use_default=False)
+        utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
       else:
-        if key in constants.IPOLICY_PARAMETERS:
-          # FIXME: we assume all such values are float
-          try:
-            ipolicy[key] = float(value)
-          except (TypeError, ValueError), err:
-            raise errors.OpPrereqError("Invalid value for attribute"
-                                       " '%s': '%s', error: %s" %
-                                       (key, value, err), errors.ECODE_INVAL)
-        else:
-          # FIXME: we assume all others are lists; this should be redone
-          # in a nicer way
-          ipolicy[key] = list(value)
+        # FIXME: we assume all others are lists; this should be redone
+        # in a nicer way
+        ipolicy[key] = list(value)
   try:
     objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
   except errors.ConfigurationError, err:
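The reordered logic boils down to: an empty value or the literal "default" unsets a key, which is only legal on group policies; everything else overwrites. A minimal standalone sketch of those semantics (update_ipolicy is a made-up name, ValueError stands in for OpPrereqError):

VALUE_DEFAULT = "default"  # stands in for constants.VALUE_DEFAULT

def update_ipolicy(old, new, group_policy):
    """'default' (or an empty value) unsets a key, but only on group policies."""
    result = dict(old)  # a shallow copy is enough for this sketch
    for key, value in new.items():
        if not value or value in (VALUE_DEFAULT, [VALUE_DEFAULT]):
            if not group_policy:
                raise ValueError("can't unset %r on the cluster" % key)
            result.pop(key, None)
        else:
            result[key] = value
    return result

assert update_ipolicy({"vcpu-ratio": 4.0}, {"vcpu-ratio": "default"}, True) == {}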
@@ -1020,18 +1026,32 @@ def _CheckOutputFields(static, dynamic, selected):
                                % ",".join(delta), errors.ECODE_INVAL)
 
 
-def _CheckGlobalHvParams(params):
-  """Validates that given hypervisor params are not global ones.
+def _CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
+  """Make sure that none of the given paramters is global.
+
+  If a global parameter is found, an L{errors.OpPrereqError} exception is
+  raised. This is used to avoid setting global parameters for individual nodes.
 
-  This will ensure that instances don't get customised versions of
-  global params.
+  @type params: dictionary
+  @param params: Parameters to check
+  @type glob_pars: dictionary
+  @param glob_pars: Forbidden parameters
+  @type kind: string
+  @param kind: Kind of parameters (e.g. "node")
+  @type bad_levels: string
+  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
+      "instance")
+  @type good_levels: string
+  @param good_levels: Level(s) at which the parameters are allowed (e.g.
+      "cluster or group")
 
   """
-  used_globals = constants.HVC_GLOBALS.intersection(params)
+  used_globals = glob_pars.intersection(params)
   if used_globals:
-    msg = ("The following hypervisor parameters are global and cannot"
-           " be customized at instance level, please modify them at"
-           " cluster level: %s" % utils.CommaJoin(used_globals))
+    msg = ("The following %s parameters are global and cannot"
+           " be customized at %s level, please modify them at"
+           " %s level: %s" %
+           (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
     raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
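The generalized check is a set intersection plus a templated message; the node-parameter call sites appear in the LUNodeAdd and LUNodeSetParams hunks below. A rough standalone re-implementation (the helper name and sample inputs are illustrative, not part of the patch):

def check_params_not_global(params, glob_pars, kind, bad_levels, good_levels):
    used_globals = set(glob_pars) & set(params)
    if used_globals:
        raise ValueError("The following %s parameters are global and cannot be"
                         " customized at %s level, please modify them at %s"
                         " level: %s" % (kind, bad_levels, good_levels,
                                         ", ".join(sorted(used_globals))))

# A per-node override of a global node parameter is rejected:
try:
    check_params_not_global({"exclusive_storage": True},
                            frozenset(["exclusive_storage"]),
                            "node", "node", "cluster or group")
except ValueError, err:
    print err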
 
 
@@ -1189,22 +1209,21 @@ def _CheckInstanceState(lu, instance, req_states, msg=None):
                      " is down")
 
 
-def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
+def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
   """Computes if value is in the desired range.
 
   @param name: name of the parameter for which we perform the check
   @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
       not just 'disk')
-  @param ipolicy: dictionary containing min, max and std values
+  @param ispecs: dictionary containing min and max values
   @param value: actual value that we want to use
-  @return: None or element not meeting the criteria
-
+  @return: None or an error string
 
   """
   if value in [None, constants.VALUE_AUTO]:
     return None
-  max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
-  min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
+  max_v = ispecs[constants.ISPECS_MAX].get(name, value)
+  min_v = ispecs[constants.ISPECS_MIN].get(name, value)
   if value > max_v or min_v > value:
     if qualifier:
       fqn = "%s/%s" % (name, qualifier)
@@ -1217,6 +1236,7 @@ def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
 
 def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                  nic_count, disk_sizes, spindle_use,
+                                 disk_template,
                                  _compute_fn=_ComputeMinMaxSpec):
   """Verifies ipolicy against provided specs.
 
@@ -1234,6 +1254,8 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
   @type spindle_use: int
   @param spindle_use: The number of spindles this instance uses
+  @type disk_template: string
+  @param disk_template: The disk template of the instance
   @param _compute_fn: The compute function (unittest only)
   @return: A list of violations, or an empty list if no violations are found
 
@@ -1243,18 +1265,31 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   test_settings = [
     (constants.ISPEC_MEM_SIZE, "", mem_size),
     (constants.ISPEC_CPU_COUNT, "", cpu_count),
-    (constants.ISPEC_DISK_COUNT, "", disk_count),
     (constants.ISPEC_NIC_COUNT, "", nic_count),
     (constants.ISPEC_SPINDLE_USE, "", spindle_use),
     ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
          for idx, d in enumerate(disk_sizes)]
-
-  return filter(None,
-                (_compute_fn(name, qualifier, ipolicy, value)
-                 for (name, qualifier, value) in test_settings))
-
-
-def _ComputeIPolicyInstanceViolation(ipolicy, instance,
+  if disk_template != constants.DT_DISKLESS:
+    # This check doesn't make sense for diskless instances
+    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
+  ret = []
+  allowed_dts = ipolicy[constants.IPOLICY_DTS]
+  if disk_template not in allowed_dts:
+    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
+               (disk_template, utils.CommaJoin(allowed_dts)))
+
+  min_errs = None
+  for minmax in ipolicy[constants.ISPECS_MINMAX]:
+    errs = filter(None,
+                  (_compute_fn(name, qualifier, minmax, value)
+                   for (name, qualifier, value) in test_settings))
+    if min_errs is None or len(errs) < len(min_errs):
+      min_errs = errs
+  assert min_errs is not None
+  return ret + min_errs
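With constants.ISPECS_MINMAX now holding a list of min/max pairs, an instance only needs to satisfy one of them; the loop above keeps the violation list of the best-matching pair. A condensed sketch of that selection (best_minmax_errors is a made-up name):

def best_minmax_errors(minmax_list, compute_errors):
    """Return the shortest violation list across all min/max pairs."""
    min_errs = None
    for minmax in minmax_list:
        errs = compute_errors(minmax)
        if min_errs is None or len(errs) < len(min_errs):
            min_errs = errs
    return min_errs  # an empty list means some pair is fully satisfied

assert best_minmax_errors([1, 2], lambda m: ["too big"] * m) == ["too big"]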
+
+
+def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                      _compute_fn=_ComputeIPolicySpecViolation):
   """Compute if instance meets the specs of ipolicy.
 
@@ -1262,29 +1297,36 @@ def _ComputeIPolicyInstanceViolation(ipolicy, instance,
   @param ipolicy: The ipolicy to verify against
   @type instance: L{objects.Instance}
   @param instance: The instance to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
   """
-  mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
-  cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
-  spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
+  be_full = cfg.GetClusterInfo().FillBE(instance)
+  mem_size = be_full[constants.BE_MAXMEM]
+  cpu_count = be_full[constants.BE_VCPUS]
+  spindle_use = be_full[constants.BE_SPINDLE_USE]
   disk_count = len(instance.disks)
   disk_sizes = [disk.size for disk in instance.disks]
   nic_count = len(instance.nics)
+  disk_template = instance.disk_template
 
   return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes, spindle_use)
+                     disk_sizes, spindle_use, disk_template)
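Switching from instance.beparams.get(...) to FillBE matters because beparams stores only explicit overrides, while FillBE merges in the cluster and group defaults, so instances relying on defaults are now policy-checked too. Schematically (hypothetical values, FillBE approximated by a dict merge):

cluster_defaults = {"maxmem": 1024, "vcpus": 1, "spindle-use": 1}
instance_beparams = {"vcpus": 4}          # only the override is stored
be_full = dict(cluster_defaults, **instance_beparams)
assert be_full == {"maxmem": 1024, "vcpus": 4, "spindle-use": 1}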
 
 
 def _ComputeIPolicyInstanceSpecViolation(
-  ipolicy, instance_spec, _compute_fn=_ComputeIPolicySpecViolation):
+  ipolicy, instance_spec, disk_template,
+  _compute_fn=_ComputeIPolicySpecViolation):
   """Compute if instance specs meets the specs of ipolicy.
 
   @type ipolicy: dict
   @param ipolicy: The ipolicy to verify against
   @type instance_spec: dict
   @param instance_spec: The instance spec to verify
+  @type disk_template: string
+  @param disk_template: the disk template of the instance
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
@@ -1297,11 +1339,11 @@ def _ComputeIPolicyInstanceSpecViolation(
   spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
 
   return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes, spindle_use)
+                     disk_sizes, spindle_use, disk_template)
 
 
 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
-                                 target_group,
+                                 target_group, cfg,
                                  _compute_fn=_ComputeIPolicyInstanceViolation):
   """Compute if instance meets the specs of the new target group.
 
@@ -1309,6 +1351,8 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   @param instance: The instance object to verify
   @param current_group: The current group of the instance
   @param target_group: The new group of the instance
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
@@ -1316,23 +1360,25 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   if current_group == target_group:
     return []
   else:
-    return _compute_fn(ipolicy, instance)
+    return _compute_fn(ipolicy, instance, cfg)
 
 
-def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
+def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                             _compute_fn=_ComputeIPolicyNodeViolation):
   """Checks that the target node is correct in terms of instance policy.
 
   @param ipolicy: The ipolicy to verify
   @param instance: The instance object to verify
   @param node: The new node to relocate
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param ignore: Ignore violations of the ipolicy
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
   """
   primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
-  res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
+  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)
 
   if res:
     msg = ("Instance does not meet target node group's (%s) instance"
@@ -1343,18 +1389,20 @@ def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
 
-def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
+def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
   """Computes a set of any instances that would violate the new ipolicy.
 
   @param old_ipolicy: The current (still in-place) ipolicy
   @param new_ipolicy: The new (to become) ipolicy
   @param instances: List of instances to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @return: A list of instances which violates the new ipolicy but
       did not before
 
   """
-  return (_ComputeViolatingInstances(new_ipolicy, instances) -
-          _ComputeViolatingInstances(old_ipolicy, instances))
+  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
+          _ComputeViolatingInstances(old_ipolicy, instances, cfg))
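The result is a plain set difference, so instances already violating the old policy are not reported again:

violating_old = frozenset(["inst1"])
violating_new = frozenset(["inst1", "inst2"])
assert violating_new - violating_old == frozenset(["inst2"])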
 
 
 def _ExpandItemName(fn, name, kind):
@@ -1385,7 +1433,7 @@ def _ExpandInstanceName(cfg, name):
 
 
 def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
-                         network_type, mac_prefix, tags):
+                         mac_prefix, tags):
   """Builds network related env variables for hooks
 
   This builds the hook environment from individual variables.
@@ -1400,8 +1448,6 @@ def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
   @param network6: the ipv6 subnet
   @type gateway6: string
   @param gateway6: the ipv6 gateway
-  @type network_type: string
-  @param network_type: the type of the network
   @type mac_prefix: string
   @param mac_prefix: the mac_prefix
   @type tags: list
@@ -1421,8 +1467,6 @@ def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
     env["NETWORK_GATEWAY6"] = gateway6
   if mac_prefix:
     env["NETWORK_MAC_PREFIX"] = mac_prefix
-  if network_type:
-    env["NETWORK_TYPE"] = network_type
   if tags:
     env["NETWORK_TAGS"] = " ".join(tags)
 
@@ -1453,12 +1497,12 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   @type vcpus: string
   @param vcpus: the count of VCPUs the instance has
   @type nics: list
-  @param nics: list of tuples (ip, mac, mode, link, network) representing
-      the NICs the instance has
+  @param nics: list of tuples (name, uuid, ip, mac, mode, link, net, netinfo)
+      representing the NICs the instance has
   @type disk_template: string
   @param disk_template: the disk template of the instance
   @type disks: list
-  @param disks: the list of (size, mode) pairs
+  @param disks: list of tuples (name, uuid, size, mode)
   @type bep: dict
   @param bep: the backend parameters for the instance
   @type hvp: dict
@@ -1480,7 +1524,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     "INSTANCE_STATUS": status,
     "INSTANCE_MINMEM": minmem,
     "INSTANCE_MAXMEM": maxmem,
-    # TODO(2.7) remove deprecated "memory" value
+    # TODO(2.9) remove deprecated "memory" value
     "INSTANCE_MEMORY": maxmem,
     "INSTANCE_VCPUS": vcpus,
     "INSTANCE_DISK_TEMPLATE": disk_template,
@@ -1488,31 +1532,22 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   }
   if nics:
     nic_count = len(nics)
-    for idx, (ip, mac, mode, link, net, netinfo) in enumerate(nics):
+    for idx, (name, _, ip, mac, mode, link, net, netinfo) in enumerate(nics):
       if ip is None:
         ip = ""
+      env["INSTANCE_NIC%d_NAME" % idx] = name
       env["INSTANCE_NIC%d_IP" % idx] = ip
       env["INSTANCE_NIC%d_MAC" % idx] = mac
       env["INSTANCE_NIC%d_MODE" % idx] = mode
       env["INSTANCE_NIC%d_LINK" % idx] = link
-      if network:
-        env["INSTANCE_NIC%d_NETWORK" % idx] = net
-        if netinfo:
-          nobj = objects.Network.FromDict(netinfo)
-          if nobj.network:
-            env["INSTANCE_NIC%d_NETWORK_SUBNET" % idx] = nobj.network
-          if nobj.gateway:
-            env["INSTANCE_NIC%d_NETWORK_GATEWAY" % idx] = nobj.gateway
-          if nobj.network6:
-            env["INSTANCE_NIC%d_NETWORK_SUBNET6" % idx] = nobj.network6
-          if nobj.gateway6:
-            env["INSTANCE_NIC%d_NETWORK_GATEWAY6" % idx] = nobj.gateway6
-          if nobj.mac_prefix:
-            env["INSTANCE_NIC%d_NETWORK_MAC_PREFIX" % idx] = nobj.mac_prefix
-          if nobj.network_type:
-            env["INSTANCE_NIC%d_NETWORK_TYPE" % idx] = nobj.network_type
-          if nobj.tags:
-            env["INSTANCE_NIC%d_NETWORK_TAGS" % idx] = " ".join(nobj.tags)
+      if netinfo:
+        nobj = objects.Network.FromDict(netinfo)
+        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
+      elif net:
+        # FIXME: broken network reference: the instance NIC specifies a
+        # network, but the relevant network entry was not in the config. This
+        # should be made impossible.
+        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
       if mode == constants.NIC_MODE_BRIDGED:
         env["INSTANCE_NIC%d_BRIDGE" % idx] = link
   else:
@@ -1522,7 +1557,8 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   if disks:
     disk_count = len(disks)
-    for idx, (size, mode) in enumerate(disks):
+    for idx, (name, size, mode) in enumerate(disks):
+      env["INSTANCE_DISK%d_NAME" % idx] = name
       env["INSTANCE_DISK%d_SIZE" % idx] = size
       env["INSTANCE_DISK%d_MODE" % idx] = mode
   else:
@@ -1551,20 +1587,15 @@ def _NICToTuple(lu, nic):
   @param nic: nic to convert to hooks tuple
 
   """
-  ip = nic.ip
-  mac = nic.mac
   cluster = lu.cfg.GetClusterInfo()
   filled_params = cluster.SimpleFillNIC(nic.nicparams)
   mode = filled_params[constants.NIC_MODE]
   link = filled_params[constants.NIC_LINK]
-  net = nic.network
   netinfo = None
-  if net:
-    net_uuid = lu.cfg.LookupNetwork(net)
-    if net_uuid:
-      nobj = lu.cfg.GetNetwork(net_uuid)
-      netinfo = objects.Network.ToDict(nobj)
-  return (ip, mac, mode, link, net, netinfo)
+  if nic.network:
+    nobj = lu.cfg.GetNetwork(nic.network)
+    netinfo = objects.Network.ToDict(nobj)
+  return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, nic.network, netinfo)
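The NIC hooks tuple thus grows from six to eight fields, with name and uuid prepended; _BuildInstanceHookEnv above unpacks it accordingly. A fabricated sample of the new shape:

nic_tuple = ("nic-0", "11111111-2222-3333-4444-555555555555",  # name, uuid
             "10.0.0.2", "aa:00:00:35:cd:9f",                  # ip, mac
             "bridged", "br0",                                 # mode, link
             None, None)                                       # net, netinfo
(name, _, ip, mac, mode, link, net, netinfo) = nic_tuple
assert name == "nic-0" and link == "br0"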
 
 
 def _NICListToTuple(lu, nics):
@@ -1614,7 +1645,8 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
     "vcpus": bep[constants.BE_VCPUS],
     "nics": _NICListToTuple(lu, instance.nics),
     "disk_template": instance.disk_template,
-    "disks": [(disk.size, disk.mode) for disk in instance.disks],
+    "disks": [(disk.name, disk.size, disk.mode)
+              for disk in instance.disks],
     "bep": bep,
     "hvp": hvp,
     "hypervisor_name": instance.hypervisor,
@@ -1652,17 +1684,19 @@ def _DecideSelfPromotion(lu, exceptions=None):
   return mc_now < mc_should
 
 
-def _ComputeViolatingInstances(ipolicy, instances):
+def _ComputeViolatingInstances(ipolicy, instances, cfg):
   """Computes a set of instances who violates given ipolicy.
 
   @param ipolicy: The ipolicy to verify
-  @type instances: object.Instance
+  @type instances: list of L{objects.Instance}
   @param instances: List of instances to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @return: A frozenset of instance names violating the ipolicy
 
   """
   return frozenset([inst.name for inst in instances
-                    if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
+                    if _ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
 
 
 def _CheckNicsBridgesExist(lu, target_nics, target_node):
@@ -2027,6 +2061,10 @@ class _VerifyErrors(object):
     """
     ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
     itype, etxt, _ = ecode
+    # If the error code is in the list of ignored errors, demote the error to a
+    # warning
+    if etxt in self.op.ignore_errors:     # pylint: disable=E1101
+      ltype = self.ETYPE_WARNING
     # first complete the msg
     if args:
       msg = msg % args
@@ -2041,26 +2079,17 @@ class _VerifyErrors(object):
       msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
     # and finally report it via the feedback_fn
     self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable=E1101
+    # do not mark the operation as failed for WARN cases only
+    if ltype == self.ETYPE_ERROR:
+      self.bad = True
 
-  def _ErrorIf(self, cond, ecode, *args, **kwargs):
+  def _ErrorIf(self, cond, *args, **kwargs):
     """Log an error message if the passed condition is True.
 
     """
-    cond = (bool(cond)
-            or self.op.debug_simulate_errors) # pylint: disable=E1101
-
-    # If the error code is in the list of ignored errors, demote the error to a
-    # warning
-    (_, etxt, _) = ecode
-    if etxt in self.op.ignore_errors:     # pylint: disable=E1101
-      kwargs[self.ETYPE_FIELD] = self.ETYPE_WARNING
-
-    if cond:
-      self._Error(ecode, *args, **kwargs)
-
-    # do not mark the operation as failed for WARN cases only
-    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
-      self.bad = self.bad or cond
+    if (bool(cond)
+        or self.op.debug_simulate_errors): # pylint: disable=E1101
+      self._Error(*args, **kwargs)
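After this refactoring, _Error is the single place that downgrades ignored error codes to warnings and records overall failure, while _ErrorIf shrinks to a condition guard. A simplified model of the split, outside the mix-in (all names here are illustrative):

def _error(etxt, msg, ignore_errors, state):
    ltype = "WARNING" if etxt in ignore_errors else "ERROR"
    state["bad"] = state["bad"] or ltype == "ERROR"
    state["log"].append("%s: %s" % (ltype, msg))

def _error_if(cond, etxt, msg, ignore_errors, state, simulate=False):
    if bool(cond) or simulate:
        _error(etxt, msg, ignore_errors, state)

state = {"bad": False, "log": []}
_error_if(True, "ENODESSH", "ssh problem", frozenset(["ENODESSH"]), state)
assert not state["bad"] and state["log"] == ["WARNING: ssh problem"]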
 
 
 class LUClusterVerify(NoHooksLU):
@@ -2636,7 +2665,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     cluster = self.cfg.GetClusterInfo()
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                             self.group_info)
-    err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config)
+    err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
     _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
              code=self.ETYPE_WARNING)
 
@@ -2688,6 +2717,18 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
              utils.CommaJoin(inst_config.secondary_nodes),
              code=self.ETYPE_WARNING)
 
+    if inst_config.disk_template not in constants.DTS_EXCL_STORAGE:
+      # Disk template not compatible with exclusive_storage: no instance
+      # node should have the flag set
+      es_flags = rpc.GetExclusiveStorageForNodeNames(self.cfg,
+                                                     inst_config.all_nodes)
+      es_nodes = [n for (n, es) in es_flags.items()
+                  if es]
+      _ErrorIf(es_nodes, constants.CV_EINSTANCEUNSUITABLENODE, instance,
+               "instance has template %s, which is not supported on nodes"
+               " that have exclusive storage set: %s",
+               inst_config.disk_template, utils.CommaJoin(es_nodes))
+
     if inst_config.disk_template in constants.DTS_INT_MIRROR:
       instance_nodes = utils.NiceSort(inst_config.all_nodes)
       instance_groups = {}
@@ -3487,22 +3528,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         nimg.sbp[pnode].append(instance)
 
     es_flags = rpc.GetExclusiveStorageForNodeNames(self.cfg, self.my_node_names)
-    es_unset_nodes = []
     # The value of exclusive_storage should be the same across the group, so if
     # it's True for at least a node, we act as if it were set for all the nodes
     self._exclusive_storage = compat.any(es_flags.values())
     if self._exclusive_storage:
       node_verify_param[constants.NV_EXCLUSIVEPVS] = True
-      es_unset_nodes = [n for (n, es) in es_flags.items()
-                        if not es]
-
-    if es_unset_nodes:
-      self._Error(constants.CV_EGROUPMIXEDESFLAG, self.group_info.name,
-                  "The exclusive_storage flag should be uniform in a group,"
-                  " but these nodes have it unset: %s",
-                  utils.CommaJoin(utils.NiceSort(es_unset_nodes)))
-      self.LogWarning("Some checks required by exclusive storage will be"
-                      " performed also on nodes with the flag unset")
 
     # At this point, we have the in-memory data structures complete,
     # except for the runtime information, which we'll gather next
@@ -4177,11 +4207,10 @@ class LUClusterSetParams(LogicalUnit):
     mn = self.cfg.GetMasterNode()
     return ([mn], [mn])
 
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks whether the given params don't conflict and
-    if the given volume group is valid.
+  def _CheckVgName(self, node_list, enabled_disk_templates,
+                   new_enabled_disk_templates):
+    """Check the consistency of the vg name on all nodes and in case it gets
+       unset whether there are instances still using it.
 
     """
     if self.op.vg_name is not None and not self.op.vg_name:
@@ -4189,6 +4218,55 @@ class LUClusterSetParams(LogicalUnit):
         raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                    " instances exist", errors.ECODE_INVAL)
 
+    if (self.op.vg_name is not None and
+        utils.IsLvmEnabled(enabled_disk_templates)) or \
+           (self.cfg.GetVGName() is not None and
+            utils.LvmGetsEnabled(enabled_disk_templates,
+                                 new_enabled_disk_templates)):
+      self._CheckVgNameOnNodes(node_list)
+
+  def _CheckVgNameOnNodes(self, node_list):
+    """Check the status of the volume group on each node.
+
+    """
+    vglist = self.rpc.call_vg_list(node_list)
+    for node in node_list:
+      msg = vglist[node].fail_msg
+      if msg:
+        # ignoring down node
+        self.LogWarning("Error while gathering data on node %s"
+                        " (ignoring node): %s", node, msg)
+        continue
+      vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
+                                            self.op.vg_name,
+                                            constants.MIN_VG_SIZE)
+      if vgstatus:
+        raise errors.OpPrereqError("Error on node '%s': %s" %
+                                   (node, vgstatus), errors.ECODE_ENVIRON)
+
+  def _GetEnabledDiskTemplates(self, cluster):
+    """Determines the enabled disk templates and the subset of disk templates
+       that are newly enabled by this operation.
+
+    """
+    enabled_disk_templates = None
+    new_enabled_disk_templates = []
+    if self.op.enabled_disk_templates:
+      enabled_disk_templates = self.op.enabled_disk_templates
+      new_enabled_disk_templates = \
+        list(set(enabled_disk_templates)
+             - set(cluster.enabled_disk_templates))
+    else:
+      enabled_disk_templates = cluster.enabled_disk_templates
+    return (enabled_disk_templates, new_enabled_disk_templates)
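The "newly enabled" templates are the requested set minus the currently enabled set; only those trigger the extra checks (e.g. the VG check when an lvm-based template first becomes enabled):

requested = ["plain", "drbd", "file"]
currently_enabled = ["plain", "drbd"]
newly_enabled = list(set(requested) - set(currently_enabled))
assert newly_enabled == ["file"]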
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks whether the given params don't conflict and
+    if the given volume group is valid.
+
+    """
     if self.op.drbd_helper is not None and not self.op.drbd_helper:
       if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
         raise errors.OpPrereqError("Cannot disable drbd helper while"
@@ -4196,11 +4274,16 @@ class LUClusterSetParams(LogicalUnit):
                                    errors.ECODE_INVAL)
 
     node_list = self.owned_locks(locking.LEVEL_NODE)
+    self.cluster = cluster = self.cfg.GetClusterInfo()
+
+    vm_capable_nodes = [node.name
+                        for node in self.cfg.GetAllNodesInfo().values()
+                        if node.name in node_list and node.vm_capable]
 
     # if vg_name not None, checks given volume group on all nodes
     if self.op.vg_name:
-      vglist = self.rpc.call_vg_list(node_list)
-      for node in node_list:
+      vglist = self.rpc.call_vg_list(vm_capable_nodes)
+      for node in vm_capable_nodes:
         msg = vglist[node].fail_msg
         if msg:
           # ignoring down node
@@ -4231,7 +4314,6 @@ class LUClusterSetParams(LogicalUnit):
           raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                      (node, node_helper), errors.ECODE_ENVIRON)
 
-    self.cluster = cluster = self.cfg.GetClusterInfo()
     # validate params changes
     if self.op.beparams:
       objects.UpgradeBeParams(self.op.beparams)
@@ -4275,7 +4357,7 @@ class LUClusterSetParams(LogicalUnit):
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group)
         new = _ComputeNewInstanceViolations(ipol,
-                                            new_ipolicy, instances)
+                                            new_ipolicy, instances, self.cfg)
         if new:
           violations.update(new)
 
@@ -4392,6 +4474,8 @@ class LUClusterSetParams(LogicalUnit):
           hv_class.CheckParameterSyntax(hv_params)
           _CheckHVParams(self, node_list, hv_name, hv_params)
 
+    self._CheckDiskTemplateConsistency()
+
     if self.op.os_hvp:
       # no need to check any newly-enabled hypervisors, since the
       # defaults have already been checked in the above code-block
@@ -4414,20 +4498,63 @@ class LUClusterSetParams(LogicalUnit):
                                    " specified" % self.op.default_iallocator,
                                    errors.ECODE_INVAL)
 
-  def Exec(self, feedback_fn):
-    """Change the parameters of the cluster.
+  def _CheckDiskTemplateConsistency(self):
+    """Check whether the disk templates that are going to be disabled
+       are still in use by some instances.
+
+    """
+    if self.op.enabled_disk_templates:
+      cluster = self.cfg.GetClusterInfo()
+      instances = self.cfg.GetAllInstancesInfo()
+
+      disk_templates_to_remove = set(cluster.enabled_disk_templates) \
+        - set(self.op.enabled_disk_templates)
+      for instance in instances.itervalues():
+        if instance.disk_template in disk_templates_to_remove:
+          raise errors.OpPrereqError("Cannot disable disk template '%s',"
+                                     " because instance '%s' is using it." %
+                                     (instance.disk_template, instance.name))
+
+  def _SetVgName(self, feedback_fn):
+    """Determines and sets the new volume group name.
 
     """
     if self.op.vg_name is not None:
+      if self.op.vg_name and not \
+           utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
+        feedback_fn("Note that you specified a volume group, but did not"
+                    " enable any lvm disk template.")
       new_volume = self.op.vg_name
       if not new_volume:
+        if utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
+          raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
+                                     " disk templates are enabled.")
         new_volume = None
       if new_volume != self.cfg.GetVGName():
         self.cfg.SetVGName(new_volume)
       else:
         feedback_fn("Cluster LVM configuration already in desired"
                     " state, not changing")
+    else:
+      if utils.IsLvmEnabled(self.cluster.enabled_disk_templates) and \
+          not self.cfg.GetVGName():
+        raise errors.OpPrereqError("Please specify a volume group when"
+                                   " enabling lvm-based disk-templates.")
+
+  def Exec(self, feedback_fn):
+    """Change the parameters of the cluster.
+
+    """
+    if self.op.enabled_disk_templates:
+      self.cluster.enabled_disk_templates = \
+        list(set(self.op.enabled_disk_templates))
+
+    self._SetVgName(feedback_fn)
+
     if self.op.drbd_helper is not None:
+      if constants.DT_DRBD8 not in self.cluster.enabled_disk_templates:
+        feedback_fn("Note that you specified a drbd user helper, but did"
+                    " not enable the drbd disk template.")
       new_helper = self.op.drbd_helper
       if not new_helper:
         new_helper = None
@@ -5760,6 +5887,7 @@ class _InstanceQuery(_QueryBase):
       lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
       lu.needed_locks[locking.LEVEL_NODEGROUP] = []
       lu.needed_locks[locking.LEVEL_NODE] = []
+      lu.needed_locks[locking.LEVEL_NETWORK] = []
       lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
     self.do_grouplocks = (self.do_locking and
@@ -5779,6 +5907,12 @@ class _InstanceQuery(_QueryBase):
       elif level == locking.LEVEL_NODE:
         lu._LockInstancesNodes() # pylint: disable=W0212
 
+      elif level == locking.LEVEL_NETWORK:
+        lu.needed_locks[locking.LEVEL_NETWORK] = \
+          frozenset(net_uuid
+                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
+
   @staticmethod
   def _CheckGroupLocks(lu):
     owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
@@ -5869,10 +6003,17 @@ class _InstanceQuery(_QueryBase):
       nodes = None
       groups = None
 
+    if query.IQ_NETWORKS in self.requested_data:
+      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
+                                    for i in instance_list))
+      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
+    else:
+      networks = None
+
     return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                    disk_usage, offline_nodes, bad_nodes,
                                    live_data, wrongnode_inst, consinfo,
-                                   nodes, groups)
+                                   nodes, groups, networks)
 
 
 class LUQuery(NoHooksLU):
@@ -6132,6 +6273,8 @@ class LUNodeAdd(LogicalUnit):
 
     if self.op.ndparams:
       utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+      _CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
+                            "node", "cluster or group")
 
     if self.op.hv_state:
       self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
@@ -6157,9 +6300,6 @@ class LUNodeAdd(LogicalUnit):
     if vg_name is not None:
       vparams = {constants.NV_PVLIST: [vg_name]}
       excl_stor = _IsExclusiveStorageEnabledNode(cfg, self.new_node)
-      if self.op.ndparams:
-        excl_stor = self.op.ndparams.get(constants.ND_EXCLUSIVE_STORAGE,
-                                         excl_stor)
       cname = self.cfg.GetClusterName()
       result = rpcrunner.call_node_verify_light([node], vparams, cname)[node]
       (errmsgs, _) = _CheckNodePVs(result.payload, excl_stor)
@@ -6557,6 +6697,8 @@ class LUNodeSetParams(LogicalUnit):
     if self.op.ndparams:
       new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
       utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
+      _CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
+                            "node", "cluster or group")
       self.new_ndparams = new_ndparams
 
     if self.op.hv_state:
@@ -7289,6 +7431,7 @@ class LUInstanceStartup(LogicalUnit):
     """
     instance = self.instance
     force = self.op.force
+    reason = self.op.reason
 
     if not self.op.no_remember:
       self.cfg.MarkInstanceUp(instance.name)
@@ -7305,7 +7448,7 @@ class LUInstanceStartup(LogicalUnit):
         self.rpc.call_instance_start(node_current,
                                      (instance, self.op.hvparams,
                                       self.op.beparams),
-                                     self.op.startup_paused)
+                                     self.op.startup_paused, reason)
       msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -7368,6 +7511,7 @@ class LUInstanceReboot(LogicalUnit):
     instance = self.instance
     ignore_secondaries = self.op.ignore_secondaries
     reboot_type = self.op.reboot_type
+    reason = self.op.reason
 
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
@@ -7383,12 +7527,13 @@ class LUInstanceReboot(LogicalUnit):
         self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
                                              reboot_type,
-                                             self.op.shutdown_timeout)
+                                             self.op.shutdown_timeout, reason)
       result.Raise("Could not reboot instance")
     else:
       if instance_running:
         result = self.rpc.call_instance_shutdown(node_current, instance,
-                                                 self.op.shutdown_timeout)
+                                                 self.op.shutdown_timeout,
+                                                 reason)
         result.Raise("Could not shutdown instance for full reboot")
         _ShutdownInstanceDisks(self, instance)
       else:
@@ -7396,7 +7541,8 @@ class LUInstanceReboot(LogicalUnit):
                      instance.name)
       _StartInstanceDisks(self, instance, ignore_secondaries)
       result = self.rpc.call_instance_start(node_current,
-                                            (instance, None, None), False)
+                                            (instance, None, None), False,
+                                             reason)
       msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -7464,6 +7610,7 @@ class LUInstanceShutdown(LogicalUnit):
     instance = self.instance
     node_current = instance.primary_node
     timeout = self.op.timeout
+    reason = self.op.reason
 
     # If the instance is offline we shouldn't mark it as down, as that
     # resets the offline flag.
@@ -7474,7 +7621,8 @@ class LUInstanceShutdown(LogicalUnit):
       assert self.op.ignore_offline_nodes
       self.LogInfo("Primary node offline, marked instance as stopped")
     else:
-      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
+      result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
+                                               reason)
       msg = result.fail_msg
       if msg:
         self.LogWarning("Could not shutdown instance: %s", msg)
@@ -7591,6 +7739,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     constants.IDISK_VG,
     constants.IDISK_METAVG,
     constants.IDISK_PROVIDER,
+    constants.IDISK_NAME,
     ]))
 
   def _RunAllocator(self):
@@ -8054,7 +8203,8 @@ class LUInstanceRemove(LogicalUnit):
                  instance.name, instance.primary_node)
 
     result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
-                                             self.op.shutdown_timeout)
+                                             self.op.shutdown_timeout,
+                                             self.op.reason)
     msg = result.fail_msg
     if msg:
       if self.op.ignore_failures:
@@ -8131,8 +8281,8 @@ def _ExpandNamesForMigration(lu):
   lu.needed_locks[locking.LEVEL_NODE_RES] = []
   lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
 
-  # The node allocation lock is actually only needed for replicated instances
-  # (e.g. DRBD8) and if an iallocator is used.
+  # The node allocation lock is actually only needed for externally replicated
+  # instances (e.g. sharedfile or RBD) and if an iallocator is used.
   lu.needed_locks[locking.LEVEL_NODE_ALLOC] = []
 
 
@@ -8294,8 +8444,9 @@ class LUInstanceMigrate(LogicalUnit):
 
     """
     instance = self._migrater.instance
-    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
-    return (nl, nl + [instance.primary_node])
+    snodes = list(instance.secondary_nodes)
+    nl = [self.cfg.GetMasterNode(), instance.primary_node] + snodes
+    return (nl, nl)
 
 
 class LUInstanceMove(LogicalUnit):
@@ -8356,6 +8507,10 @@ class LUInstanceMove(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
+    if instance.disk_template not in constants.DTS_COPYABLE:
+      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
+                                 instance.disk_template, errors.ECODE_STATE)
+
     node = self.cfg.GetNodeInfo(self.op.target_node)
     assert node is not None, \
       "Cannot retrieve locked node %s" % self.op.target_node
@@ -8380,7 +8535,7 @@ class LUInstanceMove(LogicalUnit):
     cluster = self.cfg.GetClusterInfo()
     group_info = self.cfg.GetNodeGroup(node.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
+    _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                             ignore=self.op.ignore_ipolicy)
 
     if instance.admin_state == constants.ADMINST_UP:
@@ -8414,7 +8569,8 @@ class LUInstanceMove(LogicalUnit):
             self.owned_locks(locking.LEVEL_NODE_RES))
 
     result = self.rpc.call_instance_shutdown(source_node, instance,
-                                             self.op.shutdown_timeout)
+                                             self.op.shutdown_timeout,
+                                             self.op.reason)
     msg = result.fail_msg
     if msg:
       if self.op.ignore_consistency:
@@ -8431,12 +8587,9 @@ class LUInstanceMove(LogicalUnit):
     try:
       _CreateDisks(self, instance, target_node=target_node)
     except errors.OpExecError:
-      self.LogWarning("Device creation failed, reverting...")
-      try:
-        _RemoveDisks(self, instance, target_node=target_node)
-      finally:
-        self.cfg.ReleaseDRBDMinors(instance.name)
-        raise
+      self.LogWarning("Device creation failed")
+      self.cfg.ReleaseDRBDMinors(instance.name)
+      raise
 
     cluster_name = self.cfg.GetClusterInfo().cluster_name
 
@@ -8488,7 +8641,8 @@ class LUInstanceMove(LogicalUnit):
         raise errors.OpExecError("Can't activate the instance's disks")
 
       result = self.rpc.call_instance_start(target_node,
-                                            (instance, None, None), False)
+                                            (instance, None, None), False,
+                                             self.op.reason)
       msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -8639,11 +8793,10 @@ class TLMigrateInstance(Tasklet):
                                  errors.ECODE_STATE)
 
     if instance.disk_template in constants.DTS_EXT_MIRROR:
-      assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
-
       _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
 
       if self.lu.op.iallocator:
+        assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
         self._RunAllocator()
       else:
         # We set self.target_node as it is required by
@@ -8655,7 +8808,7 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                               ignore=self.ignore_ipolicy)
 
       # self.target_node is already populated, either directly or by the
@@ -8699,7 +8852,7 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                               ignore=self.ignore_ipolicy)
 
     i_be = cluster.FillBE(instance)
@@ -9191,7 +9344,8 @@ class TLMigrateInstance(Tasklet):
                  instance.name, source_node)
 
     result = self.rpc.call_instance_shutdown(source_node, instance,
-                                             self.shutdown_timeout)
+                                             self.shutdown_timeout,
+                                             self.lu.op.reason)
     msg = result.fail_msg
     if msg:
       if self.ignore_consistency or primary_node.offline:
@@ -9228,7 +9382,7 @@ class TLMigrateInstance(Tasklet):
       self.feedback_fn("* starting the instance on the target node %s" %
                        target_node)
       result = self.rpc.call_instance_start(target_node, (instance, None, None),
-                                            False)
+                                            False, self.lu.op.reason)
       msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self.lu, instance)
@@ -9308,20 +9462,34 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
   @type excl_stor: boolean
   @param excl_stor: Whether exclusive_storage is active for the node
 
+  @return: list of created devices
+
   """
-  if device.CreateOnSecondary():
-    force_create = True
+  created_devices = []
+  try:
+    if device.CreateOnSecondary():
+      force_create = True
 
-  if device.children:
-    for child in device.children:
-      _CreateBlockDevInner(lu, node, instance, child, force_create,
-                           info, force_open, excl_stor)
+    if device.children:
+      for child in device.children:
+        devs = _CreateBlockDevInner(lu, node, instance, child, force_create,
+                                    info, force_open, excl_stor)
+        created_devices.extend(devs)
 
-  if not force_create:
-    return
+    if not force_create:
+      return created_devices
+
+    _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+                          excl_stor)
+    # The device has been completely created, so there is no point in keeping
+    # its subdevices in the list. We just add the device itself instead.
+    created_devices = [(node, device)]
+    return created_devices
 
-  _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
-                        excl_stor)
+  except errors.DeviceCreationError, e:
+    e.created_devices.extend(created_devices)
+    raise e
+  except errors.OpExecError, e:
+    raise errors.DeviceCreationError(str(e), created_devices)
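The try/except rewrite turns block-device creation into a transaction-like pattern: any failure below a device becomes a DeviceCreationError carrying everything created so far, so _CreateDisks (below) can roll exactly those devices back. A self-contained sketch of the pattern, using a toy dict-based device instead of objects.Disk:

class DeviceCreationError(Exception):
    """Mirrors errors.DeviceCreationError: remembers partial progress."""
    def __init__(self, message, created_devices):
        Exception.__init__(self, message)
        self.created_devices = created_devices

def create_tree(node, device, create_fn):
    created = []
    try:
        for child in device.get("children", []):
            created.extend(create_tree(node, child, create_fn))
        create_fn(node, device)       # may raise any exception
        return [(node, device)]       # a finished parent subsumes its children
    except DeviceCreationError, err:  # accumulate progress while unwinding
        err.created_devices.extend(created)
        raise
    except Exception, err:
        raise DeviceCreationError(str(err), created)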
 
 
 def _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
@@ -9383,16 +9551,19 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
   dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                           logical_id=(vgnames[0], names[0]),
                           params={})
+  dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   dev_meta = objects.Disk(dev_type=constants.LD_LV,
                           size=constants.DRBD_META_SIZE,
                           logical_id=(vgnames[1], names[1]),
                           params={})
+  dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                           logical_id=(primary, secondary, port,
                                       p_minor, s_minor,
                                       shared_secret),
                           children=[dev_data, dev_meta],
                           iv_name=iv_name, params={})
+  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   return drbd_dev
 
 
@@ -9454,6 +9625,7 @@ def _GenerateDiskTemplate(
                                       "disk/%d" % disk_index,
                                       minors[idx * 2], minors[idx * 2 + 1])
       disk_dev.mode = disk[constants.IDISK_MODE]
+      disk_dev.name = disk.get(constants.IDISK_NAME, None)
       disks.append(disk_dev)
   else:
     if secondary_nodes:
@@ -9514,11 +9686,14 @@ def _GenerateDiskTemplate(
       size = disk[constants.IDISK_SIZE]
       feedback_fn("* disk %s, size %s" %
                   (disk_index, utils.FormatUnit(size, "h")))
-      disks.append(objects.Disk(dev_type=dev_type, size=size,
-                                logical_id=logical_id_fn(idx, disk_index, disk),
-                                iv_name="disk/%d" % disk_index,
-                                mode=disk[constants.IDISK_MODE],
-                                params=params))
+      disk_dev = objects.Disk(dev_type=dev_type, size=size,
+                              logical_id=logical_id_fn(idx, disk_index, disk),
+                              iv_name="disk/%d" % disk_index,
+                              mode=disk[constants.IDISK_MODE],
+                              params=params)
+      disk_dev.name = disk.get(constants.IDISK_NAME, None)
+      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
+      disks.append(disk_dev)
 
   return disks
 
@@ -9550,7 +9725,9 @@ def _WipeDisks(lu, instance, disks=None):
   @param lu: the logical unit on whose behalf we execute
   @type instance: L{objects.Instance}
   @param instance: the instance whose disks we should create
-  @return: the success of the wipe
+  @type disks: None or list of tuple of (number, L{objects.Disk}, number)
+  @param disks: Disk details; tuple contains disk index, disk object and the
+    start offset
 
   """
   node = instance.primary_node
@@ -9668,6 +9845,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     result.Raise("Failed to create directory '%s' on"
                  " node %s" % (file_storage_dir, pnode))
 
+  disks_created = []
   # Note: this needs to be kept in sync with adding of disks in
   # LUInstanceSetParams
   for idx, device in enumerate(instance.disks):
@@ -9677,7 +9855,23 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     #HARDCODE
     for node in all_nodes:
       f_create = node == pnode
-      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+      try:
+        _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+        disks_created.append((node, device))
+      except errors.OpExecError:
+        logging.warning("Creating disk %s for instance '%s' failed",
+                        idx, instance.name)
+      except errors.DeviceCreationError, e:
+        logging.warning("Creating disk %s for instance '%s' failed",
+                        idx, instance.name)
+        disks_created.extend(e.created_devices)
+        for (node, disk) in disks_created:
+          lu.cfg.SetDiskID(disk, node)
+          result = lu.rpc.call_blockdev_remove(node, disk)
+          if result.fail_msg:
+            logging.warning("Failed to remove newly-created disk %s on node %s:"
+                            " %s", device, node, result.fail_msg)
+        raise errors.OpExecError(e.message)
 
 
 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
@@ -9685,8 +9879,7 @@ def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
 
   This abstracts away some work from `AddInstance()` and
   `RemoveInstance()`. Note that in case some of the devices couldn't
-  be removed, the removal will continue with the other ones (compare
-  with `_CreateDisks()`).
+  be removed, the removal will continue with the other ones.
 
   @type lu: L{LogicalUnit}
   @param lu: the logical unit on whose behalf we execute
@@ -9956,8 +10149,14 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
 
     check_params = cluster.SimpleFillNIC(nicparams)
     objects.NIC.CheckParameterSyntax(check_params)
-    nics.append(objects.NIC(mac=mac, ip=nic_ip,
-                            network=net, nicparams=nicparams))
+    net_uuid = cfg.LookupNetwork(net)
+    name = nic.get(constants.INIC_NAME, None)
+    if name is not None and name.lower() == constants.VALUE_NONE:
+      name = None
+    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
+                          network=net_uuid, nicparams=nicparams)
+    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
+    nics.append(nic_obj)
 
   return nics
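Device names are optional and user-supplied; the literal string "none" (constants.VALUE_NONE) clears one. The normalization applied here for NICs, and in _ComputeDisks below for disks, boils down to:

def normalize_device_name(name):
    """Map the literal "none" (any case) to an absent name."""
    if name is not None and name.lower() == "none":
        return None
    return name

assert normalize_device_name("web-nic") == "web-nic"
assert normalize_device_name("None") is None
assert normalize_device_name(None) is None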
 
@@ -9994,10 +10193,14 @@ def _ComputeDisks(op, default_vg):
                                  op.disk_template), errors.ECODE_INVAL)
 
     data_vg = disk.get(constants.IDISK_VG, default_vg)
+    name = disk.get(constants.IDISK_NAME, None)
+    if name is not None and name.lower() == constants.VALUE_NONE:
+      name = None
     new_disk = {
       constants.IDISK_SIZE: size,
       constants.IDISK_MODE: mode,
       constants.IDISK_VG: data_vg,
+      constants.IDISK_NAME: name,
       }
 
     if constants.IDISK_METAVG in disk:
@@ -10079,6 +10282,19 @@ class LUInstanceCreate(LogicalUnit):
     # check nics' parameter names
     for nic in self.op.nics:
       utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
+    # check that NIC's parameters names are unique and valid
+    utils.ValidateDeviceNames("NIC", self.op.nics)
+
+    # check that disk's names are unique and valid
+    utils.ValidateDeviceNames("disk", self.op.disks)
+
+    cluster = self.cfg.GetClusterInfo()
+    if self.op.disk_template not in cluster.enabled_disk_templates:
+      raise errors.OpPrereqError("Cannot create an instance with disk template"
+                                 " '%s', because it is not enabled in the"
+                                 " cluster. Enabled disk templates are: %s." %
+                                 (self.op.disk_template,
+                                  ",".join(cluster.enabled_disk_templates)))
 
     # check disks. parameter names and consistent adopt/no-adopt strategy
     has_adopt = has_no_adopt = False
@@ -10284,7 +10500,7 @@ class LUInstanceCreate(LogicalUnit):
     """
     if self.op.opportunistic_locking:
       # Only consider nodes for which a lock is held
-      node_whitelist = self.owned_locks(locking.LEVEL_NODE)
+      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
     else:
       node_whitelist = None
 
@@ -10344,8 +10560,8 @@ class LUInstanceCreate(LogicalUnit):
       vcpus=self.be_full[constants.BE_VCPUS],
       nics=_NICListToTuple(self, self.nics),
       disk_template=self.op.disk_template,
-      disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
-             for d in self.disks],
+      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
+             d[constants.IDISK_MODE]) for d in self.disks],
       bep=self.be_full,
       hvp=self.hv_full,
       hypervisor_name=self.op.hypervisor,
@@ -10594,7 +10810,8 @@ class LUInstanceCreate(LogicalUnit):
     hv_type.CheckParameterSyntax(filled_hvp)
     self.hv_full = filled_hvp
     # check that we don't specify global parameters on an instance
-    _CheckGlobalHvParams(self.op.hvparams)
+    _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
+                          "instance", "cluster")
 
     # fill and remember the beparams dict
     self.be_full = _ComputeFullBeParams(self.op, cluster)
@@ -10692,14 +10909,15 @@ class LUInstanceCreate(LogicalUnit):
     # Fill in any IPs from IP pools. This must happen here, because we need to
     # know the nic's primary node, as specified by the iallocator
     for idx, nic in enumerate(self.nics):
-      net = nic.network
-      if net is not None:
-        netparams = self.cfg.GetGroupNetParams(net, self.pnode.name)
+      net_uuid = nic.network
+      if net_uuid is not None:
+        nobj = self.cfg.GetNetwork(net_uuid)
+        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
         if netparams is None:
           raise errors.OpPrereqError("No netparams found for network"
                                      " %s. Propably not connected to"
                                      " node's %s nodegroup" %
-                                     (net, self.pnode.name),
+                                     (nobj.name, self.pnode.name),
                                      errors.ECODE_INVAL)
         self.LogInfo("NIC/%d inherits netparams %s" %
                      (idx, netparams.values()))
@@ -10707,19 +10925,19 @@ class LUInstanceCreate(LogicalUnit):
         if nic.ip is not None:
           if nic.ip.lower() == constants.NIC_IP_POOL:
             try:
-              nic.ip = self.cfg.GenerateIp(net, self.proc.GetECId())
+              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
             except errors.ReservationError:
               raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                          " from the address pool" % idx,
                                          errors.ECODE_STATE)
-            self.LogInfo("Chose IP %s from network %s", nic.ip, net)
+            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
           else:
             try:
-              self.cfg.ReserveIp(net, nic.ip, self.proc.GetECId())
+              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
             except errors.ReservationError:
               raise errors.OpPrereqError("IP address %s already in use"
                                          " or does not belong to network %s" %
-                                         (nic.ip, net),
+                                         (nic.ip, nobj.name),
                                          errors.ECODE_NOTUNIQUE)
 
       # net is None, ip None or given
@@ -10755,25 +10973,6 @@ class LUInstanceCreate(LogicalUnit):
 
     nodenames = [pnode.name] + self.secondaries
 
-    # Verify instance specs
-    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
-    ispec = {
-      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
-      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
-      constants.ISPEC_DISK_COUNT: len(self.disks),
-      constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
-      constants.ISPEC_NIC_COUNT: len(self.nics),
-      constants.ISPEC_SPINDLE_USE: spindle_use,
-      }
-
-    group_info = self.cfg.GetNodeGroup(pnode.group)
-    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
-    if not self.op.ignore_ipolicy and res:
-      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
-             (pnode.group, group_info.name, utils.CommaJoin(res)))
-      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-
     if not self.adopt_disks:
       if self.op.disk_template == constants.DT_RBD:
         # _CheckRADOSFreeSpace() is just a placeholder.
@@ -10872,12 +11071,12 @@ class LUInstanceCreate(LogicalUnit):
 
     group_info = self.cfg.GetNodeGroup(pnode.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
+    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
+                                               self.op.disk_template)
     if not self.op.ignore_ipolicy and res:
-      raise errors.OpPrereqError(("Instance allocation to group %s violates"
-                                  " policy: %s") % (pnode.group,
-                                                    utils.CommaJoin(res)),
-                                  errors.ECODE_INVAL)
+      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
+             (pnode.group, group_info.name, utils.CommaJoin(res)))
+      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
     _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
 
@@ -10968,12 +11167,9 @@ class LUInstanceCreate(LogicalUnit):
       try:
         _CreateDisks(self, iobj)
       except errors.OpExecError:
-        self.LogWarning("Device creation failed, reverting...")
-        try:
-          _RemoveDisks(self, iobj)
-        finally:
-          self.cfg.ReleaseDRBDMinors(instance)
-          raise
+        self.LogWarning("Device creation failed")
+        self.cfg.ReleaseDRBDMinors(instance)
+        raise
 
     feedback_fn("adding instance %s to cluster config" % instance)
 
@@ -11136,7 +11332,7 @@ class LUInstanceCreate(LogicalUnit):
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
       result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
-                                            False)
+                                            False, self.op.reason)
       result.Raise("Could not start instance")
 
     return list(iobj.all_nodes)
@@ -11228,7 +11424,7 @@ class LUInstanceMultiAlloc(NoHooksLU):
 
     if self.op.opportunistic_locking:
       # Only consider nodes for which a lock is held
-      node_whitelist = self.owned_locks(locking.LEVEL_NODE)
+      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
     else:
       node_whitelist = None
 
@@ -11724,7 +11920,7 @@ class TLReplaceDisks(Tasklet):
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               new_group_info)
       _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
-                              ignore=self.ignore_ipolicy)
+                              self.cfg, ignore=self.ignore_ipolicy)
 
     for node in check_nodes:
       _CheckNodeOnline(self.lu, node)
@@ -12820,12 +13016,13 @@ class LUInstanceQueryData(NoHooksLU):
 
       self.needed_locks[locking.LEVEL_NODEGROUP] = []
       self.needed_locks[locking.LEVEL_NODE] = []
+      self.needed_locks[locking.LEVEL_NETWORK] = []
       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if self.op.use_locking:
+      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
       if level == locking.LEVEL_NODEGROUP:
-        owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
 
         # Lock all groups used by instances optimistically; this requires going
         # via the node before it's locked, requiring verification later on
@@ -12838,6 +13035,13 @@ class LUInstanceQueryData(NoHooksLU):
       elif level == locking.LEVEL_NODE:
         self._LockInstancesNodes()
 
+      elif level == locking.LEVEL_NETWORK:
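+        # Lock all networks used by the owned instances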
+        self.needed_locks[locking.LEVEL_NETWORK] = \
+          frozenset(net_uuid
+                    for instance_name in owned_instances
+                    for net_uuid in
+                       self.cfg.GetInstanceNetworks(instance_name))
+
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -12847,6 +13051,7 @@ class LUInstanceQueryData(NoHooksLU):
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
     owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
 
     if self.wanted_names is None:
       assert self.op.use_locking, "Locking was not used"
@@ -12858,7 +13063,8 @@ class LUInstanceQueryData(NoHooksLU):
       _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
                                 None)
     else:
-      assert not (owned_instances or owned_groups or owned_nodes)
+      assert not (owned_instances or owned_groups or
+                  owned_nodes or owned_networks)
 
     self.wanted_instances = instances.values()
 
@@ -12927,6 +13133,8 @@ class LUInstanceQueryData(NoHooksLU):
       "children": dev_children,
       "mode": dev.mode,
       "size": dev.size,
+      "name": dev.name,
+      "uuid": dev.uuid,
       }
 
   def Exec(self, feedback_fn):
@@ -12942,7 +13150,6 @@ class LUInstanceQueryData(NoHooksLU):
                                                  for node in nodes.values()))
 
     group2name_fn = lambda uuid: groups[uuid].name
-
     for instance in self.wanted_instances:
       pnode = nodes[instance.primary_node]
 
@@ -13023,6 +13230,42 @@ def PrepareContainerMods(mods, private_fn):
   return [(op, idx, params, fn()) for (op, idx, params) in mods]
 
 
+def GetItemFromContainer(identifier, kind, container):
+  """Return the item refered by the identifier.
+
+  @type identifier: string
+  @param identifier: Item index or name or UUID
+  @type kind: string
+  @param kind: One-word item description
+  @type container: list
+  @param container: Container to get the item from
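+  @rtype: tuple
+  @return: The (absolute index, item) pair for the given identifier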
+
+  """
+  # Index
+  try:
+    idx = int(identifier)
+    if idx == -1:
+      # -1 denotes the last item
+      absidx = len(container) - 1
+    elif idx < 0:
+      raise IndexError("Not accepting negative indices other than -1")
+    elif idx >= len(container):
+      raise IndexError("Got %s index %s, but there are only %s" %
+                       (kind, idx, len(container)))
+    else:
+      absidx = idx
+    return (absidx, container[absidx])
+  except ValueError:
+    pass
+
+  for idx, item in enumerate(container):
+    if item.uuid == identifier or item.name == identifier:
+      return (idx, item)
+
+  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
+                             (kind, identifier), errors.ECODE_NOENT)
+
+
 #: Type description for changes as returned by L{ApplyContainerMods}'s
 #: callbacks
 _TApplyContModsCbChanges = \
@@ -13059,25 +13302,26 @@ def ApplyContainerMods(kind, container, chgdesc, mods,
     item and private data object as added by L{PrepareContainerMods}
 
   """
-  for (op, idx, params, private) in mods:
-    if idx == -1:
-      # Append
-      absidx = len(container) - 1
-    elif idx < 0:
-      raise IndexError("Not accepting negative indices other than -1")
-    elif idx > len(container):
-      raise IndexError("Got %s index %s, but there are only %s" %
-                       (kind, idx, len(container)))
-    else:
-      absidx = idx
-
+  for (op, identifier, params, private) in mods:
     changes = None
 
     if op == constants.DDM_ADD:
       # Calculate where item will be added
+      # When adding an item, identifier can only be an index
+      try:
+        idx = int(identifier)
+      except ValueError:
+        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
+                                   " identifier for %s" % constants.DDM_ADD,
+                                   errors.ECODE_INVAL)
       if idx == -1:
         addidx = len(container)
       else:
+        if idx < 0:
+          raise IndexError("Not accepting negative indices other than -1")
+        elif idx > len(container):
+          raise IndexError("Got %s index %s, but there are only %s" %
+                           (kind, idx, len(container)))
         addidx = idx
 
       if create_fn is None:
@@ -13094,10 +13338,7 @@ def ApplyContainerMods(kind, container, chgdesc, mods,
         container.insert(idx, item)
     else:
       # Retrieve existing item
-      try:
-        item = container[absidx]
-      except IndexError:
-        raise IndexError("Invalid %s index %s" % (kind, idx))
+      (absidx, item) = GetItemFromContainer(identifier, kind, container)
 
       if op == constants.DDM_REMOVE:
         assert not params
@@ -13223,19 +13464,21 @@ class LUInstanceSetParams(LogicalUnit):
                                    errors.ECODE_INVAL)
 
       params[constants.IDISK_SIZE] = size
+      name = params.get(constants.IDISK_NAME, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.IDISK_NAME] = None
 
     elif op == constants.DDM_MODIFY:
       if constants.IDISK_SIZE in params:
         raise errors.OpPrereqError("Disk size change not possible, use"
                                    " grow-disk", errors.ECODE_INVAL)
-      if constants.IDISK_MODE not in params:
-        raise errors.OpPrereqError("Disk 'mode' is the only kind of"
-                                   " modification supported, but missing",
-                                   errors.ECODE_NOENT)
-      if len(params) > 1:
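+      # only the 'mode' and 'name' parameters may be changed, hence at most two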
+      if len(params) > 2:
         raise errors.OpPrereqError("Disk modification doesn't support"
                                    " additional arbitrary parameters",
                                    errors.ECODE_INVAL)
+      name = params.get(constants.IDISK_NAME, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.IDISK_NAME] = None
 
   @staticmethod
   def _VerifyNicModification(op, params):
@@ -13244,9 +13487,12 @@ class LUInstanceSetParams(LogicalUnit):
     """
     if op in (constants.DDM_ADD, constants.DDM_MODIFY):
       ip = params.get(constants.INIC_IP, None)
+      name = params.get(constants.INIC_NAME, None)
       req_net = params.get(constants.INIC_NETWORK, None)
       link = params.get(constants.NIC_LINK, None)
       mode = params.get(constants.NIC_MODE, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.INIC_NAME] = None
       if req_net is not None:
         if req_net.lower() == constants.VALUE_NONE:
           params[constants.INIC_NETWORK] = None
@@ -13288,11 +13534,13 @@ class LUInstanceSetParams(LogicalUnit):
   def CheckArguments(self):
     if not (self.op.nics or self.op.disks or self.op.disk_template or
             self.op.hvparams or self.op.beparams or self.op.os_name or
-            self.op.offline is not None or self.op.runtime_mem):
+            self.op.offline is not None or self.op.runtime_mem or
+            self.op.pnode):
       raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
 
     if self.op.hvparams:
-      _CheckGlobalHvParams(self.op.hvparams)
+      _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
+                            "hypervisor", "instance", "cluster")
 
     self.op.disks = self._UpgradeDiskNicMods(
       "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
@@ -13315,6 +13563,9 @@ class LUInstanceSetParams(LogicalUnit):
     self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                     self._VerifyNicModification)
 
+    if self.op.pnode:
+      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODEGROUP] = []
@@ -13385,7 +13636,7 @@ class LUInstanceSetParams(LogicalUnit):
     nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
     return (nl, nl)
 
-  def _PrepareNicModification(self, params, private, old_ip, old_net,
+  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                               old_params, cluster, pnode):
 
     update_params_dict = dict([(key, params[key])
@@ -13395,13 +13646,21 @@ class LUInstanceSetParams(LogicalUnit):
     req_link = update_params_dict.get(constants.NIC_LINK, None)
     req_mode = update_params_dict.get(constants.NIC_MODE, None)
 
-    new_net = params.get(constants.INIC_NETWORK, old_net)
-    if new_net is not None:
-      netparams = self.cfg.GetGroupNetParams(new_net, pnode)
-      if netparams is None:
+    new_net_uuid = None
+    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
+    if new_net_uuid_or_name:
+      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
+      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
+
+    if old_net_uuid:
+      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
+
+    if new_net_uuid:
+      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
+      if not netparams:
         raise errors.OpPrereqError("No netparams found for the network"
-                                   " %s, probably not connected" % new_net,
-                                   errors.ECODE_INVAL)
+                                   " %s, probably not connected" %
+                                   new_net_obj.name, errors.ECODE_INVAL)
       new_params = dict(netparams)
     else:
       new_params = _GetUpdatedParams(old_params, update_params_dict)
@@ -13440,7 +13699,7 @@ class LUInstanceSetParams(LogicalUnit):
       elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
         # otherwise generate the MAC address
         params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(new_net, self.proc.GetECId())
+          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
       else:
         # or validate/reserve the current one
         try:
@@ -13449,62 +13708,66 @@ class LUInstanceSetParams(LogicalUnit):
           raise errors.OpPrereqError("MAC address '%s' already in use"
                                      " in cluster" % mac,
                                      errors.ECODE_NOTUNIQUE)
-    elif new_net != old_net:
-
-      def get_net_prefix(net):
-        if net:
-          uuid = self.cfg.LookupNetwork(net)
-          if uuid:
-            nobj = self.cfg.GetNetwork(uuid)
-            return nobj.mac_prefix
-        return None
-
-      new_prefix = get_net_prefix(new_net)
-      old_prefix = get_net_prefix(old_net)
+    elif new_net_uuid != old_net_uuid:
+
+      def get_net_prefix(net_uuid):
+        mac_prefix = None
+        if net_uuid:
+          nobj = self.cfg.GetNetwork(net_uuid)
+          mac_prefix = nobj.mac_prefix
+
+        return mac_prefix
+
+      new_prefix = get_net_prefix(new_net_uuid)
+      old_prefix = get_net_prefix(old_net_uuid)
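+      # Regenerate the MAC only if the prefix changes, so a NIC keeps its
+      # MAC when moving between networks that share the same prefix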
       if old_prefix != new_prefix:
         params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(new_net, self.proc.GetECId())
+          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
 
-    #if there is a change in nic-network configuration
+    # if there is a change in (ip, network) tuple
     new_ip = params.get(constants.INIC_IP, old_ip)
-    if (new_ip, new_net) != (old_ip, old_net):
+    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
       if new_ip:
-        if new_net:
-          if new_ip.lower() == constants.NIC_IP_POOL:
+        # ip=pool requires a network; generate one free IP from its pool
+        if new_ip.lower() == constants.NIC_IP_POOL:
+          if new_net_uuid:
             try:
-              new_ip = self.cfg.GenerateIp(new_net, self.proc.GetECId())
+              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
             except errors.ReservationError:
               raise errors.OpPrereqError("Unable to get a free IP"
                                          " from the address pool",
                                          errors.ECODE_STATE)
-            self.LogInfo("Chose IP %s from pool %s", new_ip, new_net)
+            self.LogInfo("Chose IP %s from network %s",
+                         new_ip,
+                         new_net_obj.name)
             params[constants.INIC_IP] = new_ip
-          elif new_ip != old_ip or new_net != old_net:
-            try:
-              self.LogInfo("Reserving IP %s in pool %s", new_ip, new_net)
-              self.cfg.ReserveIp(new_net, new_ip, self.proc.GetECId())
-            except errors.ReservationError:
-              raise errors.OpPrereqError("IP %s not available in network %s" %
-                                         (new_ip, new_net),
-                                         errors.ECODE_NOTUNIQUE)
-        elif new_ip.lower() == constants.NIC_IP_POOL:
-          raise errors.OpPrereqError("ip=pool, but no network found",
-                                     errors.ECODE_INVAL)
-
-        # new net is None
+          else:
+            raise errors.OpPrereqError("ip=pool, but no network found",
+                                       errors.ECODE_INVAL)
+        # Reserve the new IP in the new network, if any
+        elif new_net_uuid:
+          try:
+            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
+            self.LogInfo("Reserving IP %s in network %s",
+                         new_ip, new_net_obj.name)
+          except errors.ReservationError:
+            raise errors.OpPrereqError("IP %s not available in network %s" %
+                                       (new_ip, new_net_obj.name),
+                                       errors.ECODE_NOTUNIQUE)
+        # new network is None, so check the new IP for conflicts
         elif self.op.conflicts_check:
           _CheckForConflictingIp(self, new_ip, pnode)
 
-      if old_ip:
-        if old_net:
-          try:
-            self.cfg.ReleaseIp(old_net, old_ip, self.proc.GetECId())
-          except errors.AddressPoolError:
-            logging.warning("Release IP %s not contained in network %s",
-                            old_ip, old_net)
+      # release old IP if old network is not None
+      if old_ip and old_net_uuid:
+        try:
+          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
+        except errors.AddressPoolError:
+          logging.warning("Release IP %s not contained in network %s",
+                          old_ip, old_net_obj.name)
 
-    # there are no changes in (net, ip) tuple
-    elif (old_net is not None and
+    # there are no changes in (ip, network) tuple and old network is not None
+    elif (old_net_uuid is not None and
           (req_link is not None or req_mode is not None)):
       raise errors.OpPrereqError("Not allowed to change link or mode of"
                                  " a NIC that is connected to a network",
@@ -13550,7 +13813,7 @@ class LUInstanceSetParams(LogicalUnit):
       snode_group = self.cfg.GetNodeGroup(snode_info.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               snode_group)
-      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
+      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                               ignore=self.op.ignore_ipolicy)
       if pnode_info.group != snode_info.group:
         self.LogWarning("The primary and secondary nodes are in two"
@@ -13585,6 +13848,21 @@ class LUInstanceSetParams(LogicalUnit):
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
     pnode = instance.primary_node
+
+    self.warn = []
+
+    if (self.op.pnode is not None and self.op.pnode != pnode and
+        not self.op.force):
+      # verify that the instance is not up
+      instance_info = self.rpc.call_instance_info(pnode, instance.name,
+                                                  instance.hypervisor)
+      if instance_info.fail_msg:
+        self.warn.append("Can't get instance runtime information: %s" %
+                         instance_info.fail_msg)
+      elif instance_info.payload:
+        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
+                                   errors.ECODE_STATE)
+
     assert pnode in self.owned_locks(locking.LEVEL_NODE)
     nodelist = list(instance.all_nodes)
     pnode_info = self.cfg.GetNodeInfo(pnode)
@@ -13717,8 +13995,6 @@ class LUInstanceSetParams(LogicalUnit):
     else:
       self.os_inst = {}
 
-    self.warn = []
-
     #TODO(dynmem): do the appropriate check involving MINMEM
     if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
         be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
@@ -13836,9 +14112,14 @@ class LUInstanceSetParams(LogicalUnit):
                                  " (%d), cannot add more" % constants.MAX_NICS,
                                  errors.ECODE_STATE)
 
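+    # Helper that only sets the disk name; used to pre-compute the resulting
+    # names on a copy of the disks so their uniqueness can be verified below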
+    def _PrepareDiskMod(_, disk, params, __):
+      disk.name = params.get(constants.IDISK_NAME, None)
+
     # Verify disk changes (operating on a copy)
-    disks = instance.disks[:]
-    ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
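+    # a deep copy is needed because _PrepareDiskMod mutates the disk objects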
+    disks = copy.deepcopy(instance.disks)
+    ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
+                       None)
+    utils.ValidateDeviceNames("disk", disks)
     if len(disks) > constants.MAX_DISKS:
       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                  " more" % constants.MAX_DISKS,
@@ -13860,6 +14141,8 @@ class LUInstanceSetParams(LogicalUnit):
       nics = [nic.Copy() for nic in instance.nics]
       ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
+      # Verify that NIC names are unique and valid
+      utils.ValidateDeviceNames("NIC", nics)
       self._new_nics = nics
       ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
     else:
@@ -13877,14 +14160,20 @@ class LUInstanceSetParams(LogicalUnit):
                                                          None)
 
       # Copy ispec to verify parameters with min/max values separately
+      if self.op.disk_template:
+        new_disk_template = self.op.disk_template
+      else:
+        new_disk_template = instance.disk_template
       ispec_max = ispec.copy()
       ispec_max[constants.ISPEC_MEM_SIZE] = \
         self.be_new.get(constants.BE_MAXMEM, None)
-      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max)
+      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
+                                                     new_disk_template)
       ispec_min = ispec.copy()
       ispec_min[constants.ISPEC_MEM_SIZE] = \
         self.be_new.get(constants.BE_MINMEM, None)
-      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min)
+      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
+                                                     new_disk_template)
 
       if (res_max or res_min):
         # FIXME: Improve error message by including information about whether
@@ -13907,7 +14196,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # create a fake disk info for _GenerateDiskTemplate
     disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
-                  constants.IDISK_VG: d.logical_id[0]}
+                  constants.IDISK_VG: d.logical_id[0],
+                  constants.IDISK_NAME: d.name}
                  for d in instance.disks]
     new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                       instance.name, pnode, [snode],
@@ -13937,11 +14227,22 @@ class LUInstanceSetParams(LogicalUnit):
 
     feedback_fn("Initializing DRBD devices...")
     # all child devices are in place, we can now create the DRBD devices
-    for disk in anno_disks:
-      for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
-        f_create = node == pnode
-        _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
-                              excl_stor)
+    try:
+      for disk in anno_disks:
+        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
+          f_create = node == pnode
+          _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
+                                excl_stor)
+    except errors.GenericError, e:
+      feedback_fn("Initializing of DRBD devices failed;"
+                  " renaming back original volumes...")
+      for disk in new_disks:
+        self.cfg.SetDiskID(disk, pnode)
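+      # The new DRBD disks wrap the original LVs as their first child;
+      # rename those LVs back to their original logical IDs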
+      rename_back_list = [(n.children[0], o.logical_id)
+                          for (n, o) in zip(new_disks, instance.disks)]
+      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
+      result.Raise("Failed to rename LVs back after error %s" % str(e))
+      raise
 
     # at this point, the instance has been modified
     instance.disk_template = constants.DT_DRBD8
@@ -13976,10 +14277,11 @@ class LUInstanceSetParams(LogicalUnit):
     old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
     new_disks = [d.children[0] for d in instance.disks]
 
-    # copy over size and mode
+    # copy over size, mode and name
     for parent, child in zip(old_disks, new_disks):
       child.size = parent.size
       child.mode = parent.mode
+      child.name = parent.name
 
     # this is a DRBD disk, return its port to the pool
     # NOTE: this must be done right before the call to cfg.Update!
@@ -13990,6 +14292,7 @@ class LUInstanceSetParams(LogicalUnit):
     # update instance structure
     instance.disks = new_disks
     instance.disk_template = constants.DT_PLAIN
+    _UpdateIvNames(0, instance.disks)
     self.cfg.Update(instance, feedback_fn)
 
     # Release locks in case removing disks takes a while
@@ -14045,6 +14348,11 @@ class LUInstanceSetParams(LogicalUnit):
         self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
                         disk.iv_name, disk, node, err)
 
+    if self.cluster.prealloc_wipe_disks:
+      # Wipe the newly added disk; the tuple is (index, disk, start offset)
+      _WipeDisks(self, instance,
+                 disks=[(idx, disk, 0)])
+
     return (disk, [
       ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
       ])
@@ -14054,11 +14362,17 @@ class LUInstanceSetParams(LogicalUnit):
     """Modifies a disk.
 
     """
-    disk.mode = params[constants.IDISK_MODE]
+    changes = []
+    mode = params.get(constants.IDISK_MODE, None)
+    if mode:
+      disk.mode = mode
+      changes.append(("disk.mode/%d" % idx, disk.mode))
 
-    return [
-      ("disk.mode/%d" % idx, disk.mode),
-      ]
+    # only rename the disk if a new name was actually given; "none" was
+    # already mapped to None by _VerifyDiskModification
+    if constants.IDISK_NAME in params:
+      disk.name = params[constants.IDISK_NAME]
+      changes.append(("disk.name/%d" % idx, disk.name))
+
+    return changes
 
   def _RemoveDisk(self, idx, root, _):
     """Removes a disk.
@@ -14076,18 +14390,22 @@ class LUInstanceSetParams(LogicalUnit):
     if root.dev_type in constants.LDS_DRBD:
       self.cfg.AddTcpUdpPort(root.logical_id[2])
 
-  @staticmethod
-  def _CreateNewNic(idx, params, private):
+  def _CreateNewNic(self, idx, params, private):
     """Creates data structure for a new network interface.
 
     """
     mac = params[constants.INIC_MAC]
     ip = params.get(constants.INIC_IP, None)
     net = params.get(constants.INIC_NETWORK, None)
+    name = params.get(constants.INIC_NAME, None)
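+    # LookupNetwork resolves a network name or UUID to the network's UUID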
+    net_uuid = self.cfg.LookupNetwork(net)
+    # TODO: can a NIC have empty nicparams (i.e. "not private.filled")?
     nicparams = private.filled
+    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
+                       nicparams=nicparams)
+    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
 
-    return (objects.NIC(mac=mac, ip=ip, network=net, nicparams=nicparams), [
+    return (nobj, [
       ("nic.%d" % idx,
        "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
        (mac, ip, private.filled[constants.NIC_MODE],
@@ -14095,18 +14413,23 @@ class LUInstanceSetParams(LogicalUnit):
        net)),
       ])
 
-  @staticmethod
-  def _ApplyNicMods(idx, nic, params, private):
+  def _ApplyNicMods(self, idx, nic, params, private):
     """Modifies a network interface.
 
     """
     changes = []
 
-    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NETWORK]:
+    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
       if key in params:
         changes.append(("nic.%s/%d" % (key, idx), params[key]))
         setattr(nic, key, params[key])
 
+    new_net = params.get(constants.INIC_NETWORK, nic.network)
+    new_net_uuid = self.cfg.LookupNetwork(new_net)
+    if new_net_uuid != nic.network:
+      changes.append(("nic.network/%d" % idx, new_net))
+      nic.network = new_net_uuid
+
     if private.filled:
       nic.nicparams = private.filled
 
@@ -14134,6 +14457,10 @@ class LUInstanceSetParams(LogicalUnit):
     result = []
     instance = self.instance
 
+    # New primary node
+    if self.op.pnode:
+      instance.primary_node = self.op.pnode
+
     # runtime memory
     if self.op.runtime_mem:
       rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
@@ -14720,7 +15047,8 @@ class LUBackupExport(LogicalUnit):
       # shutdown the instance, but not the disks
       feedback_fn("Shutting down instance %s" % instance.name)
       result = self.rpc.call_instance_shutdown(src_node, instance,
-                                               self.op.shutdown_timeout)
+                                               self.op.shutdown_timeout,
+                                               self.op.reason)
       # TODO: Maybe ignore failures if ignore_remove_failures is set
       result.Raise("Could not shutdown instance %s on"
                    " node %s" % (instance.name, src_node))
@@ -14749,7 +15077,8 @@ class LUBackupExport(LogicalUnit):
           assert not activate_disks
           feedback_fn("Starting instance %s" % instance.name)
           result = self.rpc.call_instance_start(src_node,
-                                                (instance, None, None), False)
+                                                (instance, None, None), False,
+                                                 self.op.reason)
           msg = result.fail_msg
           if msg:
             feedback_fn("Failed to start instance: %s" % msg)
@@ -15310,7 +15639,7 @@ class LUGroupSetParams(LogicalUnit):
       violations = \
           _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                                   self.group),
-                                        new_ipolicy, instances)
+                                        new_ipolicy, instances, self.cfg)
 
       if violations:
         self.LogWarning("After the ipolicy change the following instances"
@@ -16130,7 +16459,8 @@ class LUTestAllocator(NoHooksLU):
                                           nics=self.op.nics,
                                           vcpus=self.op.vcpus,
                                           spindle_use=self.op.spindle_use,
-                                          hypervisor=self.op.hypervisor)
+                                          hypervisor=self.op.hypervisor,
+                                          node_whitelist=None)
     elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
       req = iallocator.IAReqRelocate(name=self.op.name,
                                      relocate_from=list(self.relocate_from))
@@ -16207,11 +16537,15 @@ class LUNetworkAdd(LogicalUnit):
       raise errors.OpPrereqError("Network must be given",
                                  errors.ECODE_INVAL)
 
-    uuid = self.cfg.LookupNetwork(self.op.network_name)
-
-    if uuid:
-      raise errors.OpPrereqError(("Network with name '%s' already exists" %
-                                  self.op.network_name), errors.ECODE_EXISTS)
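+    # LookupNetwork raises OpPrereqError if the network does not exist, so
+    # a successful lookup here means the name is already taken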
+    try:
+      existing_uuid = self.cfg.LookupNetwork(self.op.network_name)
+    except errors.OpPrereqError:
+      pass
+    else:
+      raise errors.OpPrereqError("Desired network name '%s' already exists as a"
+                                 " network (UUID: %s)" %
+                                 (self.op.network_name, existing_uuid),
+                                 errors.ECODE_EXISTS)
 
     # Check tag validity
     for tag in self.op.tags:
@@ -16228,7 +16562,6 @@ class LUNetworkAdd(LogicalUnit):
       "network6": self.op.network6,
       "gateway6": self.op.gateway6,
       "mac_prefix": self.op.mac_prefix,
-      "network_type": self.op.network_type,
       "tags": self.op.tags,
       }
     return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
@@ -16243,14 +16576,13 @@ class LUNetworkAdd(LogicalUnit):
                            network6=self.op.network6,
                            gateway6=self.op.gateway6,
                            mac_prefix=self.op.mac_prefix,
-                           network_type=self.op.network_type,
-                           uuid=self.network_uuid,
-                           family=constants.IP4_VERSION)
+                           uuid=self.network_uuid)
     # Initialize the associated address pool
     try:
       pool = network.AddressPool.InitializeNetwork(nobj)
-    except errors.AddressPoolError, e:
-      raise errors.OpExecError("Cannot create IP pool for this network: %s" % e)
+    except errors.AddressPoolError, err:
+      raise errors.OpExecError("Cannot create IP address pool for network"
+                               " '%s': %s" % (self.op.network_name, err))
 
     # Check if we need to reserve the nodes and the cluster master IP
     # These may not be allocated to any instances in routed mode, as
@@ -16263,25 +16595,26 @@ class LUNetworkAdd(LogicalUnit):
               pool.Reserve(ip)
               self.LogInfo("Reserved IP address of node '%s' (%s)",
                            node.name, ip)
-          except errors.AddressPoolError:
-            self.LogWarning("Cannot reserve IP address of node '%s' (%s)",
-                            node.name, ip)
+          except errors.AddressPoolError, err:
+            self.LogWarning("Cannot reserve IP address '%s' of node '%s': %s",
+                            ip, node.name, err)
 
       master_ip = self.cfg.GetClusterInfo().master_ip
       try:
         if pool.Contains(master_ip):
           pool.Reserve(master_ip)
           self.LogInfo("Reserved cluster master IP address (%s)", master_ip)
-      except errors.AddressPoolError:
-        self.LogWarning("Cannot reserve cluster master IP address (%s)",
-                        master_ip)
+      except errors.AddressPoolError, err:
+        self.LogWarning("Cannot reserve cluster master IP address (%s): %s",
+                        master_ip, err)
 
     if self.op.add_reserved_ips:
       for ip in self.op.add_reserved_ips:
         try:
           pool.Reserve(ip, external=True)
-        except errors.AddressPoolError, e:
-          raise errors.OpExecError("Cannot reserve IP %s. %s " % (ip, e))
+        except errors.AddressPoolError, err:
+          raise errors.OpExecError("Cannot reserve IP address '%s': %s" %
+                                   (ip, err))
 
     if self.op.tags:
       for tag in self.op.tags:
@@ -16299,10 +16632,6 @@ class LUNetworkRemove(LogicalUnit):
   def ExpandNames(self):
     self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
 
-    if not self.network_uuid:
-      raise errors.OpPrereqError(("Network '%s' not found" %
-                                  self.op.network_name), errors.ECODE_NOENT)
-
     self.share_locks[locking.LEVEL_NODEGROUP] = 1
     self.needed_locks = {
       locking.LEVEL_NETWORK: [self.network_uuid],
@@ -16371,9 +16700,6 @@ class LUNetworkSetParams(LogicalUnit):
 
   def ExpandNames(self):
     self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
-    if self.network_uuid is None:
-      raise errors.OpPrereqError(("Network '%s' not found" %
-                                  self.op.network_name), errors.ECODE_NOENT)
 
     self.needed_locks = {
       locking.LEVEL_NETWORK: [self.network_uuid],
@@ -16385,7 +16711,6 @@ class LUNetworkSetParams(LogicalUnit):
     """
     self.network = self.cfg.GetNetwork(self.network_uuid)
     self.gateway = self.network.gateway
-    self.network_type = self.network.network_type
     self.mac_prefix = self.network.mac_prefix
     self.network6 = self.network.network6
     self.gateway6 = self.network.gateway6
@@ -16403,12 +16728,6 @@ class LUNetworkSetParams(LogicalUnit):
                                      " reserved" % self.gateway,
                                      errors.ECODE_STATE)
 
-    if self.op.network_type:
-      if self.op.network_type == constants.VALUE_NONE:
-        self.network_type = None
-      else:
-        self.network_type = self.op.network_type
-
     if self.op.mac_prefix:
       if self.op.mac_prefix == constants.VALUE_NONE:
         self.mac_prefix = None
@@ -16439,7 +16758,6 @@ class LUNetworkSetParams(LogicalUnit):
       "network6": self.network6,
       "gateway6": self.gateway6,
       "mac_prefix": self.mac_prefix,
-      "network_type": self.network_type,
       "tags": self.tags,
       }
     return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
@@ -16499,9 +16817,6 @@ class LUNetworkSetParams(LogicalUnit):
     if self.op.gateway6:
       self.network.gateway6 = self.gateway6
 
-    if self.op.network_type:
-      self.network.network_type = self.network_type
-
     self.pool.Validate()
 
     self.cfg.Update(self.network, feedback_fn)
@@ -16554,8 +16869,6 @@ class _NetworkQuery(_QueryBase):
     network_uuids = self._GetNames(lu, all_networks.keys(),
                                    locking.LEVEL_NETWORK)
 
-    name_to_uuid = dict((n.name, n.uuid) for n in all_networks.values())
-
     do_instances = query.NETQ_INST in self.requested_data
     do_groups = query.NETQ_GROUP in self.requested_data
 
@@ -16580,10 +16893,8 @@ class _NetworkQuery(_QueryBase):
       network_to_instances = dict((uuid, []) for uuid in network_uuids)
       for instance in all_instances.values():
         for nic in instance.nics:
-          if nic.network:
-            net_uuid = name_to_uuid[nic.network]
-            if net_uuid in network_uuids:
-              network_to_instances[net_uuid].append(instance.name)
+          if nic.network in network_uuids:
+            network_to_instances[nic.network].append(instance.name)
             break
 
     if query.NETQ_STATS in self.requested_data:
@@ -16646,14 +16957,7 @@ class LUNetworkConnect(LogicalUnit):
     self.network_link = self.op.network_link
 
     self.network_uuid = self.cfg.LookupNetwork(self.network_name)
-    if self.network_uuid is None:
-      raise errors.OpPrereqError("Network '%s' does not exist" %
-                                 self.network_name, errors.ECODE_NOENT)
-
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
-    if self.group_uuid is None:
-      raise errors.OpPrereqError("Group '%s' does not exist" %
-                                 self.group_name, errors.ECODE_NOENT)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
@@ -16692,6 +16996,11 @@ class LUNetworkConnect(LogicalUnit):
 
     assert self.group_uuid in owned_groups
 
+    # Check if locked instances are still correct
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    if self.op.conflicts_check:
+      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+
     self.netparams = {
       constants.NIC_MODE: self.network_mode,
       constants.NIC_LINK: self.network_link,
@@ -16706,23 +17015,22 @@ class LUNetworkConnect(LogicalUnit):
       self.LogWarning("Network '%s' is already mapped to group '%s'" %
                       (self.network_name, self.group.name))
       self.connected = True
-      return
 
-    if self.op.conflicts_check:
+    # check only if not already connected
+    elif self.op.conflicts_check:
       pool = network.AddressPool(self.cfg.GetNetwork(self.network_uuid))
 
       _NetworkConflictCheck(self, lambda nic: pool.Contains(nic.ip),
-                            "connect to")
+                            "connect to", owned_instances)
 
   def Exec(self, feedback_fn):
-    if self.connected:
-      return
-
-    self.group.networks[self.network_uuid] = self.netparams
-    self.cfg.Update(self.group, feedback_fn)
+    # Connect the network and update the group only if not already connected
+    if not self.connected:
+      self.group.networks[self.network_uuid] = self.netparams
+      self.cfg.Update(self.group, feedback_fn)
 
 
-def _NetworkConflictCheck(lu, check_fn, action):
+def _NetworkConflictCheck(lu, check_fn, action, instances):
   """Checks for network interface conflicts with a network.
 
   @type lu: L{LogicalUnit}
@@ -16734,13 +17042,9 @@ def _NetworkConflictCheck(lu, check_fn, action):
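+  @type instances: iterable of strings
+  @param instances: Instance names to check for conflicting IP addresses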
   @raise errors.OpPrereqError: If conflicting IP addresses are found.
 
   """
-  # Check if locked instances are still correct
-  owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
-  _CheckNodeGroupInstances(lu.cfg, lu.group_uuid, owned_instances)
-
   conflicts = []
 
-  for (_, instance) in lu.cfg.GetMultiInstanceInfo(owned_instances):
+  for (_, instance) in lu.cfg.GetMultiInstanceInfo(instances):
     instconflicts = [(idx, nic.ip)
                      for (idx, nic) in enumerate(instance.nics)
                      if check_fn(nic)]
@@ -16782,14 +17086,7 @@ class LUNetworkDisconnect(LogicalUnit):
     self.group_name = self.op.group_name
 
     self.network_uuid = self.cfg.LookupNetwork(self.network_name)
-    if self.network_uuid is None:
-      raise errors.OpPrereqError("Network '%s' does not exist" %
-                                 self.network_name, errors.ECODE_NOENT)
-
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
-    if self.group_uuid is None:
-      raise errors.OpPrereqError("Group '%s' does not exist" %
-                                 self.group_name, errors.ECODE_NOENT)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
@@ -16803,9 +17100,8 @@ class LUNetworkDisconnect(LogicalUnit):
 
       # Lock instances optimistically, needs verification once group lock has
       # been acquired
-      if self.op.conflicts_check:
-        self.needed_locks[locking.LEVEL_INSTANCE] = \
-          self.cfg.GetNodeGroupInstances(self.group_uuid)
+      self.needed_locks[locking.LEVEL_INSTANCE] = \
+        self.cfg.GetNodeGroupInstances(self.group_uuid)
 
   def BuildHooksEnv(self):
     ret = {
@@ -16822,24 +17118,27 @@ class LUNetworkDisconnect(LogicalUnit):
 
     assert self.group_uuid in owned_groups
 
+    # Check if locked instances are still correct
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True
     if self.network_uuid not in self.group.networks:
       self.LogWarning("Network '%s' is not mapped to group '%s'",
                       self.network_name, self.group.name)
       self.connected = False
-      return
 
-    if self.op.conflicts_check:
-      _NetworkConflictCheck(self, lambda nic: nic.network == self.network_name,
-                            "disconnect from")
+    # We need this check only if network is not already connected
+    else:
+      _NetworkConflictCheck(self, lambda nic: nic.network == self.network_uuid,
+                            "disconnect from", owned_instances)
 
   def Exec(self, feedback_fn):
-    if not self.connected:
-      return
-
-    del self.group.networks[self.network_uuid]
-    self.cfg.Update(self.group, feedback_fn)
+    # Disconnect the network and update the group only if network is connected
+    if self.connected:
+      del self.group.networks[self.network_uuid]
+      self.cfg.Update(self.group, feedback_fn)
 
 
 #: Query type implementations
@@ -16881,7 +17180,8 @@ def _CheckForConflictingIp(lu, ip, node):
   """
   (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
   if conf_net is not None:
-    raise errors.OpPrereqError(("Conflicting IP address found: '%s' != '%s'" %
+    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
+                                " network %s, but the target NIC does not." %
                                 (ip, conf_net)),
                                errors.ECODE_STATE)