gnt-cluster modify/init: deprecate --no-lvm-storage
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 0b1d5ce..bba8277 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -697,6 +697,39 @@ def _SupportsOob(cfg, node):
   return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
 
 
+def _IsExclusiveStorageEnabledNode(cfg, node):
+  """Whether exclusive_storage is in effect for the given node.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: The cluster configuration
+  @type node: L{objects.Node}
+  @param node: The node
+  @rtype: bool
+  @return: The effective value of exclusive_storage
+
+  """
+  return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
+
+
+def _IsExclusiveStorageEnabledNodeName(cfg, nodename):
+  """Whether exclusive_storage is in effect for the given node.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: The cluster configuration
+  @type nodename: string
+  @param nodename: The node name
+  @rtype: bool
+  @return: The effective value of exclusive_storage
+  @raise errors.OpPrereqError: if no node exists with the given name
+
+  """
+  ni = cfg.GetNodeInfo(nodename)
+  if ni is None:
+    raise errors.OpPrereqError("Invalid node name %s" % nodename,
+                               errors.ECODE_NOENT)
+  return _IsExclusiveStorageEnabledNode(cfg, ni)
+
+
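
Both helpers above reduce to the same pattern: resolve a node (by object or by name) through the cluster configuration, then read the effective ndparams value. A minimal standalone sketch of that pattern, where FakeConfig, the parameter name and the plain exception are stand-ins rather than the Ganeti API:

    ND_EXCLUSIVE_STORAGE = "exclusive_storage"  # stand-in for the constant

    class FakeConfig(object):
        """Stand-in for config.ConfigWriter with a fixed node table."""
        _NODES = {"node1": {ND_EXCLUSIVE_STORAGE: True},
                  "node2": {ND_EXCLUSIVE_STORAGE: False}}

        def GetNodeInfo(self, name):
            # the real ConfigWriter also returns None for unknown names
            return name if name in self._NODES else None

        def GetNdParams(self, node):
            return self._NODES[node]

    def is_exclusive_storage_enabled(cfg, nodename):
        ni = cfg.GetNodeInfo(nodename)
        if ni is None:
            raise ValueError("Invalid node name %s" % nodename)
        return cfg.GetNdParams(ni)[ND_EXCLUSIVE_STORAGE]

    print(is_exclusive_storage_enabled(FakeConfig(), "node1"))  # True
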
 def _CopyLockList(names):
   """Makes a copy of a list of lock names.
 
@@ -780,8 +813,22 @@ def _GetUpdatedParams(old_params, update_dict,
   return params_copy
 
 
+def _UpdateMinMaxISpecs(ipolicy, new_minmax, group_policy):
+  use_none = use_default = group_policy
+  minmax = ipolicy.setdefault(constants.ISPECS_MINMAX, {})
+  for (key, value) in new_minmax.items():
+    if key not in constants.ISPECS_MINMAX_KEYS:
+      raise errors.OpPrereqError("Invalid key in new ipolicy/%s: %s" %
+                                 (constants.ISPECS_MINMAX, key),
+                                 errors.ECODE_INVAL)
+    old_spec = minmax.get(key, {})
+    minmax[key] = _GetUpdatedParams(old_spec, value, use_none=use_none,
+                                    use_default=use_default)
+    utils.ForceDictType(minmax[key], constants.ISPECS_PARAMETER_TYPES)
+
+
 def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
-  """Return the new version of a instance policy.
+  """Return the new version of an instance policy.
 
   @param group_policy: whether this policy applies to a group and thus
     we should support removal of policy entries
@@ -793,11 +840,13 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
     if key not in constants.IPOLICY_ALL_KEYS:
       raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                  errors.ECODE_INVAL)
-    if key in constants.IPOLICY_ISPECS:
-      utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
+    if key == constants.ISPECS_MINMAX:
+      _UpdateMinMaxISpecs(ipolicy, value, group_policy)
+    elif key == constants.ISPECS_STD:
       ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=use_none,
                                        use_default=use_default)
+      utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
     else:
       if (not value or value == [constants.VALUE_DEFAULT] or
           value == constants.VALUE_DEFAULT):
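
After this change an instance policy keeps its min/max specs nested under a single ISPECS_MINMAX key, with ISPECS_STD as a sibling. A standalone sketch of the merge performed by _UpdateMinMaxISpecs, with a plain dict.update standing in for _GetUpdatedParams:

    ISPECS_MINMAX_KEYS = frozenset(["min", "max"])  # stand-in for the constant

    def update_minmax_ispecs(ipolicy, new_minmax):
        minmax = ipolicy.setdefault("minmax", {})
        for key, value in new_minmax.items():
            if key not in ISPECS_MINMAX_KEYS:
                raise ValueError("Invalid key in new ipolicy/minmax: %s" % key)
            merged = dict(minmax.get(key, {}))
            merged.update(value)  # simplified stand-in for _GetUpdatedParams
            minmax[key] = merged

    pol = {"minmax": {"min": {"memory-size": 128}}, "std": {"memory-size": 256}}
    update_minmax_ispecs(pol, {"max": {"memory-size": 4096}})
    print(pol)  # "min" and "std" are kept, "max" is merged in under "minmax"
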
@@ -987,18 +1036,32 @@ def _CheckOutputFields(static, dynamic, selected):
                                % ",".join(delta), errors.ECODE_INVAL)
 
 
-def _CheckGlobalHvParams(params):
-  """Validates that given hypervisor params are not global ones.
+def _CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
+  """Make sure that none of the given paramters is global.
+
+  If a global parameter is found, an L{errors.OpPrereqError} exception is
+  raised. This is used to avoid setting global parameters for individual nodes.
 
-  This will ensure that instances don't get customised versions of
-  global params.
+  @type params: dictionary
+  @param params: Parameters to check
+  @type glob_pars: dictionary
+  @param glob_pars: Forbidden parameters
+  @type kind: string
+  @param kind: Kind of parameters (e.g. "node")
+  @type bad_levels: string
+  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
+      "instance")
+  @type good_levels: string
+  @param good_levels: Level(s) at which the parameters are allowed (e.g.
+      "cluster or group")
 
   """
-  used_globals = constants.HVC_GLOBALS.intersection(params)
+  used_globals = glob_pars.intersection(params)
   if used_globals:
-    msg = ("The following hypervisor parameters are global and cannot"
-           " be customized at instance level, please modify them at"
-           " cluster level: %s" % utils.CommaJoin(used_globals))
+    msg = ("The following %s parameters are global and cannot"
+           " be customized at %s level, please modify them at"
+           " %s level: %s" %
+           (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
     raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
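
_CheckParamsNotGlobal generalizes the old hypervisor-only check, so the same set-intersection test now covers node parameters as well (see its use in LUNodeAdd below). A runnable sketch with a plain exception in place of OpPrereqError:

    def check_params_not_global(params, glob_pars, kind, bad_levels,
                                good_levels):
        used_globals = set(glob_pars).intersection(params)
        if used_globals:
            raise ValueError("The following %s parameters are global and"
                             " cannot be customized at %s level, please"
                             " modify them at %s level: %s" %
                             (kind, bad_levels, good_levels,
                              ", ".join(sorted(used_globals))))

    try:
        check_params_not_global({"oob_program": "/bin/true"}, {"oob_program"},
                                "node", "node", "cluster or group")
    except ValueError as err:
        print(err)
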
 
 
@@ -1088,6 +1151,35 @@ def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
       raise errors.OpExecError(msg)
 
 
+def _CheckNodePVs(nresult, exclusive_storage):
+  """Check node PVs.
+
+  """
+  pvlist_dict = nresult.get(constants.NV_PVLIST, None)
+  if pvlist_dict is None:
+    return (["Can't get PV list from node"], None)
+  pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
+  errlist = []
+  # check that ':' is not present in PV names, since it's a
+  # special character for lvcreate (denotes the range of PEs to
+  # use on the PV)
+  for pv in pvlist:
+    if ":" in pv.name:
+      errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
+                     (pv.name, pv.vg_name))
+  es_pvinfo = None
+  if exclusive_storage:
+    (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
+    errlist.extend(errmsgs)
+    shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
+    if shared_pvs:
+      for (pvname, lvlist) in shared_pvs:
+        # TODO: Check that LVs are really unrelated (snapshots, DRBD meta...)
+        errlist.append("PV %s is shared among unrelated LVs (%s)" %
+                       (pvname, utils.CommaJoin(lvlist)))
+  return (errlist, es_pvinfo)
+
+
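
_CheckNodePVs factors PV validation out of cluster verification so the same checks can run when a node is added (see LUNodeAdd further below). A standalone sketch of the ':' check alone, with a namedtuple standing in for objects.LvmPvInfo:

    import collections

    PvInfo = collections.namedtuple("PvInfo", ["name", "vg_name"])

    def check_pv_names(pvlist):
        # ':' is special to lvcreate: it delimits the range of physical
        # extents to use on a PV
        return ["Invalid character ':' in PV '%s' of VG '%s'" %
                (pv.name, pv.vg_name)
                for pv in pvlist if ":" in pv.name]

    print(check_pv_names([PvInfo("/dev/sda1", "xenvg"),
                          PvInfo("/dev/path:0", "xenvg")]))  # one error
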
 def _GetClusterDomainSecret():
   """Reads the cluster domain secret.
 
@@ -1127,22 +1219,21 @@ def _CheckInstanceState(lu, instance, req_states, msg=None):
                      " is down")
 
 
-def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
+def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
   """Computes if value is in the desired range.
 
   @param name: name of the parameter for which we perform the check
   @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
       not just 'disk')
-  @param ipolicy: dictionary containing min, max and std values
+  @param ispecs: dictionary containing min and max values
   @param value: actual value that we want to use
-  @return: None or element not meeting the criteria
-
+  @return: None or an error string
 
   """
   if value in [None, constants.VALUE_AUTO]:
     return None
-  max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
-  min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
+  max_v = ispecs[constants.ISPECS_MAX].get(name, value)
+  min_v = ispecs[constants.ISPECS_MIN].get(name, value)
   if value > max_v or min_v > value:
     if qualifier:
       fqn = "%s/%s" % (name, qualifier)
@@ -1155,6 +1246,7 @@ def _ComputeMinMaxSpec(name, qualifier, ipolicy, value):
 
 def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                  nic_count, disk_sizes, spindle_use,
+                                 disk_template,
                                  _compute_fn=_ComputeMinMaxSpec):
   """Verifies ipolicy against provided specs.
 
@@ -1172,6 +1264,8 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
   @type spindle_use: int
   @param spindle_use: The number of spindles this instance uses
+  @type disk_template: string
+  @param disk_template: The disk template of the instance
   @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found
 
@@ -1181,18 +1275,26 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   test_settings = [
     (constants.ISPEC_MEM_SIZE, "", mem_size),
     (constants.ISPEC_CPU_COUNT, "", cpu_count),
-    (constants.ISPEC_DISK_COUNT, "", disk_count),
     (constants.ISPEC_NIC_COUNT, "", nic_count),
     (constants.ISPEC_SPINDLE_USE, "", spindle_use),
     ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
          for idx, d in enumerate(disk_sizes)]
-
-  return filter(None,
-                (_compute_fn(name, qualifier, ipolicy, value)
-                 for (name, qualifier, value) in test_settings))
-
-
-def _ComputeIPolicyInstanceViolation(ipolicy, instance,
+  if disk_template != constants.DT_DISKLESS:
+    # This check doesn't make sense for diskless instances
+    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
+  ret = []
+  allowed_dts = ipolicy[constants.IPOLICY_DTS]
+  if disk_template not in allowed_dts:
+    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
+               (disk_template, utils.CommaJoin(allowed_dts)))
+
+  minmax = ipolicy[constants.ISPECS_MINMAX]
+  return ret + filter(None,
+                      (_compute_fn(name, qualifier, minmax, value)
+                       for (name, qualifier, value) in test_settings))
+
+
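
Two behavioural changes land in _ComputeIPolicySpecViolation above: the disk-count test is skipped for diskless instances, and the instance's disk template must be in the policy's allowed set (IPOLICY_DTS). A sketch of the template check in isolation:

    def check_disk_template(ipolicy, disk_template):
        allowed_dts = ipolicy["disk-templates"]  # stand-in for IPOLICY_DTS
        if disk_template not in allowed_dts:
            return ["Disk template %s is not allowed (allowed templates: %s)"
                    % (disk_template, ", ".join(allowed_dts))]
        return []

    print(check_disk_template({"disk-templates": ["plain", "drbd"]}, "file"))
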
+def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                      _compute_fn=_ComputeIPolicySpecViolation):
   """Compute if instance meets the specs of ipolicy.
 
@@ -1200,29 +1302,36 @@ def _ComputeIPolicyInstanceViolation(ipolicy, instance,
   @param ipolicy: The ipolicy to verify against
   @type instance: L{objects.Instance}
   @param instance: The instance to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
   """
-  mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
-  cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
-  spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
+  be_full = cfg.GetClusterInfo().FillBE(instance)
+  mem_size = be_full[constants.BE_MAXMEM]
+  cpu_count = be_full[constants.BE_VCPUS]
+  spindle_use = be_full[constants.BE_SPINDLE_USE]
   disk_count = len(instance.disks)
   disk_sizes = [disk.size for disk in instance.disks]
   nic_count = len(instance.nics)
+  disk_template = instance.disk_template
 
   return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes, spindle_use)
+                     disk_sizes, spindle_use, disk_template)
 
 
 def _ComputeIPolicyInstanceSpecViolation(
-  ipolicy, instance_spec, _compute_fn=_ComputeIPolicySpecViolation):
+  ipolicy, instance_spec, disk_template,
+  _compute_fn=_ComputeIPolicySpecViolation):
   """Compute if instance specs meets the specs of ipolicy.
 
   @type ipolicy: dict
   @param ipolicy: The ipolicy to verify against
   @param instance_spec: dict
   @param instance_spec: The instance spec to verify
+  @type disk_template: string
+  @param disk_template: the disk template of the instance
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
@@ -1235,11 +1344,11 @@ def _ComputeIPolicyInstanceSpecViolation(
   spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
 
   return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes, spindle_use)
+                     disk_sizes, spindle_use, disk_template)
 
 
 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
-                                 target_group,
+                                 target_group, cfg,
                                  _compute_fn=_ComputeIPolicyInstanceViolation):
   """Compute if instance meets the specs of the new target group.
 
@@ -1247,6 +1356,8 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   @param instance: The instance object to verify
   @param current_group: The current group of the instance
   @param target_group: The new group of the instance
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
@@ -1254,23 +1365,25 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   if current_group == target_group:
     return []
   else:
-    return _compute_fn(ipolicy, instance)
+    return _compute_fn(ipolicy, instance, cfg)
 
 
-def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
+def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                             _compute_fn=_ComputeIPolicyNodeViolation):
   """Checks that the target node is correct in terms of instance policy.
 
   @param ipolicy: The ipolicy to verify
   @param instance: The instance object to verify
   @param node: The new node to relocate
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param ignore: Ignore violations of the ipolicy
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
   """
   primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
-  res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
+  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)
 
   if res:
     msg = ("Instance does not meet target node group's (%s) instance"
@@ -1281,18 +1394,20 @@ def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
 
-def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
+def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
   """Computes a set of any instances that would violate the new ipolicy.
 
   @param old_ipolicy: The current (still in-place) ipolicy
   @param new_ipolicy: The new (to become) ipolicy
   @param instances: List of instances to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @return: A list of instances which violates the new ipolicy but
       did not before
 
   """
-  return (_ComputeViolatingInstances(new_ipolicy, instances) -
-          _ComputeViolatingInstances(old_ipolicy, instances))
+  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
+          _ComputeViolatingInstances(old_ipolicy, instances, cfg))
 
 
 def _ExpandItemName(fn, name, kind):
@@ -1323,7 +1438,7 @@ def _ExpandInstanceName(cfg, name):
 
 
 def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
-                         network_type, mac_prefix, tags):
+                         mac_prefix, tags):
   """Builds network related env variables for hooks
 
   This builds the hook environment from individual variables.
@@ -1338,8 +1453,6 @@ def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
   @param network6: the ipv6 subnet
   @type gateway6: string
   @param gateway6: the ipv6 gateway
-  @type network_type: string
-  @param network_type: the type of the network
   @type mac_prefix: string
   @param mac_prefix: the mac_prefix
   @type tags: list
@@ -1359,8 +1472,6 @@ def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
     env["NETWORK_GATEWAY6"] = gateway6
   if mac_prefix:
     env["NETWORK_MAC_PREFIX"] = mac_prefix
-  if network_type:
-    env["NETWORK_TYPE"] = network_type
   if tags:
     env["NETWORK_TAGS"] = " ".join(tags)
 
@@ -1391,12 +1502,12 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   @type vcpus: string
   @param vcpus: the count of VCPUs the instance has
   @type nics: list
-  @param nics: list of tuples (ip, mac, mode, link, network) representing
-      the NICs the instance has
+  @param nics: list of tuples (name, uuid, ip, mac, mode, link, net, netinfo)
+      representing the NICs the instance has
   @type disk_template: string
   @param disk_template: the disk template of the instance
   @type disks: list
-  @param disks: the list of (size, mode) pairs
+  @param disks: list of tuples (name, uuid, size, mode)
   @type bep: dict
   @param bep: the backend parameters for the instance
   @type hvp: dict
@@ -1418,7 +1529,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     "INSTANCE_STATUS": status,
     "INSTANCE_MINMEM": minmem,
     "INSTANCE_MAXMEM": maxmem,
-    # TODO(2.7) remove deprecated "memory" value
+    # TODO(2.9) remove deprecated "memory" value
     "INSTANCE_MEMORY": maxmem,
     "INSTANCE_VCPUS": vcpus,
     "INSTANCE_DISK_TEMPLATE": disk_template,
@@ -1426,31 +1537,22 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   }
   if nics:
     nic_count = len(nics)
-    for idx, (ip, mac, mode, link, net, netinfo) in enumerate(nics):
+    for idx, (name, _, ip, mac, mode, link, net, netinfo) in enumerate(nics):
       if ip is None:
         ip = ""
+      env["INSTANCE_NIC%d_NAME" % idx] = name
       env["INSTANCE_NIC%d_IP" % idx] = ip
       env["INSTANCE_NIC%d_MAC" % idx] = mac
       env["INSTANCE_NIC%d_MODE" % idx] = mode
       env["INSTANCE_NIC%d_LINK" % idx] = link
-      if network:
-        env["INSTANCE_NIC%d_NETWORK" % idx] = net
-        if netinfo:
-          nobj = objects.Network.FromDict(netinfo)
-          if nobj.network:
-            env["INSTANCE_NIC%d_NETWORK_SUBNET" % idx] = nobj.network
-          if nobj.gateway:
-            env["INSTANCE_NIC%d_NETWORK_GATEWAY" % idx] = nobj.gateway
-          if nobj.network6:
-            env["INSTANCE_NIC%d_NETWORK_SUBNET6" % idx] = nobj.network6
-          if nobj.gateway6:
-            env["INSTANCE_NIC%d_NETWORK_GATEWAY6" % idx] = nobj.gateway6
-          if nobj.mac_prefix:
-            env["INSTANCE_NIC%d_NETWORK_MAC_PREFIX" % idx] = nobj.mac_prefix
-          if nobj.network_type:
-            env["INSTANCE_NIC%d_NETWORK_TYPE" % idx] = nobj.network_type
-          if nobj.tags:
-            env["INSTANCE_NIC%d_NETWORK_TAGS" % idx] = " ".join(nobj.tags)
+      if netinfo:
+        nobj = objects.Network.FromDict(netinfo)
+        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
+      elif net:
+        # FIXME: broken network reference: the instance NIC specifies a
+        # network, but the relevant network entry was not in the config. This
+        # should be made impossible.
+        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
       if mode == constants.NIC_MODE_BRIDGED:
         env["INSTANCE_NIC%d_BRIDGE" % idx] = link
   else:
@@ -1460,7 +1562,8 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   if disks:
     disk_count = len(disks)
-    for idx, (size, mode) in enumerate(disks):
+    for idx, (name, size, mode) in enumerate(disks):
+      env["INSTANCE_DISK%d_NAME" % idx] = name
       env["INSTANCE_DISK%d_SIZE" % idx] = size
       env["INSTANCE_DISK%d_MODE" % idx] = mode
   else:
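
The NIC and disk tuples fed to the hook environment now carry names (and, for NICs, UUIDs and network info). A simplified sketch of how the per-index variables are derived; the six-field NIC tuple below omits the net/netinfo handling shown above:

    def build_nic_env(nics):
        env = {"INSTANCE_NIC_COUNT": len(nics)}
        for idx, (name, _uuid, ip, mac, mode, link) in enumerate(nics):
            env["INSTANCE_NIC%d_NAME" % idx] = name
            env["INSTANCE_NIC%d_IP" % idx] = ip or ""
            env["INSTANCE_NIC%d_MAC" % idx] = mac
            env["INSTANCE_NIC%d_MODE" % idx] = mode
            env["INSTANCE_NIC%d_LINK" % idx] = link
            if mode == "bridged":
                env["INSTANCE_NIC%d_BRIDGE" % idx] = link
        return env

    nics = [("eth0", "some-uuid", "192.0.2.10", "aa:00:00:00:00:01",
             "bridged", "br0")]
    for key, val in sorted(build_nic_env(nics).items()):
        print("%s=%s" % (key, val))
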
@@ -1489,20 +1592,15 @@ def _NICToTuple(lu, nic):
   @param nic: nic to convert to hooks tuple
 
   """
-  ip = nic.ip
-  mac = nic.mac
   cluster = lu.cfg.GetClusterInfo()
   filled_params = cluster.SimpleFillNIC(nic.nicparams)
   mode = filled_params[constants.NIC_MODE]
   link = filled_params[constants.NIC_LINK]
-  net = nic.network
   netinfo = None
-  if net:
-    net_uuid = lu.cfg.LookupNetwork(net)
-    if net_uuid:
-      nobj = lu.cfg.GetNetwork(net_uuid)
-      netinfo = objects.Network.ToDict(nobj)
-  return (ip, mac, mode, link, net, netinfo)
+  if nic.network:
+    nobj = lu.cfg.GetNetwork(nic.network)
+    netinfo = objects.Network.ToDict(nobj)
+  return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, nic.network, netinfo)
 
 
 def _NICListToTuple(lu, nics):
@@ -1552,7 +1650,8 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
     "vcpus": bep[constants.BE_VCPUS],
     "nics": _NICListToTuple(lu, instance.nics),
     "disk_template": instance.disk_template,
-    "disks": [(disk.size, disk.mode) for disk in instance.disks],
+    "disks": [(disk.name, disk.size, disk.mode)
+              for disk in instance.disks],
     "bep": bep,
     "hvp": hvp,
     "hypervisor_name": instance.hypervisor,
@@ -1590,17 +1689,19 @@ def _DecideSelfPromotion(lu, exceptions=None):
   return mc_now < mc_should
 
 
-def _ComputeViolatingInstances(ipolicy, instances):
+def _ComputeViolatingInstances(ipolicy, instances, cfg):
   """Computes a set of instances who violates given ipolicy.
 
   @param ipolicy: The ipolicy to verify
-  @type instances: object.Instance
+  @type instances: L{objects.Instance}
   @param instances: List of instances to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @return: A frozenset of instance names violating the ipolicy
 
   """
   return frozenset([inst.name for inst in instances
-                    if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
+                    if _ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
 
 
 def _CheckNicsBridgesExist(lu, target_nics, target_node):
@@ -1965,6 +2066,10 @@ class _VerifyErrors(object):
     """
     ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
     itype, etxt, _ = ecode
+    # If the error code is in the list of ignored errors, demote the error to a
+    # warning
+    if etxt in self.op.ignore_errors:     # pylint: disable=E1101
+      ltype = self.ETYPE_WARNING
     # first complete the msg
     if args:
       msg = msg % args
@@ -1979,26 +2084,17 @@ class _VerifyErrors(object):
       msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
     # and finally report it via the feedback_fn
     self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable=E1101
+    # do not mark the operation as failed for WARN cases only
+    if ltype == self.ETYPE_ERROR:
+      self.bad = True
 
-  def _ErrorIf(self, cond, ecode, *args, **kwargs):
+  def _ErrorIf(self, cond, *args, **kwargs):
     """Log an error message if the passed condition is True.
 
     """
-    cond = (bool(cond)
-            or self.op.debug_simulate_errors) # pylint: disable=E1101
-
-    # If the error code is in the list of ignored errors, demote the error to a
-    # warning
-    (_, etxt, _) = ecode
-    if etxt in self.op.ignore_errors:     # pylint: disable=E1101
-      kwargs[self.ETYPE_FIELD] = self.ETYPE_WARNING
-
-    if cond:
-      self._Error(ecode, *args, **kwargs)
-
-    # do not mark the operation as failed for WARN cases only
-    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
-      self.bad = self.bad or cond
+    if (bool(cond)
+        or self.op.debug_simulate_errors): # pylint: disable=E1101
+      self._Error(*args, **kwargs)
 
 
 class LUClusterVerify(NoHooksLU):
@@ -2060,7 +2156,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
       msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
              (item, hv_name))
       try:
-        hv_class = hypervisor.GetHypervisor(hv_name)
+        hv_class = hypervisor.GetHypervisorClass(hv_name)
         utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
         hv_class.CheckParameterSyntax(hv_params)
       except errors.GenericError, err:
@@ -2180,6 +2276,10 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @ivar oslist: list of OSes as diagnosed by DiagnoseOS
     @type vm_capable: boolean
     @ivar vm_capable: whether the node can host instances
+    @type pv_min: float
+    @ivar pv_min: size in MiB of the smallest PV
+    @type pv_max: float
+    @ivar pv_max: size in MiB of the biggest PV
 
     """
     def __init__(self, offline=False, name=None, vm_capable=True):
@@ -2199,6 +2299,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       self.ghost = False
       self.os_fail = False
       self.oslist = {}
+      self.pv_min = None
+      self.pv_max = None
 
   def ExpandNames(self):
     # This raises errors.OpPrereqError on its own:
@@ -2400,13 +2502,15 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
              "Node time diverges by at least %s from master node time",
              ntime_diff)
 
-  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
-    """Check the node LVM results.
+  def _UpdateVerifyNodeLVM(self, ninfo, nresult, vg_name, nimg):
+    """Check the node LVM results and update info for cross-node checks.
 
     @type ninfo: L{objects.Node}
     @param ninfo: the node to check
     @param nresult: the remote results for the node
     @param vg_name: the configured VG name
+    @type nimg: L{NodeImage}
+    @param nimg: node image
 
     """
     if vg_name is None:
@@ -2424,19 +2528,42 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                                             constants.MIN_VG_SIZE)
       _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
 
-    # check pv names
-    pvlist = nresult.get(constants.NV_PVLIST, None)
-    test = pvlist is None
-    _ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
-    if not test:
-      # check that ':' is not present in PV names, since it's a
-      # special character for lvcreate (denotes the range of PEs to
-      # use on the PV)
-      for _, pvname, owner_vg in pvlist:
-        test = ":" in pvname
-        _ErrorIf(test, constants.CV_ENODELVM, node,
-                 "Invalid character ':' in PV '%s' of VG '%s'",
-                 pvname, owner_vg)
+    # Check PVs
+    (errmsgs, pvminmax) = _CheckNodePVs(nresult, self._exclusive_storage)
+    for em in errmsgs:
+      self._Error(constants.CV_ENODELVM, node, em)
+    if pvminmax is not None:
+      (nimg.pv_min, nimg.pv_max) = pvminmax
+
+  def _VerifyGroupLVM(self, node_image, vg_name):
+    """Check cross-node consistency in LVM.
+
+    @type node_image: dict
+    @param node_image: info about nodes, mapping from node names to
+      L{NodeImage} objects
+    @param vg_name: the configured VG name
+
+    """
+    if vg_name is None:
+      return
+
+    # Only exclusive storage needs this kind of check
+    if not self._exclusive_storage:
+      return
+
+    # exclusive_storage wants all PVs to have the same size (approximately),
+    # if the smallest and the biggest ones are okay, everything is fine.
+    # pv_min is None iff pv_max is None
+    vals = filter((lambda ni: ni.pv_min is not None), node_image.values())
+    if not vals:
+      return
+    (pvmin, minnode) = min((ni.pv_min, ni.name) for ni in vals)
+    (pvmax, maxnode) = max((ni.pv_max, ni.name) for ni in vals)
+    bad = utils.LvmExclusiveTestBadPvSizes(pvmin, pvmax)
+    self._ErrorIf(bad, constants.CV_EGROUPDIFFERENTPVSIZE, self.group_info.name,
+                  "PV sizes differ too much in the group; smallest (%s MB) is"
+                  " on %s, biggest (%s MB) is on %s",
+                  pvmin, minnode, pvmax, maxnode)
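
_VerifyGroupLVM adds the first cross-node check: under exclusive storage all PVs in a group should have roughly the same size, so comparing the global minimum against the global maximum is sufficient. A sketch of that reduction over plain-dict node images:

    def group_pv_size_range(node_images):
        # pv_min is None iff pv_max is None, as in the code above
        vals = [ni for ni in node_images if ni["pv_min"] is not None]
        if not vals:
            return None
        pvmin, minnode = min((ni["pv_min"], ni["name"]) for ni in vals)
        pvmax, maxnode = max((ni["pv_max"], ni["name"]) for ni in vals)
        return (pvmin, minnode, pvmax, maxnode)

    imgs = [{"name": "node1", "pv_min": 10240.0, "pv_max": 10240.0},
            {"name": "node2", "pv_min": 10240.0, "pv_max": 20480.0}]
    print(group_pv_size_range(imgs))
    # (10240.0, 'node1', 20480.0, 'node2'), then tested against a tolerance
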
 
   def _VerifyNodeBridges(self, ninfo, nresult, bridges):
     """Check the node bridges.
@@ -2523,24 +2650,27 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
           msg = "cannot reach the master IP"
         _ErrorIf(True, constants.CV_ENODENET, node, msg)
 
-  def _VerifyInstance(self, instance, instanceconfig, node_image,
+  def _VerifyInstance(self, instance, inst_config, node_image,
                       diskstatus):
     """Verify an instance.
 
     This function checks to see if the required block devices are
-    available on the instance's node.
+    available on the instance's node, and that the nodes are in the correct
+    state.
 
     """
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
-    node_current = instanceconfig.primary_node
+    pnode = inst_config.primary_node
+    pnode_img = node_image[pnode]
+    groupinfo = self.cfg.GetAllNodeGroupsInfo()
 
     node_vol_should = {}
-    instanceconfig.MapLVsByNode(node_vol_should)
+    inst_config.MapLVsByNode(node_vol_should)
 
     cluster = self.cfg.GetClusterInfo()
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                             self.group_info)
-    err = _ComputeIPolicyInstanceViolation(ipolicy, instanceconfig)
+    err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
     _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
              code=self.ETYPE_WARNING)
 
@@ -2554,12 +2684,14 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
                  "volume %s missing on node %s", volume, node)
 
-    if instanceconfig.admin_state == constants.ADMINST_UP:
-      pri_img = node_image[node_current]
-      test = instance not in pri_img.instances and not pri_img.offline
+    if inst_config.admin_state == constants.ADMINST_UP:
+      test = instance not in pnode_img.instances and not pnode_img.offline
       _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
                "instance not running on its primary node %s",
-               node_current)
+               pnode)
+      _ErrorIf(pnode_img.offline, constants.CV_EINSTANCEBADNODE, instance,
+               "instance is marked as running and lives on offline node %s",
+               pnode)
 
     diskdata = [(nname, success, status, idx)
                 for (nname, disks) in diskstatus.items()
@@ -2570,16 +2702,80 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       # node here
       snode = node_image[nname]
       bad_snode = snode.ghost or snode.offline
-      _ErrorIf(instanceconfig.admin_state == constants.ADMINST_UP and
+      _ErrorIf(inst_config.admin_state == constants.ADMINST_UP and
                not success and not bad_snode,
                constants.CV_EINSTANCEFAULTYDISK, instance,
                "couldn't retrieve status for disk/%s on %s: %s",
                idx, nname, bdev_status)
-      _ErrorIf((instanceconfig.admin_state == constants.ADMINST_UP and
+      _ErrorIf((inst_config.admin_state == constants.ADMINST_UP and
                 success and bdev_status.ldisk_status == constants.LDS_FAULTY),
                constants.CV_EINSTANCEFAULTYDISK, instance,
                "disk/%s on %s is faulty", idx, nname)
 
+    _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
+             constants.CV_ENODERPC, pnode, "instance %s, connection to"
+             " primary node failed", instance)
+
+    _ErrorIf(len(inst_config.secondary_nodes) > 1,
+             constants.CV_EINSTANCELAYOUT,
+             instance, "instance has multiple secondary nodes: %s",
+             utils.CommaJoin(inst_config.secondary_nodes),
+             code=self.ETYPE_WARNING)
+
+    if inst_config.disk_template not in constants.DTS_EXCL_STORAGE:
+      # Disk template not compatible with exclusive_storage: no instance
+      # node should have the flag set
+      es_flags = rpc.GetExclusiveStorageForNodeNames(self.cfg,
+                                                     inst_config.all_nodes)
+      es_nodes = [n for (n, es) in es_flags.items()
+                  if es]
+      _ErrorIf(es_nodes, constants.CV_EINSTANCEUNSUITABLENODE, instance,
+               "instance has template %s, which is not supported on nodes"
+               " that have exclusive storage set: %s",
+               inst_config.disk_template, utils.CommaJoin(es_nodes))
+
+    if inst_config.disk_template in constants.DTS_INT_MIRROR:
+      instance_nodes = utils.NiceSort(inst_config.all_nodes)
+      instance_groups = {}
+
+      for node in instance_nodes:
+        instance_groups.setdefault(self.all_node_info[node].group,
+                                   []).append(node)
+
+      pretty_list = [
+        "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
+        # Sort so that we always list the primary node first.
+        for group, nodes in sorted(instance_groups.items(),
+                                   key=lambda (_, nodes): pnode in nodes,
+                                   reverse=True)]
+
+      self._ErrorIf(len(instance_groups) > 1,
+                    constants.CV_EINSTANCESPLITGROUPS,
+                    instance, "instance has primary and secondary nodes in"
+                    " different groups: %s", utils.CommaJoin(pretty_list),
+                    code=self.ETYPE_WARNING)
+
+    inst_nodes_offline = []
+    for snode in inst_config.secondary_nodes:
+      s_img = node_image[snode]
+      _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
+               snode, "instance %s, connection to secondary node failed",
+               instance)
+
+      if s_img.offline:
+        inst_nodes_offline.append(snode)
+
+    # warn that the instance lives on offline nodes
+    _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
+             "instance has offline secondary node(s) %s",
+             utils.CommaJoin(inst_nodes_offline))
+    # ... or ghost/non-vm_capable nodes
+    for node in inst_config.all_nodes:
+      _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
+               instance, "instance lives on ghost node %s", node)
+      _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
+               instance, "instance lives on non-vm_capable node %s", node)
+
   def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
     """Verify if there are any unknown volumes in the cluster.
 
@@ -3133,7 +3329,12 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                                  len(s) == 2 for s in statuses)
                       for inst, nnames in instdisk.items()
                       for nname, statuses in nnames.items())
-    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
+    if __debug__:
+      instdisk_keys = set(instdisk)
+      instanceinfo_keys = set(instanceinfo)
+      assert instdisk_keys == instanceinfo_keys, \
+        ("instdisk keys (%s) do not match instanceinfo keys (%s)" %
+         (instdisk_keys, instanceinfo_keys))
 
     return instdisk
 
@@ -3213,7 +3414,6 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     vg_name = self.cfg.GetVGName()
     drbd_helper = self.cfg.GetDRBDHelper()
     cluster = self.cfg.GetClusterInfo()
-    groupinfo = self.cfg.GetAllNodeGroupsInfo()
     hypervisors = cluster.enabled_hypervisors
     node_data_list = [self.my_node_info[name] for name in self.my_node_names]
 
@@ -3332,6 +3532,13 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
           nimg.sbp[pnode] = []
         nimg.sbp[pnode].append(instance)
 
+    es_flags = rpc.GetExclusiveStorageForNodeNames(self.cfg, self.my_node_names)
+    # The value of exclusive_storage should be the same across the group, so if
+    # it's True for at least a node, we act as if it were set for all the nodes
+    self._exclusive_storage = compat.any(es_flags.values())
+    if self._exclusive_storage:
+      node_verify_param[constants.NV_EXCLUSIVEPVS] = True
+
     # At this point, we have the in-memory data structures complete,
     # except for the runtime information, which we'll gather next
 
@@ -3435,7 +3642,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                                    node == master_node)
 
       if nimg.vm_capable:
-        self._VerifyNodeLVM(node_i, nresult, vg_name)
+        self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)
         self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
                              all_drbd_map)
 
@@ -3462,6 +3669,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
           _ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
                    "node is running unknown instance %s", inst)
 
+    self._VerifyGroupLVM(node_image, vg_name)
+
     for node, result in extra_lv_nvinfo.items():
       self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
                               node_image[node], vg_name)
@@ -3473,76 +3682,15 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       inst_config = self.my_inst_info[instance]
       self._VerifyInstance(instance, inst_config, node_image,
                            instdisk[instance])
-      inst_nodes_offline = []
-
-      pnode = inst_config.primary_node
-      pnode_img = node_image[pnode]
-      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
-               constants.CV_ENODERPC, pnode, "instance %s, connection to"
-               " primary node failed", instance)
-
-      _ErrorIf(inst_config.admin_state == constants.ADMINST_UP and
-               pnode_img.offline,
-               constants.CV_EINSTANCEBADNODE, instance,
-               "instance is marked as running and lives on offline node %s",
-               inst_config.primary_node)
 
       # If the instance is non-redundant we cannot survive losing its primary
       # node, so we are not N+1 compliant.
       if inst_config.disk_template not in constants.DTS_MIRRORED:
         i_non_redundant.append(instance)
 
-      _ErrorIf(len(inst_config.secondary_nodes) > 1,
-               constants.CV_EINSTANCELAYOUT,
-               instance, "instance has multiple secondary nodes: %s",
-               utils.CommaJoin(inst_config.secondary_nodes),
-               code=self.ETYPE_WARNING)
-
-      if inst_config.disk_template in constants.DTS_INT_MIRROR:
-        pnode = inst_config.primary_node
-        instance_nodes = utils.NiceSort(inst_config.all_nodes)
-        instance_groups = {}
-
-        for node in instance_nodes:
-          instance_groups.setdefault(self.all_node_info[node].group,
-                                     []).append(node)
-
-        pretty_list = [
-          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
-          # Sort so that we always list the primary node first.
-          for group, nodes in sorted(instance_groups.items(),
-                                     key=lambda (_, nodes): pnode in nodes,
-                                     reverse=True)]
-
-        self._ErrorIf(len(instance_groups) > 1,
-                      constants.CV_EINSTANCESPLITGROUPS,
-                      instance, "instance has primary and secondary nodes in"
-                      " different groups: %s", utils.CommaJoin(pretty_list),
-                      code=self.ETYPE_WARNING)
-
       if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
         i_non_a_balanced.append(instance)
 
-      for snode in inst_config.secondary_nodes:
-        s_img = node_image[snode]
-        _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
-                 snode, "instance %s, connection to secondary node failed",
-                 instance)
-
-        if s_img.offline:
-          inst_nodes_offline.append(snode)
-
-      # warn that the instance lives on offline nodes
-      _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
-               "instance has offline secondary node(s) %s",
-               utils.CommaJoin(inst_nodes_offline))
-      # ... or ghost/non-vm_capable nodes
-      for node in inst_config.all_nodes:
-        _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
-                 instance, "instance lives on ghost node %s", node)
-        _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
-                 instance, "instance lives on non-vm_capable node %s", node)
-
     feedback_fn("* Verifying orphan volumes")
     reserved = utils.FieldSet(*cluster.reserved_lvs)
 
@@ -4064,11 +4212,10 @@ class LUClusterSetParams(LogicalUnit):
     mn = self.cfg.GetMasterNode()
     return ([mn], [mn])
 
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks whether the given params don't conflict and
-    if the given volume group is valid.
+  def _CheckVgName(self, node_list, enabled_disk_templates,
+                   new_enabled_disk_templates):
+    """Check the consistency of the vg name on all nodes and in case it gets
+       unset whether there are instances still using it.
 
     """
     if self.op.vg_name is not None and not self.op.vg_name:
@@ -4076,6 +4223,55 @@ class LUClusterSetParams(LogicalUnit):
         raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                    " instances exist", errors.ECODE_INVAL)
 
+    if (self.op.vg_name is not None and
+        utils.IsLvmEnabled(enabled_disk_templates)) or \
+           (self.cfg.GetVGName() is not None and
+            utils.LvmGetsEnabled(enabled_disk_templates,
+                                 new_enabled_disk_templates)):
+      self._CheckVgNameOnNodes(node_list)
+
+  def _CheckVgNameOnNodes(self, node_list):
+    """Check the status of the volume group on each node.
+
+    """
+    vglist = self.rpc.call_vg_list(node_list)
+    for node in node_list:
+      msg = vglist[node].fail_msg
+      if msg:
+        # ignoring down node
+        self.LogWarning("Error while gathering data on node %s"
+                        " (ignoring node): %s", node, msg)
+        continue
+      vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
+                                            self.op.vg_name,
+                                            constants.MIN_VG_SIZE)
+      if vgstatus:
+        raise errors.OpPrereqError("Error on node '%s': %s" %
+                                   (node, vgstatus), errors.ECODE_ENVIRON)
+
+  def _GetEnabledDiskTemplates(self, cluster):
+    """Determines the enabled disk templates and the subset of disk templates
+       that are newly enabled by this operation.
+
+    """
+    enabled_disk_templates = None
+    new_enabled_disk_templates = []
+    if self.op.enabled_disk_templates:
+      enabled_disk_templates = self.op.enabled_disk_templates
+      new_enabled_disk_templates = \
+        list(set(enabled_disk_templates)
+             - set(cluster.enabled_disk_templates))
+    else:
+      enabled_disk_templates = cluster.enabled_disk_templates
+    return (enabled_disk_templates, new_enabled_disk_templates)
+
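
_GetEnabledDiskTemplates is essentially a set difference; a usage sketch with illustrative template names:

    def get_enabled_disk_templates(requested, currently_enabled):
        if requested:
            newly_enabled = list(set(requested) - set(currently_enabled))
            return (requested, newly_enabled)
        return (currently_enabled, [])

    print(get_enabled_disk_templates(["plain", "drbd"], ["drbd"]))
    # (['plain', 'drbd'], ['plain'])
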
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks whether the given params don't conflict and
+    if the given volume group is valid.
+
+    """
     if self.op.drbd_helper is not None and not self.op.drbd_helper:
       if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
         raise errors.OpPrereqError("Cannot disable drbd helper while"
@@ -4083,23 +4279,13 @@ class LUClusterSetParams(LogicalUnit):
                                    errors.ECODE_INVAL)
 
     node_list = self.owned_locks(locking.LEVEL_NODE)
+    self.cluster = cluster = self.cfg.GetClusterInfo()
 
-    # if vg_name not None, checks given volume group on all nodes
-    if self.op.vg_name:
-      vglist = self.rpc.call_vg_list(node_list)
-      for node in node_list:
-        msg = vglist[node].fail_msg
-        if msg:
-          # ignoring down node
-          self.LogWarning("Error while gathering data on node %s"
-                          " (ignoring node): %s", node, msg)
-          continue
-        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
-                                              self.op.vg_name,
-                                              constants.MIN_VG_SIZE)
-        if vgstatus:
-          raise errors.OpPrereqError("Error on node '%s': %s" %
-                                     (node, vgstatus), errors.ECODE_ENVIRON)
+    (enabled_disk_templates, new_enabled_disk_templates) = \
+      self._GetEnabledDiskTemplates(cluster)
+
+    self._CheckVgName(node_list, enabled_disk_templates,
+                      new_enabled_disk_templates)
 
     if self.op.drbd_helper:
       # checks given drbd helper on all nodes
@@ -4118,7 +4304,6 @@ class LUClusterSetParams(LogicalUnit):
           raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                      (node, node_helper), errors.ECODE_ENVIRON)
 
-    self.cluster = cluster = self.cfg.GetClusterInfo()
     # validate params changes
     if self.op.beparams:
       objects.UpgradeBeParams(self.op.beparams)
@@ -4162,7 +4347,7 @@ class LUClusterSetParams(LogicalUnit):
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group)
         new = _ComputeNewInstanceViolations(ipol,
-                                            new_ipolicy, instances)
+                                            new_ipolicy, instances, self.cfg)
         if new:
           violations.update(new)
 
@@ -4225,7 +4410,10 @@ class LUClusterSetParams(LogicalUnit):
           self.new_os_hvp[os_name] = hvs
         else:
           for hv_name, hv_dict in hvs.items():
-            if hv_name not in self.new_os_hvp[os_name]:
+            if hv_dict is None:
+              # Delete if it exists
+              self.new_os_hvp[os_name].pop(hv_name, None)
+            elif hv_name not in self.new_os_hvp[os_name]:
               self.new_os_hvp[os_name][hv_name] = hv_dict
             else:
               self.new_os_hvp[os_name][hv_name].update(hv_dict)
@@ -4271,11 +4459,13 @@ class LUClusterSetParams(LogicalUnit):
             (self.op.enabled_hypervisors and
              hv_name in self.op.enabled_hypervisors)):
           # either this is a new hypervisor, or its parameters have changed
-          hv_class = hypervisor.GetHypervisor(hv_name)
+          hv_class = hypervisor.GetHypervisorClass(hv_name)
           utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
           hv_class.CheckParameterSyntax(hv_params)
           _CheckHVParams(self, node_list, hv_name, hv_params)
 
+    self._CheckDiskTemplateConsistency()
+
     if self.op.os_hvp:
       # no need to check any newly-enabled hypervisors, since the
       # defaults have already been checked in the above code-block
@@ -4285,7 +4475,7 @@ class LUClusterSetParams(LogicalUnit):
           # we need to fill in the new os_hvp on top of the actual hv_p
           cluster_defaults = self.new_hvparams.get(hv_name, {})
           new_osp = objects.FillDict(cluster_defaults, hv_params)
-          hv_class = hypervisor.GetHypervisor(hv_name)
+          hv_class = hypervisor.GetHypervisorClass(hv_name)
           hv_class.CheckParameterSyntax(new_osp)
           _CheckHVParams(self, node_list, hv_name, new_osp)
 
@@ -4298,20 +4488,64 @@ class LUClusterSetParams(LogicalUnit):
                                    " specified" % self.op.default_iallocator,
                                    errors.ECODE_INVAL)
 
-  def Exec(self, feedback_fn):
-    """Change the parameters of the cluster.
+  def _CheckDiskTemplateConsistency(self):
+    """Check whether the disk templates that are going to be disabled
+       are still in use by some instances.
+
+    """
+    if self.op.enabled_disk_templates:
+      cluster = self.cfg.GetClusterInfo()
+      instances = self.cfg.GetAllInstancesInfo()
+
+      disk_templates_to_remove = set(cluster.enabled_disk_templates) \
+        - set(self.op.enabled_disk_templates)
+      for instance in instances.itervalues():
+        if instance.disk_template in disk_templates_to_remove:
+          raise errors.OpPrereqError("Cannot disable disk template '%s',"
+                                     " because instance '%s' is using it." %
+                                     (instance.disk_template, instance.name))
+
+  def _SetVgName(self, feedback_fn):
+    """Determines and sets the new volume group name.
 
     """
     if self.op.vg_name is not None:
+      if self.op.vg_name and not \
+           utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
+        feedback_fn("Note that you specified a volume group, but did not"
+                    " enable any lvm disk template.")
       new_volume = self.op.vg_name
       if not new_volume:
+        if utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
+          raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
+                                     " disk templates are enabled.")
         new_volume = None
       if new_volume != self.cfg.GetVGName():
         self.cfg.SetVGName(new_volume)
       else:
         feedback_fn("Cluster LVM configuration already in desired"
                     " state, not changing")
+    else:
+      if utils.IsLvmEnabled(self.cluster.enabled_disk_templates) and \
+          not self.cfg.GetVGName():
+        raise errors.OpPrereqError("Please specify a volume group when"
+                                   " enabling lvm-based disk-templates.")
+
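
_SetVgName enforces two invariants: the volume group cannot be unset while an lvm-based template is enabled, and enabling lvm requires a volume group to be known. A compact sketch of just those two rules, with plain flags and exceptions:

    def validate_vg_change(opt_vg_name, lvm_enabled, current_vg):
        # opt_vg_name: None = unchanged, "" = unset, anything else = new name
        if opt_vg_name == "" and lvm_enabled:
            raise ValueError("Cannot unset volume group if lvm-based"
                             " disk templates are enabled.")
        if opt_vg_name is None and lvm_enabled and not current_vg:
            raise ValueError("Please specify a volume group when"
                             " enabling lvm-based disk-templates.")

    validate_vg_change("xenvg", True, None)  # fine: a VG is provided
    validate_vg_change(None, False, None)    # fine: lvm is not enabled
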
+  def Exec(self, feedback_fn):
+    """Change the parameters of the cluster.
+
+    """
+    if self.op.enabled_disk_templates:
+      self.cluster.enabled_disk_templates = \
+        list(set(self.op.enabled_disk_templates))
+
+    self._SetVgName(feedback_fn)
+
     if self.op.drbd_helper is not None:
+      if constants.DT_DRBD8 not in self.cluster.enabled_disk_templates:
+        feedback_fn("Note that you specified a drbd user helper, but did not"
+                    " enable the drbd disk template.")
       new_helper = self.op.drbd_helper
       if not new_helper:
         new_helper = None
@@ -4507,12 +4741,14 @@ def _ComputeAncillaryFiles(cluster, redist):
   files_vm = set(
     filename
     for hv_name in cluster.enabled_hypervisors
-    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
+    for filename in
+      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])
 
   files_opt |= set(
     filename
     for hv_name in cluster.enabled_hypervisors
-    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
+    for filename in
+      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])
 
   # Filenames in each category must be unique
   all_files_set = files_all | files_mc | files_vm
@@ -5119,6 +5355,159 @@ class LUOsDiagnose(NoHooksLU):
     return self.oq.OldStyleQuery(self)
 
 
+class _ExtStorageQuery(_QueryBase):
+  FIELDS = query.EXTSTORAGE_FIELDS
+
+  def ExpandNames(self, lu):
+    # Lock all nodes in shared mode
+    # Temporary removal of locks, should be reverted later
+    # TODO: reintroduce locks when they are lighter-weight
+    lu.needed_locks = {}
+    #self.share_locks[locking.LEVEL_NODE] = 1
+    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+    # The following variables interact with _QueryBase._GetNames
+    if self.names:
+      self.wanted = self.names
+    else:
+      self.wanted = locking.ALL_SET
+
+    self.do_locking = self.use_locking
+
+  def DeclareLocks(self, lu, level):
+    pass
+
+  @staticmethod
+  def _DiagnoseByProvider(rlist):
+    """Remaps a per-node return list into an a per-provider per-node dictionary
+
+    @param rlist: a map with node names as keys and ExtStorage objects as values
+
+    @rtype: dict
+    @return: a dictionary with extstorage providers as keys and as
+        value another map, with nodes as keys and tuples of
+        (path, status, diagnose, parameters) as values, eg::
+
+          {"provider1": {"node1": [(/usr/lib/..., True, "", [])]
+                         "node2": [(/srv/..., False, "missing file")]
+                         "node3": [(/srv/..., True, "", [])]
+          }
+
+    """
+    all_es = {}
+    # we build here the list of nodes that didn't fail the RPC (at RPC
+    # level), so that nodes with a non-responding node daemon don't
+    # make all providers invalid
+    good_nodes = [node_name for node_name in rlist
+                  if not rlist[node_name].fail_msg]
+    for node_name, nr in rlist.items():
+      if nr.fail_msg or not nr.payload:
+        continue
+      for (name, path, status, diagnose, params) in nr.payload:
+        if name not in all_es:
+          # build a list of nodes for this provider containing empty lists
+          # for each node in node_list
+          all_es[name] = {}
+          for nname in good_nodes:
+            all_es[name][nname] = []
+        # convert params from [name, help] to (name, help)
+        params = [tuple(v) for v in params]
+        all_es[name][node_name].append((path, status, diagnose, params))
+    return all_es
+
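
_DiagnoseByProvider inverts the RPC result: per-node lists of provider entries become a per-provider map of per-node entries, with every reachable node pre-seeded with an empty list. A standalone sketch of the inversion, assuming no RPC-level failures:

    def diagnose_by_provider(rlist):
        # rlist: node name -> list of (name, path, status, diagnose, params)
        all_es = {}
        good_nodes = list(rlist)
        for node_name, payload in rlist.items():
            for (name, path, status, diagnose, params) in payload:
                if name not in all_es:
                    all_es[name] = dict((n, []) for n in good_nodes)
                all_es[name][node_name].append(
                    (path, status, diagnose, [tuple(v) for v in params]))
        return all_es

    result = {"node1": [("ext1", "/usr/lib/es/ext1", True, "", [])],
              "node2": []}
    print(diagnose_by_provider(result))
    # {'ext1': {'node1': [('/usr/lib/es/ext1', True, '', [])], 'node2': []}}
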
+  def _GetQueryData(self, lu):
+    """Computes the list of nodes and their attributes.
+
+    """
+    # Locking is not used
+    assert not (compat.any(lu.glm.is_owned(level)
+                           for level in locking.LEVELS
+                           if level != locking.LEVEL_CLUSTER) or
+                self.do_locking or self.use_locking)
+
+    valid_nodes = [node.name
+                   for node in lu.cfg.GetAllNodesInfo().values()
+                   if not node.offline and node.vm_capable]
+    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))
+
+    data = {}
+
+    nodegroup_list = lu.cfg.GetNodeGroupList()
+
+    for (es_name, es_data) in pol.items():
+      # For every provider compute the nodegroup validity.
+      # To do this we need to check the validity of each node in es_data
+      # and then construct the corresponding nodegroup dict:
+      #      { nodegroup1: status
+      #        nodegroup2: status
+      #      }
+      ndgrp_data = {}
+      for nodegroup in nodegroup_list:
+        ndgrp = lu.cfg.GetNodeGroup(nodegroup)
+
+        nodegroup_nodes = ndgrp.members
+        nodegroup_name = ndgrp.name
+        node_statuses = []
+
+        for node in nodegroup_nodes:
+          if node in valid_nodes:
+            if es_data[node] != []:
+              node_status = es_data[node][0][1]
+              node_statuses.append(node_status)
+            else:
+              node_statuses.append(False)
+
+        if False in node_statuses:
+          ndgrp_data[nodegroup_name] = False
+        else:
+          ndgrp_data[nodegroup_name] = True
+
+      # Compute the provider's parameters
+      parameters = set()
+      for idx, esl in enumerate(es_data.values()):
+        valid = bool(esl and esl[0][1])
+        if not valid:
+          break
+
+        node_params = esl[0][3]
+        if idx == 0:
+          # First entry
+          parameters.update(node_params)
+        else:
+          # Filter out inconsistent values
+          parameters.intersection_update(node_params)
+
+      params = list(parameters)
+
+      # Now fill all the info for this provider
+      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
+                                  nodegroup_status=ndgrp_data,
+                                  parameters=params)
+
+      data[es_name] = info
+
+    # Prepare data in requested order
+    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
+            if name in data]
+
+
+class LUExtStorageDiagnose(NoHooksLU):
+  """Logical unit for ExtStorage diagnose/query.
+
+  """
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    self.eq = _ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                               self.op.output_fields, False)
+
+  def ExpandNames(self):
+    self.eq.ExpandNames(self)
+
+  def Exec(self, feedback_fn):
+    return self.eq.OldStyleQuery(self)
+
+
 class LUNodeRemove(LogicalUnit):
   """Logical unit for removing a node.
 
@@ -5249,8 +5638,9 @@ class _NodeQuery(_QueryBase):
       # filter out non-vm_capable nodes
       toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
 
+      es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, toquery_nodes)
       node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
-                                        [lu.cfg.GetHypervisorType()])
+                                        [lu.cfg.GetHypervisorType()], es_flags)
       live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload))
                        for (name, nresult) in node_data.items()
                        if not nresult.fail_msg and nresult.payload)
@@ -5488,6 +5878,7 @@ class _InstanceQuery(_QueryBase):
       lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
       lu.needed_locks[locking.LEVEL_NODEGROUP] = []
       lu.needed_locks[locking.LEVEL_NODE] = []
+      lu.needed_locks[locking.LEVEL_NETWORK] = []
       lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
     self.do_grouplocks = (self.do_locking and
@@ -5507,6 +5898,12 @@ class _InstanceQuery(_QueryBase):
       elif level == locking.LEVEL_NODE:
         lu._LockInstancesNodes() # pylint: disable=W0212
 
+      elif level == locking.LEVEL_NETWORK:
+        lu.needed_locks[locking.LEVEL_NETWORK] = \
+          frozenset(net_uuid
+                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
+
   @staticmethod
   def _CheckGroupLocks(lu):
     owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
@@ -5597,10 +5994,17 @@ class _InstanceQuery(_QueryBase):
       nodes = None
       groups = None
 
+    if query.IQ_NETWORKS in self.requested_data:
+      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
+                                    for i in instance_list))
+      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
+    else:
+      networks = None
+
     return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                    disk_usage, offline_nodes, bad_nodes,
                                    live_data, wrongnode_inst, consinfo,
-                                   nodes, groups)
+                                   nodes, groups, networks)
 
 
 class LUQuery(NoHooksLU):
@@ -5856,10 +6260,12 @@ class LUNodeAdd(LogicalUnit):
                                    secondary_ip=secondary_ip,
                                    master_candidate=self.master_candidate,
                                    offline=False, drained=False,
-                                   group=node_group)
+                                   group=node_group, ndparams={})
 
     if self.op.ndparams:
       utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+      _CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
+                            "node", "cluster or group")
 
     if self.op.hv_state:
       self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
@@ -5869,7 +6275,8 @@ class LUNodeAdd(LogicalUnit):
 
     # TODO: If we need to have multiple DnsOnlyRunner we probably should make
     #       it a property on the base class.
-    result = rpc.DnsOnlyRunner().call_version([node])[node]
+    rpcrunner = rpc.DnsOnlyRunner()
+    result = rpcrunner.call_version([node])[node]
     result.Raise("Can't get version information from node %s" % node)
     if constants.PROTOCOL_VERSION == result.payload:
       logging.info("Communication to node %s fine, sw version %s match",
@@ -5880,6 +6287,17 @@ class LUNodeAdd(LogicalUnit):
                                  (constants.PROTOCOL_VERSION, result.payload),
                                  errors.ECODE_ENVIRON)
 
+    vg_name = cfg.GetVGName()
+    if vg_name is not None:
+      vparams = {constants.NV_PVLIST: [vg_name]}
+      excl_stor = _IsExclusiveStorageEnabledNode(cfg, self.new_node)
+      cname = self.cfg.GetClusterName()
+      result = rpcrunner.call_node_verify_light([node], vparams, cname)[node]
+      (errmsgs, _) = _CheckNodePVs(result.payload, excl_stor)
+      if errmsgs:
+        raise errors.OpPrereqError("Checks on node PVs failed: %s" %
+                                   "; ".join(errmsgs), errors.ECODE_ENVIRON)
+
   def Exec(self, feedback_fn):
     """Adds the new node to the cluster.
 
@@ -6270,6 +6688,8 @@ class LUNodeSetParams(LogicalUnit):
     if self.op.ndparams:
       new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
       utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
+      _CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
+                            "node", "cluster or group")
       self.new_ndparams = new_ndparams
 
     if self.op.hv_state:
@@ -6788,7 +7208,7 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
       we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
+  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name], False)
   nodeinfo[node].Raise("Can't get data from node %s" % node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
   (_, _, (hv_info, )) = nodeinfo[node].payload
@@ -6849,7 +7269,8 @@ def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
+  es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, nodenames)
+  nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None, es_flags)
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
@@ -6885,7 +7306,7 @@ def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
+  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
@@ -6963,7 +7384,7 @@ class LUInstanceStartup(LogicalUnit):
       utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
       filled_hvp = cluster.FillHV(instance)
       filled_hvp.update(self.op.hvparams)
-      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
+      hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
       hv_type.CheckParameterSyntax(filled_hvp)
       _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
 
@@ -7080,6 +7501,7 @@ class LUInstanceReboot(LogicalUnit):
     instance = self.instance
     ignore_secondaries = self.op.ignore_secondaries
     reboot_type = self.op.reboot_type
+    reason = self.op.reason
 
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
@@ -7095,7 +7517,7 @@ class LUInstanceReboot(LogicalUnit):
         self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
                                              reboot_type,
-                                             self.op.shutdown_timeout)
+                                             self.op.shutdown_timeout, reason)
       result.Raise("Could not reboot instance")
     else:
       if instance_running:
@@ -7177,7 +7599,9 @@ class LUInstanceShutdown(LogicalUnit):
     node_current = instance.primary_node
     timeout = self.op.timeout
 
-    if not self.op.no_remember:
+    # If the instance is offline we shouldn't mark it as down, as that
+    # resets the offline flag.
+    if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
       self.cfg.MarkInstanceDown(instance.name)
 
     if self.primary_offline:
@@ -7300,6 +7724,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
     # TODO: Implement support changing VG while recreating
     constants.IDISK_VG,
     constants.IDISK_METAVG,
+    constants.IDISK_PROVIDER,
+    constants.IDISK_NAME,
     ]))
 
   def _RunAllocator(self):
@@ -7338,7 +7764,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
                                         disks=[{constants.IDISK_SIZE: d.size,
                                                 constants.IDISK_MODE: d.mode}
                                                 for d in self.instance.disks],
-                                        hypervisor=self.instance.hypervisor)
+                                        hypervisor=self.instance.hypervisor,
+                                        node_whitelist=None)
     ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
     ial.Run(self.op.iallocator)
@@ -7839,8 +8266,8 @@ def _ExpandNamesForMigration(lu):
   lu.needed_locks[locking.LEVEL_NODE_RES] = []
   lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
 
-  # The node allocation lock is actually only needed for replicated instances
-  # (e.g. DRBD8) and if an iallocator is used.
+  # The node allocation lock is actually only needed for externally replicated
+  # instances (e.g. sharedfile or RBD) and if an iallocator is used.
   lu.needed_locks[locking.LEVEL_NODE_ALLOC] = []
 
 
@@ -8002,8 +8429,9 @@ class LUInstanceMigrate(LogicalUnit):
 
     """
     instance = self._migrater.instance
-    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
-    return (nl, nl + [instance.primary_node])
+    snodes = list(instance.secondary_nodes)
+    nl = [self.cfg.GetMasterNode(), instance.primary_node] + snodes
+    return (nl, nl)
 
 
 class LUInstanceMove(LogicalUnit):
@@ -8064,6 +8492,10 @@ class LUInstanceMove(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
+    if instance.disk_template not in constants.DTS_COPYABLE:
+      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
+                                 instance.disk_template, errors.ECODE_STATE)
+
     node = self.cfg.GetNodeInfo(self.op.target_node)
     assert node is not None, \
       "Cannot retrieve locked node %s" % self.op.target_node
@@ -8088,7 +8520,7 @@ class LUInstanceMove(LogicalUnit):
     cluster = self.cfg.GetClusterInfo()
     group_info = self.cfg.GetNodeGroup(node.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
+    _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                             ignore=self.op.ignore_ipolicy)
 
     if instance.admin_state == constants.ADMINST_UP:
@@ -8139,12 +8571,9 @@ class LUInstanceMove(LogicalUnit):
     try:
       _CreateDisks(self, instance, target_node=target_node)
     except errors.OpExecError:
-      self.LogWarning("Device creation failed, reverting...")
-      try:
-        _RemoveDisks(self, instance, target_node=target_node)
-      finally:
-        self.cfg.ReleaseDRBDMinors(instance.name)
-        raise
+      self.LogWarning("Device creation failed")
+      self.cfg.ReleaseDRBDMinors(instance.name)
+      raise
 
     cluster_name = self.cfg.GetClusterInfo().cluster_name
 
@@ -8347,11 +8776,10 @@ class TLMigrateInstance(Tasklet):
                                  errors.ECODE_STATE)
 
     if instance.disk_template in constants.DTS_EXT_MIRROR:
-      assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
-
       _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
 
       if self.lu.op.iallocator:
+        assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
         self._RunAllocator()
       else:
         # We set set self.target_node as it is required by
@@ -8363,7 +8791,7 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                               ignore=self.ignore_ipolicy)
 
       # self.target_node is already populated, either directly or by the
@@ -8407,7 +8835,7 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                               ignore=self.ignore_ipolicy)
 
     i_be = cluster.FillBE(instance)
@@ -8698,7 +9126,7 @@ class TLMigrateInstance(Tasklet):
 
     # Check for hypervisor version mismatch and warn the user.
     nodeinfo = self.rpc.call_node_info([source_node, target_node],
-                                       None, [self.instance.hypervisor])
+                                       None, [self.instance.hypervisor], False)
     for ninfo in nodeinfo.values():
       ninfo.Raise("Unable to retrieve node information from node '%s'" %
                   ninfo.node)
@@ -8847,9 +9275,9 @@ class TLMigrateInstance(Tasklet):
       self._GoReconnect(False)
       self._WaitUntilSync()
 
-    # If the instance's disk template is `rbd' and there was a successful
-    # migration, unmap the device from the source node.
-    if self.instance.disk_template == constants.DT_RBD:
+    # If the instance's disk template is `rbd' or `ext' and there was a
+    # successful migration, unmap the device from the source node.
+    if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
       disks = _ExpandCheckDisks(instance, instance.disks)
       self.feedback_fn("* unmapping instance's disks from %s" % source_node)
       for disk in disks:
@@ -8980,12 +9408,13 @@ def _CreateBlockDev(lu, node, instance, device, force_create, info,
 
   """
   (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
+  excl_stor = _IsExclusiveStorageEnabledNodeName(lu.cfg, node)
   return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
-                              force_open)
+                              force_open, excl_stor)
 
 
 def _CreateBlockDevInner(lu, node, instance, device, force_create,
-                         info, force_open):
+                         info, force_open, excl_stor):
   """Create a tree of block devices on a given node.
 
   If this device type has to be created on secondaries, create it and
@@ -9012,6 +9441,8 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
       L{backend.BlockdevCreate} function where it specifies
       whether we run on primary or not, and it affects both
       the child assembly and the device own Open() execution
+  @type excl_stor: boolean
+  @param excl_stor: Whether exclusive_storage is active for the node
 
   """
   if device.CreateOnSecondary():
@@ -9020,15 +9451,17 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
   if device.children:
     for child in device.children:
       _CreateBlockDevInner(lu, node, instance, child, force_create,
-                           info, force_open)
+                           info, force_open, excl_stor)
 
   if not force_create:
     return
 
-  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
+  _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+                        excl_stor)
 
 
-def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
+def _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+                          excl_stor):
   """Create a single block device on a given node.
 
   This will not recurse over children of the device, so they must be
@@ -9047,11 +9480,14 @@ def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
       L{backend.BlockdevCreate} function where it specifies
       whether we run on primary or not, and it affects both
       the child assembly and the device own Open() execution
+  @type excl_stor: boolean
+  @param excl_stor: Whether exclusive_storage is active for the node
 
   """
   lu.cfg.SetDiskID(device, node)
   result = lu.rpc.call_blockdev_create(node, device, device.size,
-                                       instance.name, force_open, info)
+                                       instance.name, force_open, info,
+                                       excl_stor)
   result.Raise("Can't create block device %s on"
                " node %s for instance %s" % (device, node, instance.name))
   if device.physical_id is None:
@@ -9083,22 +9519,26 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
   dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                           logical_id=(vgnames[0], names[0]),
                           params={})
+  dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   dev_meta = objects.Disk(dev_type=constants.LD_LV,
                           size=constants.DRBD_META_SIZE,
                           logical_id=(vgnames[1], names[1]),
                           params={})
+  dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                           logical_id=(primary, secondary, port,
                                       p_minor, s_minor,
                                       shared_secret),
                           children=[dev_data, dev_meta],
                           iv_name=iv_name, params={})
+  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   return drbd_dev
 
 
 _DISK_TEMPLATE_NAME_PREFIX = {
   constants.DT_PLAIN: "",
   constants.DT_RBD: ".rbd",
+  constants.DT_EXT: ".ext",
   }
 
 
@@ -9108,6 +9548,7 @@ _DISK_TEMPLATE_DEVICE_TYPE = {
   constants.DT_SHARED_FILE: constants.LD_FILE,
   constants.DT_BLOCK: constants.LD_BLOCKDEV,
   constants.DT_RBD: constants.LD_RBD,
+  constants.DT_EXT: constants.LD_EXT,
   }
 
 
@@ -9152,6 +9593,7 @@ def _GenerateDiskTemplate(
                                       "disk/%d" % disk_index,
                                       minors[idx * 2], minors[idx * 2 + 1])
       disk_dev.mode = disk[constants.IDISK_MODE]
+      disk_dev.name = disk.get(constants.IDISK_NAME, None)
       disks.append(disk_dev)
   else:
     if secondary_nodes:
@@ -9187,21 +9629,39 @@ def _GenerateDiskTemplate(
                                        disk[constants.IDISK_ADOPT])
     elif template_name == constants.DT_RBD:
       logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
+    elif template_name == constants.DT_EXT:
+      def logical_id_fn(idx, _, disk):
+        provider = disk.get(constants.IDISK_PROVIDER, None)
+        if provider is None:
+          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
+                                       " not found", constants.DT_EXT,
+                                       constants.IDISK_PROVIDER)
+        return (provider, names[idx])
     else:
       raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
 
     dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
 
     for idx, disk in enumerate(disk_info):
+      params = {}
+      # Only for the ext template, add the extra disk parameters to params
+      if template_name == constants.DT_EXT:
+        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
+        for key in disk:
+          if key not in constants.IDISK_PARAMS:
+            params[key] = disk[key]
       disk_index = idx + base_index
       size = disk[constants.IDISK_SIZE]
       feedback_fn("* disk %s, size %s" %
                   (disk_index, utils.FormatUnit(size, "h")))
-      disks.append(objects.Disk(dev_type=dev_type, size=size,
-                                logical_id=logical_id_fn(idx, disk_index, disk),
-                                iv_name="disk/%d" % disk_index,
-                                mode=disk[constants.IDISK_MODE],
-                                params={}))
+      disk_dev = objects.Disk(dev_type=dev_type, size=size,
+                              logical_id=logical_id_fn(idx, disk_index, disk),
+                              iv_name="disk/%d" % disk_index,
+                              mode=disk[constants.IDISK_MODE],
+                              params=params)
+      disk_dev.name = disk.get(constants.IDISK_NAME, None)
+      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
+      disks.append(disk_dev)
 
   return disks
 
@@ -9233,7 +9693,9 @@ def _WipeDisks(lu, instance, disks=None):
   @param lu: the logical unit on whose behalf we execute
   @type instance: L{objects.Instance}
   @param instance: the instance whose disks we should create
-  @return: the success of the wipe
+  @type disks: None or list of tuple of (number, L{objects.Disk}, number)
+  @param disks: Disk details; tuple contains disk index, disk object and the
+    start offset
 
   """
   node = instance.primary_node
@@ -9351,6 +9813,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     result.Raise("Failed to create directory '%s' on"
                  " node %s" % (file_storage_dir, pnode))
 
+  disks_created = []
   # Note: this needs to be kept in sync with adding of disks in
   # LUInstanceSetParams
   for idx, device in enumerate(instance.disks):
@@ -9360,7 +9823,19 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     #HARDCODE
     for node in all_nodes:
       f_create = node == pnode
-      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+      try:
+        _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+        disks_created.append((node, device))
+      except errors.OpExecError:
+        logging.warning("Creating disk %s for instance '%s' failed",
+                        idx, instance.name)
+        for (node, disk) in disks_created:
+          lu.cfg.SetDiskID(disk, node)
+          result = lu.rpc.call_blockdev_remove(node, disk)
+          if result.fail_msg:
+            logging.warning("Failed to remove newly-created disk %s on node %s:"
+                            " %s", device, node, result.fail_msg)
+        raise
 
 
 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
@@ -9368,8 +9843,7 @@ def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
 
   This abstracts away some work from `AddInstance()` and
   `RemoveInstance()`. Note that in case some of the devices couldn't
-  be removed, the removal will continue with the other ones (compare
-  with `_CreateDisks()`).
+  be removed, the removal will continue with the other ones.
 
   @type lu: L{LogicalUnit}
   @param lu: the logical unit on whose behalf we execute
@@ -9639,8 +10113,14 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
 
     check_params = cluster.SimpleFillNIC(nicparams)
     objects.NIC.CheckParameterSyntax(check_params)
-    nics.append(objects.NIC(mac=mac, ip=nic_ip,
-                            network=net, nicparams=nicparams))
+    net_uuid = cfg.LookupNetwork(net)
+    name = nic.get(constants.INIC_NAME, None)
+    if name is not None and name.lower() == constants.VALUE_NONE:
+      name = None
+    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
+                          network=net_uuid, nicparams=nicparams)
+    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
+    nics.append(nic_obj)
 
   return nics
 
@@ -9651,7 +10131,7 @@ def _ComputeDisks(op, default_vg):
   @param op: The instance opcode
   @param default_vg: The default_vg to assume
 
-  @return: The computer disks
+  @return: The computed disks
 
   """
   disks = []
@@ -9669,16 +10149,41 @@ def _ComputeDisks(op, default_vg):
       raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                  errors.ECODE_INVAL)
 
+    ext_provider = disk.get(constants.IDISK_PROVIDER, None)
+    if ext_provider and op.disk_template != constants.DT_EXT:
+      raise errors.OpPrereqError("The '%s' option is only valid for the %s"
+                                 " disk template, not %s" %
+                                 (constants.IDISK_PROVIDER, constants.DT_EXT,
+                                 op.disk_template), errors.ECODE_INVAL)
+
     data_vg = disk.get(constants.IDISK_VG, default_vg)
+    name = disk.get(constants.IDISK_NAME, None)
+    if name is not None and name.lower() == constants.VALUE_NONE:
+      name = None
     new_disk = {
       constants.IDISK_SIZE: size,
       constants.IDISK_MODE: mode,
       constants.IDISK_VG: data_vg,
+      constants.IDISK_NAME: name,
       }
+
     if constants.IDISK_METAVG in disk:
       new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
     if constants.IDISK_ADOPT in disk:
       new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
+
+    # For extstorage, demand the `provider' option and add any
+    # additional parameters (ext-params) to the dict
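+    # (illustrative example: a hypothetical disk spec
+    # {size: 1024, provider: "myprov", param1: "foo"} would keep "param1"
+    # as an ext-param next to the required "provider" entry)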
+    if op.disk_template == constants.DT_EXT:
+      if ext_provider:
+        new_disk[constants.IDISK_PROVIDER] = ext_provider
+        for key in disk:
+          if key not in constants.IDISK_PARAMS:
+            new_disk[key] = disk[key]
+      else:
+        raise errors.OpPrereqError("Missing provider for template '%s'" %
+                                   constants.DT_EXT, errors.ECODE_INVAL)
+
     disks.append(new_disk)
 
   return disks
@@ -9702,6 +10207,16 @@ def _ComputeFullBeParams(op, cluster):
   return cluster.SimpleFillBE(op.beparams)
 
 
+def _CheckOpportunisticLocking(op):
+  """Generate error if opportunistic locking is not possible.
+
+  """
+  if op.opportunistic_locking and not op.iallocator:
+    raise errors.OpPrereqError("Opportunistic locking is only available in"
+                               " combination with an instance allocator",
+                               errors.ECODE_INVAL)
+
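+# Note: _CheckOpportunisticLocking is used by both LUInstanceCreate and
+# LUInstanceMultiAlloc below, since opportunistic locking only makes sense
+# when an iallocator chooses the nodes.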
+
 class LUInstanceCreate(LogicalUnit):
   """Create an instance.
 
@@ -9731,11 +10246,25 @@ class LUInstanceCreate(LogicalUnit):
     # check nics' parameter names
     for nic in self.op.nics:
       utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
+    # check that NIC names are unique and valid
+    utils.ValidateDeviceNames("NIC", self.op.nics)
+
+    # check that disk names are unique and valid
+    utils.ValidateDeviceNames("disk", self.op.disks)
+
+    cluster = self.cfg.GetClusterInfo()
+    if self.op.disk_template not in cluster.enabled_disk_templates:
+      raise errors.OpPrereqError("Cannot create an instance with disk template"
+                                 " '%s', because it is not enabled in the"
+                                 " cluster. Enabled disk templates are: %s." %
+                                 (self.op.disk_template,
+                                  ",".join(cluster.enabled_disk_templates)),
+                                 errors.ECODE_STATE)
 
     # check disks. parameter names and consistent adopt/no-adopt strategy
     has_adopt = has_no_adopt = False
     for disk in self.op.disks:
-      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
+      if self.op.disk_template != constants.DT_EXT:
+        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
       if constants.IDISK_ADOPT in disk:
         has_adopt = True
       else:
@@ -9797,6 +10326,8 @@ class LUInstanceCreate(LogicalUnit):
                         " template")
         self.op.snode = None
 
+    _CheckOpportunisticLocking(self.op)
+
     self._cds = _GetClusterDomainSecret()
 
     if self.op.mode == constants.INSTANCE_IMPORT:
@@ -9933,7 +10464,7 @@ class LUInstanceCreate(LogicalUnit):
     """
     if self.op.opportunistic_locking:
       # Only consider nodes for which a lock is held
-      node_whitelist = self.owned_locks(locking.LEVEL_NODE)
+      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
     else:
       node_whitelist = None
 
@@ -9993,8 +10524,8 @@ class LUInstanceCreate(LogicalUnit):
       vcpus=self.be_full[constants.BE_VCPUS],
       nics=_NICListToTuple(self, self.nics),
       disk_template=self.op.disk_template,
-      disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
-             for d in self.disks],
+      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
+             d[constants.IDISK_MODE]) for d in self.disks],
       bep=self.be_full,
       hvp=self.hv_full,
       hypervisor_name=self.op.hypervisor,
@@ -10239,11 +10770,12 @@ class LUInstanceCreate(LogicalUnit):
     utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
     filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                       self.op.hvparams)
-    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
+    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
     hv_type.CheckParameterSyntax(filled_hvp)
     self.hv_full = filled_hvp
     # check that we don't specify global parameters on an instance
-    _CheckGlobalHvParams(self.op.hvparams)
+    _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
+                          "instance", "cluster")
 
     # fill and remember the beparams dict
     self.be_full = _ComputeFullBeParams(self.op, cluster)
@@ -10341,14 +10873,15 @@ class LUInstanceCreate(LogicalUnit):
     # Fill in any IPs from IP pools. This must happen here, because we need to
     # know the nic's primary node, as specified by the iallocator
     for idx, nic in enumerate(self.nics):
-      net = nic.network
-      if net is not None:
-        netparams = self.cfg.GetGroupNetParams(net, self.pnode.name)
+      net_uuid = nic.network
+      if net_uuid is not None:
+        nobj = self.cfg.GetNetwork(net_uuid)
+        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
         if netparams is None:
           raise errors.OpPrereqError("No netparams found for network"
                                      " %s. Propably not connected to"
                                      " node's %s nodegroup" %
-                                     (net, self.pnode.name),
+                                     (nobj.name, self.pnode.name),
                                      errors.ECODE_INVAL)
         self.LogInfo("NIC/%d inherits netparams %s" %
                      (idx, netparams.values()))
@@ -10356,24 +10889,24 @@ class LUInstanceCreate(LogicalUnit):
         if nic.ip is not None:
           if nic.ip.lower() == constants.NIC_IP_POOL:
             try:
-              nic.ip = self.cfg.GenerateIp(net, self.proc.GetECId())
+              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
             except errors.ReservationError:
               raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                          " from the address pool" % idx,
                                          errors.ECODE_STATE)
-            self.LogInfo("Chose IP %s from network %s", nic.ip, net)
+            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
           else:
             try:
-              self.cfg.ReserveIp(net, nic.ip, self.proc.GetECId())
+              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
             except errors.ReservationError:
               raise errors.OpPrereqError("IP address %s already in use"
                                          " or does not belong to network %s" %
-                                         (nic.ip, net),
+                                         (nic.ip, nobj.name),
                                          errors.ECODE_NOTUNIQUE)
-      else:
-        # net is None, ip None or given
-        if self.op.conflicts_check:
-          _CheckForConflictingIp(self, nic.ip, self.pnode.name)
+
+      # network is None; the IP is either None or explicitly given
+      elif self.op.conflicts_check:
+        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
 
     # mirror node verification
     if self.op.disk_template in constants.DTS_INT_MIRROR:
@@ -10392,26 +10925,17 @@ class LUInstanceCreate(LogicalUnit):
                         " from the first disk's node group will be"
                         " used")
 
-    nodenames = [pnode.name] + self.secondaries
-
-    # Verify instance specs
-    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
-    ispec = {
-      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
-      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
-      constants.ISPEC_DISK_COUNT: len(self.disks),
-      constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks],
-      constants.ISPEC_NIC_COUNT: len(self.nics),
-      constants.ISPEC_SPINDLE_USE: spindle_use,
-      }
+    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
+      nodes = [pnode]
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
+        nodes.append(snode)
+      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+      if compat.any(map(has_es, nodes)):
+        raise errors.OpPrereqError("Disk template %s not supported with"
+                                   " exclusive storage" % self.op.disk_template,
+                                   errors.ECODE_STATE)
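+      # (so, assuming e.g. DT_DRBD8 is not in DTS_EXCL_STORAGE, a DRBD-based
+      # instance cannot be created on nodes with exclusive_storage enabled)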
 
-    group_info = self.cfg.GetNodeGroup(pnode.group)
-    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
-    if not self.op.ignore_ipolicy and res:
-      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
-             (pnode.group, group_info.name, utils.CommaJoin(res)))
-      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+    nodenames = [pnode.name] + self.secondaries
 
     if not self.adopt_disks:
       if self.op.disk_template == constants.DT_RBD:
@@ -10419,6 +10943,9 @@ class LUInstanceCreate(LogicalUnit):
         # Any function that checks prerequisites can be placed here.
         # Check if there is enough space on the RADOS cluster.
         _CheckRADOSFreeSpace()
+      elif self.op.disk_template == constants.DT_EXT:
+        # FIXME: Function that checks prereqs if needed
+        pass
       else:
         # Check lv size requirements, if not adopting
         req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
@@ -10508,12 +11035,12 @@ class LUInstanceCreate(LogicalUnit):
 
     group_info = self.cfg.GetNodeGroup(pnode.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec)
+    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
+                                               self.op.disk_template)
     if not self.op.ignore_ipolicy and res:
-      raise errors.OpPrereqError(("Instance allocation to group %s violates"
-                                  " policy: %s") % (pnode.group,
-                                                    utils.CommaJoin(res)),
-                                  errors.ECODE_INVAL)
+      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
+             (pnode.group, group_info.name, utils.CommaJoin(res)))
+      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
     _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
 
@@ -10523,6 +11050,9 @@ class LUInstanceCreate(LogicalUnit):
 
     _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 
+    #TODO: _CheckExtParams (remotely)
+    # Check parameters for extstorage
+
     # memory check on primary node
     #TODO(dynmem): use MINMEM for checking
     if self.op.start:
@@ -10601,12 +11131,9 @@ class LUInstanceCreate(LogicalUnit):
       try:
         _CreateDisks(self, iobj)
       except errors.OpExecError:
-        self.LogWarning("Device creation failed, reverting...")
-        try:
-          _RemoveDisks(self, iobj)
-        finally:
-          self.cfg.ReleaseDRBDMinors(instance)
-          raise
+        self.LogWarning("Device creation failed")
+        self.cfg.ReleaseDRBDMinors(instance)
+        raise
 
     feedback_fn("adding instance %s to cluster config" % instance)
 
@@ -10812,6 +11339,8 @@ class LUInstanceMultiAlloc(NoHooksLU):
                                    " or set a cluster-wide default iallocator",
                                    errors.ECODE_INVAL)
 
+    _CheckOpportunisticLocking(self.op)
+
     dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
     if dups:
       raise errors.OpPrereqError("There are duplicate instance names: %s" %
@@ -10859,7 +11388,7 @@ class LUInstanceMultiAlloc(NoHooksLU):
 
     if self.op.opportunistic_locking:
       # Only consider nodes for which a lock is held
-      node_whitelist = self.owned_locks(locking.LEVEL_NODE)
+      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
     else:
       node_whitelist = None
 
@@ -10991,7 +11520,7 @@ def _GetInstanceConsole(cluster, instance):
   @rtype: dict
 
   """
-  hyper = hypervisor.GetHypervisor(instance.hypervisor)
+  hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
   # beparams and hvparams are passed separately, to avoid editing the
   # instance and then saving the defaults in the instance itself.
   hvparams = cluster.FillHV(instance)
@@ -11355,7 +11884,7 @@ class TLReplaceDisks(Tasklet):
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               new_group_info)
       _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
-                              ignore=self.ignore_ipolicy)
+                              self.cfg, ignore=self.ignore_ipolicy)
 
     for node in check_nodes:
       _CheckNodeOnline(self.lu, node)
@@ -11533,11 +12062,13 @@ class TLReplaceDisks(Tasklet):
       new_lvs = [lv_data, lv_meta]
       old_lvs = [child.Copy() for child in dev.children]
       iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
+      excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
 
       # we pass force_create=True to force the LVM creation
       for new_lv in new_lvs:
         _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
-                             _GetInstanceInfoText(self.instance), False)
+                             _GetInstanceInfoText(self.instance), False,
+                             excl_stor)
 
     return iv_names
 
@@ -11746,13 +12277,15 @@ class TLReplaceDisks(Tasklet):
     # Step: create new storage
     self.lu.LogStep(3, steps_total, "Allocate new storage")
     disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+    excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
     for idx, dev in enumerate(disks):
       self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                       (self.new_node, idx))
       # we pass force_create=True to force LVM creation
       for new_lv in dev.children:
         _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
-                             True, _GetInstanceInfoText(self.instance), False)
+                             True, _GetInstanceInfoText(self.instance), False,
+                             excl_stor)
 
     # Step 4: drbd minors and drbd setup changes
     # after this, we must manually remove the drbd minors on both the
@@ -11796,7 +12329,8 @@ class TLReplaceDisks(Tasklet):
       try:
         _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
                               anno_new_drbd,
-                              _GetInstanceInfoText(self.instance), False)
+                              _GetInstanceInfoText(self.instance), False,
+                              excl_stor)
       except errors.GenericError:
         self.cfg.ReleaseDRBDMinors(self.instance.name)
         raise
@@ -12300,13 +12834,22 @@ class LUInstanceGrowDisk(LogicalUnit):
                                    utils.FormatUnit(self.delta, "h"),
                                    errors.ECODE_INVAL)
 
-    if instance.disk_template not in (constants.DT_FILE,
-                                      constants.DT_SHARED_FILE,
-                                      constants.DT_RBD):
+    self._CheckDiskSpace(nodenames, self.disk.ComputeGrowth(self.delta))
+
+  def _CheckDiskSpace(self, nodenames, req_vgspace):
+    template = self.instance.disk_template
+    if template not in constants.DTS_NO_FREE_SPACE_CHECK:
       # TODO: check the free disk space for file, when that feature will be
       # supported
-      _CheckNodesFreeDiskPerVG(self, nodenames,
-                               self.disk.ComputeGrowth(self.delta))
+      nodes = map(self.cfg.GetNodeInfo, nodenames)
+      es_nodes = filter(lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n),
+                        nodes)
+      if es_nodes:
+        # With exclusive storage we need something smarter than just looking
+        # at free space; for now, let's simply abort the operation.
+        raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
+                                   " is enabled", errors.ECODE_STATE)
+      _CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
 
   def Exec(self, feedback_fn):
     """Execute disk grow.
@@ -12437,12 +12980,13 @@ class LUInstanceQueryData(NoHooksLU):
 
       self.needed_locks[locking.LEVEL_NODEGROUP] = []
       self.needed_locks[locking.LEVEL_NODE] = []
+      self.needed_locks[locking.LEVEL_NETWORK] = []
       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
   def DeclareLocks(self, level):
     if self.op.use_locking:
+      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
       if level == locking.LEVEL_NODEGROUP:
-        owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
 
         # Lock all groups used by instances optimistically; this requires going
         # via the node before it's locked, requiring verification later on
@@ -12455,6 +12999,13 @@ class LUInstanceQueryData(NoHooksLU):
       elif level == locking.LEVEL_NODE:
         self._LockInstancesNodes()
 
+      elif level == locking.LEVEL_NETWORK:
+        self.needed_locks[locking.LEVEL_NETWORK] = \
+          frozenset(net_uuid
+                    for instance_name in owned_instances
+                    for net_uuid in
+                       self.cfg.GetInstanceNetworks(instance_name))
+
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -12464,6 +13015,7 @@ class LUInstanceQueryData(NoHooksLU):
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
     owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
 
     if self.wanted_names is None:
       assert self.op.use_locking, "Locking was not used"
@@ -12475,7 +13027,8 @@ class LUInstanceQueryData(NoHooksLU):
       _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
                                 None)
     else:
-      assert not (owned_instances or owned_groups or owned_nodes)
+      assert not (owned_instances or owned_groups or
+                  owned_nodes or owned_networks)
 
     self.wanted_instances = instances.values()
 
@@ -12544,6 +13097,8 @@ class LUInstanceQueryData(NoHooksLU):
       "children": dev_children,
       "mode": dev.mode,
       "size": dev.size,
+      "name": dev.name,
+      "uuid": dev.uuid,
       }
 
   def Exec(self, feedback_fn):
@@ -12559,7 +13114,6 @@ class LUInstanceQueryData(NoHooksLU):
                                                  for node in nodes.values()))
 
     group2name_fn = lambda uuid: groups[uuid].name
-
     for instance in self.wanted_instances:
       pnode = nodes[instance.primary_node]
 
@@ -12640,6 +13194,42 @@ def PrepareContainerMods(mods, private_fn):
   return [(op, idx, params, fn()) for (op, idx, params) in mods]
 
 
+def GetItemFromContainer(identifier, kind, container):
+  """Return the item refered by the identifier.
+
+  @type identifier: string
+  @param identifier: Item index or name or UUID
+  @type kind: string
+  @param kind: One-word item description
+  @type container: list
+  @param container: Container to get the item from
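+  @rtype: tuple
+  @return: The (absolute index, item) pair; an index of -1 resolves to the
+    last position in the container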
+
+  """
+  # First, try to interpret the identifier as a numeric index
+  try:
+    idx = int(identifier)
+    if idx == -1:
+      # Append
+      absidx = len(container) - 1
+    elif idx < 0:
+      raise IndexError("Not accepting negative indices other than -1")
+    elif idx > len(container):
+      raise IndexError("Got %s index %s, but there are only %s" %
+                       (kind, idx, len(container)))
+    else:
+      absidx = idx
+    return (absidx, container[idx])
+  except ValueError:
+    pass
+
+  for idx, item in enumerate(container):
+    if item.uuid == identifier or item.name == identifier:
+      return (idx, item)
+
+  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
+                             (kind, identifier), errors.ECODE_NOENT)
+
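+# Example usage (illustrative): given a non-empty list `nics` of NIC objects,
+# GetItemFromContainer("0", "NIC", nics) returns (0, nics[0]), while
+# GetItemFromContainer("eth0", "NIC", nics) falls back to matching each
+# item's name or uuid attribute.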
+
 #: Type description for changes as returned by L{ApplyContainerMods}'s
 #: callbacks
 _TApplyContModsCbChanges = \
@@ -12676,25 +13266,26 @@ def ApplyContainerMods(kind, container, chgdesc, mods,
     item and private data object as added by L{PrepareContainerMods}
 
   """
-  for (op, idx, params, private) in mods:
-    if idx == -1:
-      # Append
-      absidx = len(container) - 1
-    elif idx < 0:
-      raise IndexError("Not accepting negative indices other than -1")
-    elif idx > len(container):
-      raise IndexError("Got %s index %s, but there are only %s" %
-                       (kind, idx, len(container)))
-    else:
-      absidx = idx
-
+  for (op, identifier, params, private) in mods:
     changes = None
 
     if op == constants.DDM_ADD:
       # Calculate where item will be added
+      # When adding an item, identifier can only be an index
+      try:
+        idx = int(identifier)
+      except ValueError:
+        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
+                                   " identifier for %s" % constants.DDM_ADD,
+                                   errors.ECODE_INVAL)
       if idx == -1:
         addidx = len(container)
       else:
+        if idx < 0:
+          raise IndexError("Not accepting negative indices other than -1")
+        elif idx > len(container):
+          raise IndexError("Got %s index %s, but there are only %s" %
+                           (kind, idx, len(container)))
         addidx = idx
 
       if create_fn is None:
@@ -12711,10 +13302,7 @@ def ApplyContainerMods(kind, container, chgdesc, mods,
         container.insert(idx, item)
     else:
       # Retrieve existing item
-      try:
-        item = container[absidx]
-      except IndexError:
-        raise IndexError("Invalid %s index %s" % (kind, idx))
+      (absidx, item) = GetItemFromContainer(identifier, kind, container)
 
       if op == constants.DDM_REMOVE:
         assert not params
@@ -12802,7 +13390,10 @@ class LUInstanceSetParams(LogicalUnit):
     for (op, _, params) in mods:
       assert ht.TDict(params)
 
-      utils.ForceDictType(params, key_types)
+      # If 'key_types' is an empty dict, we assume we have an
+      # 'ext' template and thus do not ForceDictType
+      if key_types:
+        utils.ForceDictType(params, key_types)
 
       if op == constants.DDM_REMOVE:
         if params:
@@ -12837,10 +13428,21 @@ class LUInstanceSetParams(LogicalUnit):
                                    errors.ECODE_INVAL)
 
       params[constants.IDISK_SIZE] = size
-
-    elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
-      raise errors.OpPrereqError("Disk size change not possible, use"
-                                 " grow-disk", errors.ECODE_INVAL)
+      name = params.get(constants.IDISK_NAME, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.IDISK_NAME] = None
+
+    elif op == constants.DDM_MODIFY:
+      if constants.IDISK_SIZE in params:
+        raise errors.OpPrereqError("Disk size change not possible, use"
+                                   " grow-disk", errors.ECODE_INVAL)
+      if len(params) > 2:
+        raise errors.OpPrereqError("Disk modification doesn't support"
+                                   " additional arbitrary parameters",
+                                   errors.ECODE_INVAL)
+      name = params.get(constants.IDISK_NAME, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.IDISK_NAME] = None
 
   @staticmethod
   def _VerifyNicModification(op, params):
@@ -12849,9 +13451,12 @@ class LUInstanceSetParams(LogicalUnit):
     """
     if op in (constants.DDM_ADD, constants.DDM_MODIFY):
       ip = params.get(constants.INIC_IP, None)
+      name = params.get(constants.INIC_NAME, None)
       req_net = params.get(constants.INIC_NETWORK, None)
       link = params.get(constants.NIC_LINK, None)
       mode = params.get(constants.NIC_MODE, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.INIC_NAME] = None
       if req_net is not None:
         if req_net.lower() == constants.VALUE_NONE:
           params[constants.INIC_NETWORK] = None
@@ -12893,21 +13498,19 @@ class LUInstanceSetParams(LogicalUnit):
   def CheckArguments(self):
     if not (self.op.nics or self.op.disks or self.op.disk_template or
             self.op.hvparams or self.op.beparams or self.op.os_name or
-            self.op.offline is not None or self.op.runtime_mem):
+            self.op.offline is not None or self.op.runtime_mem or
+            self.op.pnode):
       raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
 
     if self.op.hvparams:
-      _CheckGlobalHvParams(self.op.hvparams)
+      _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
+                            "hypervisor", "instance", "cluster")
 
     self.op.disks = self._UpgradeDiskNicMods(
       "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
     self.op.nics = self._UpgradeDiskNicMods(
       "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
 
-    # Check disk modifications
-    self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
-                    self._VerifyDiskModification)
-
     if self.op.disks and self.op.disk_template is not None:
       raise errors.OpPrereqError("Disk template conversion and other disk"
                                  " changes not supported at the same time",
@@ -12924,6 +13527,9 @@ class LUInstanceSetParams(LogicalUnit):
     self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                     self._VerifyNicModification)
 
+    if self.op.pnode:
+      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODEGROUP] = []
@@ -12994,7 +13600,7 @@ class LUInstanceSetParams(LogicalUnit):
     nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
     return (nl, nl)
 
-  def _PrepareNicModification(self, params, private, old_ip, old_net,
+  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                               old_params, cluster, pnode):
 
     update_params_dict = dict([(key, params[key])
@@ -13004,13 +13610,21 @@ class LUInstanceSetParams(LogicalUnit):
     req_link = update_params_dict.get(constants.NIC_LINK, None)
     req_mode = update_params_dict.get(constants.NIC_MODE, None)
 
-    new_net = params.get(constants.INIC_NETWORK, old_net)
-    if new_net is not None:
-      netparams = self.cfg.GetGroupNetParams(new_net, pnode)
-      if netparams is None:
+    new_net_uuid = None
+    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
+    if new_net_uuid_or_name:
+      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
+      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
+
+    if old_net_uuid:
+      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
+
+    if new_net_uuid:
+      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
+      if not netparams:
         raise errors.OpPrereqError("No netparams found for the network"
-                                   " %s, probably not connected" % new_net,
-                                   errors.ECODE_INVAL)
+                                   " %s, probably not connected" %
+                                   new_net_obj.name, errors.ECODE_INVAL)
       new_params = dict(netparams)
     else:
       new_params = _GetUpdatedParams(old_params, update_params_dict)
@@ -13049,7 +13663,7 @@ class LUInstanceSetParams(LogicalUnit):
       elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
         # otherwise generate the MAC address
         params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(new_net, self.proc.GetECId())
+          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
       else:
         # or validate/reserve the current one
         try:
@@ -13058,62 +13672,66 @@ class LUInstanceSetParams(LogicalUnit):
           raise errors.OpPrereqError("MAC address '%s' already in use"
                                      " in cluster" % mac,
                                      errors.ECODE_NOTUNIQUE)
-    elif new_net != old_net:
-
-      def get_net_prefix(net):
-        if net:
-          uuid = self.cfg.LookupNetwork(net)
-          if uuid:
-            nobj = self.cfg.GetNetwork(uuid)
-            return nobj.mac_prefix
-        return None
-
-      new_prefix = get_net_prefix(new_net)
-      old_prefix = get_net_prefix(old_net)
+    elif new_net_uuid != old_net_uuid:
+
+      def get_net_prefix(net_uuid):
+        mac_prefix = None
+        if net_uuid:
+          nobj = self.cfg.GetNetwork(net_uuid)
+          mac_prefix = nobj.mac_prefix
+
+        return mac_prefix
+
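+      # Illustration: when a NIC moves to a network with a different
+      # mac_prefix, a new MAC is generated below from the new network.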
+      new_prefix = get_net_prefix(new_net_uuid)
+      old_prefix = get_net_prefix(old_net_uuid)
       if old_prefix != new_prefix:
         params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(new_net, self.proc.GetECId())
+          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
 
-    #if there is a change in nic-network configuration
+    # if there is a change in (ip, network) tuple
     new_ip = params.get(constants.INIC_IP, old_ip)
-    if (new_ip, new_net) != (old_ip, old_net):
+    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
       if new_ip:
-        if new_net:
-          if new_ip.lower() == constants.NIC_IP_POOL:
+        # if IP is pool then require a network and generate one IP
+        if new_ip.lower() == constants.NIC_IP_POOL:
+          if new_net_uuid:
             try:
-              new_ip = self.cfg.GenerateIp(new_net, self.proc.GetECId())
+              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
             except errors.ReservationError:
               raise errors.OpPrereqError("Unable to get a free IP"
                                          " from the address pool",
                                          errors.ECODE_STATE)
-            self.LogInfo("Chose IP %s from pool %s", new_ip, new_net)
+            self.LogInfo("Chose IP %s from network %s",
+                         new_ip,
+                         new_net_obj.name)
             params[constants.INIC_IP] = new_ip
-          elif new_ip != old_ip or new_net != old_net:
-            try:
-              self.LogInfo("Reserving IP %s in pool %s", new_ip, new_net)
-              self.cfg.ReserveIp(new_net, new_ip, self.proc.GetECId())
-            except errors.ReservationError:
-              raise errors.OpPrereqError("IP %s not available in network %s" %
-                                         (new_ip, new_net),
-                                         errors.ECODE_NOTUNIQUE)
-        elif new_ip.lower() == constants.NIC_IP_POOL:
-          raise errors.OpPrereqError("ip=pool, but no network found",
-                                     errors.ECODE_INVAL)
-        else:
-          # new net is None
-          if self.op.conflicts_check:
-            _CheckForConflictingIp(self, new_ip, pnode)
-
-      if old_ip:
-        if old_net:
+          else:
+            raise errors.OpPrereqError("ip=pool, but no network found",
+                                       errors.ECODE_INVAL)
+        # Reserve the new IP in the new network, if any
+        elif new_net_uuid:
           try:
-            self.cfg.ReleaseIp(old_net, old_ip, self.proc.GetECId())
-          except errors.AddressPoolError:
-            logging.warning("Release IP %s not contained in network %s",
-                            old_ip, old_net)
+            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
+            self.LogInfo("Reserving IP %s in network %s",
+                         new_ip, new_net_obj.name)
+          except errors.ReservationError:
+            raise errors.OpPrereqError("IP %s not available in network %s" %
+                                       (new_ip, new_net_obj.name),
+                                       errors.ECODE_NOTUNIQUE)
+        # new network is None so check if new IP is a conflicting IP
+        elif self.op.conflicts_check:
+          _CheckForConflictingIp(self, new_ip, pnode)
+
+      # release old IP if old network is not None
+      if old_ip and old_net_uuid:
+        try:
+          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
+        except errors.AddressPoolError:
+          logging.warning("Release IP %s not contained in network %s",
+                          old_ip, old_net_obj.name)
 
-    # there are no changes in (net, ip) tuple
-    elif (old_net is not None and
+    # there are no changes in (ip, network) tuple and old network is not None
+    elif (old_net_uuid is not None and
           (req_link is not None or req_mode is not None)):
       raise errors.OpPrereqError("Not allowed to change link or mode of"
                                  " a NIC that is connected to a network",
@@ -13122,6 +13740,64 @@ class LUInstanceSetParams(LogicalUnit):
     private.params = new_params
     private.filled = new_filled_params
 
+  def _PreCheckDiskTemplate(self, pnode_info):
+    """CheckPrereq checks related to a new disk template."""
+    # Arguments are passed to avoid configuration lookups
+    instance = self.instance
+    pnode = instance.primary_node
+    cluster = self.cluster
+    if instance.disk_template == self.op.disk_template:
+      raise errors.OpPrereqError("Instance already has disk template %s" %
+                                 instance.disk_template, errors.ECODE_INVAL)
+
+    if (instance.disk_template,
+        self.op.disk_template) not in self._DISK_CONVERSIONS:
+      raise errors.OpPrereqError("Unsupported disk template conversion from"
+                                 " %s to %s" % (instance.disk_template,
+                                                self.op.disk_template),
+                                 errors.ECODE_INVAL)
+    _CheckInstanceState(self, instance, INSTANCE_DOWN,
+                        msg="cannot change disk template")
+    if self.op.disk_template in constants.DTS_INT_MIRROR:
+      if self.op.remote_node == pnode:
+        raise errors.OpPrereqError("Given new secondary node %s is the same"
+                                   " as the primary node of the instance" %
+                                   self.op.remote_node, errors.ECODE_STATE)
+      _CheckNodeOnline(self, self.op.remote_node)
+      _CheckNodeNotDrained(self, self.op.remote_node)
+      # FIXME: here we assume that the old instance type is DT_PLAIN
+      assert instance.disk_template == constants.DT_PLAIN
+      disks = [{constants.IDISK_SIZE: d.size,
+                constants.IDISK_VG: d.logical_id[0]}
+               for d in instance.disks]
+      required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
+      _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
+
+      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
+      snode_group = self.cfg.GetNodeGroup(snode_info.group)
+      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
+                                                              snode_group)
+      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
+                              ignore=self.op.ignore_ipolicy)
+      if pnode_info.group != snode_info.group:
+        self.LogWarning("The primary and secondary nodes are in two"
+                        " different node groups; the disk parameters"
+                        " from the first disk's node group will be"
+                        " used")
+
+    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
+      # Make sure none of the nodes require exclusive storage
+      nodes = [pnode_info]
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
+        assert snode_info
+        nodes.append(snode_info)
+      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+      if compat.any(map(has_es, nodes)):
+        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
+                  " storage is enabled" % (instance.disk_template,
+                                           self.op.disk_template))
+        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
+
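The exclusive-storage guard at the end of _PreCheckDiskTemplate is worth
restating on its own: a conversion is refused as soon as any involved node
has exclusive_storage enabled, unless the target template is in
DTS_EXCL_STORAGE. A hedged sketch with stand-in names (only the shape of
the check is taken from the hunk above):

    def refuse_excl_storage_conversion(get_ndparams, nodes, new_template,
                                       excl_templates):
      # get_ndparams(node) -> dict of node parameters (assumed interface)
      if new_template in excl_templates:
        return  # the target template supports exclusive storage
      if any(get_ndparams(n)["exclusive_storage"] for n in nodes):
        raise RuntimeError("cannot convert the disk template while"
                           " exclusive storage is enabled on a node")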
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -13136,6 +13812,21 @@ class LUInstanceSetParams(LogicalUnit):
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
     pnode = instance.primary_node
+
+    self.warn = []
+
+    if (self.op.pnode is not None and self.op.pnode != pnode and
+        not self.op.force):
+      # verify that the instance is not up
+      instance_info = self.rpc.call_instance_info(pnode, instance.name,
+                                                  instance.hypervisor)
+      if instance_info.fail_msg:
+        self.warn.append("Can't get instance runtime information: %s" %
+                         instance_info.fail_msg)
+      elif instance_info.payload:
+        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
+                                   errors.ECODE_STATE)
+
     assert pnode in self.owned_locks(locking.LEVEL_NODE)
     nodelist = list(instance.all_nodes)
     pnode_info = self.cfg.GetNodeInfo(pnode)
@@ -13148,10 +13839,46 @@ class LUInstanceSetParams(LogicalUnit):
     # dictionary with instance information after the modification
     ispec = {}
 
+    # Check disk modifications. This is done here and not in CheckArguments
+    # (as with NICs), because we need to know the instance's disk template
+    if instance.disk_template == constants.DT_EXT:
+      self._CheckMods("disk", self.op.disks, {},
+                      self._VerifyDiskModification)
+    else:
+      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
+                      self._VerifyDiskModification)
+
     # Prepare disk/NIC modifications
     self.diskmod = PrepareContainerMods(self.op.disks, None)
     self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
 
+    # Check the validity of the `provider' parameter
+    if instance.disk_template == constants.DT_EXT:
+      for mod in self.diskmod:
+        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
+        if mod[0] == constants.DDM_ADD:
+          if ext_provider is None:
+            raise errors.OpPrereqError("Instance template is '%s' and parameter"
+                                       " '%s' missing, during disk add" %
+                                       (constants.DT_EXT,
+                                        constants.IDISK_PROVIDER),
+                                       errors.ECODE_NOENT)
+        elif mod[0] == constants.DDM_MODIFY:
+          if ext_provider:
+            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
+                                       " modification" %
+                                       constants.IDISK_PROVIDER,
+                                       errors.ECODE_INVAL)
+    else:
+      for mod in self.diskmod:
+        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
+        if ext_provider is not None:
+          raise errors.OpPrereqError("Parameter '%s' is only valid for"
+                                     " instances of type '%s'" %
+                                     (constants.IDISK_PROVIDER,
+                                      constants.DT_EXT),
+                                     errors.ECODE_INVAL)
+
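The `provider' rules above reduce to three cases: required when adding a
disk to a DT_EXT instance, rejected when modifying one, and rejected
outright for any other template. An illustrative restatement, where the
(op, index, params) tuples mirror the container-modification format but
the helper itself is hypothetical:

    def check_provider_param(disk_template, mods):
      for (op, _, params) in mods:
        provider = params.get("provider")
        if disk_template == "ext":
          if op == "add" and provider is None:
            raise ValueError("provider is required when adding a disk")
          if op == "modify" and provider is not None:
            raise ValueError("provider cannot be changed on a disk")
        elif provider is not None:
          raise ValueError("provider is only valid for 'ext' instances")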
     # OS change
     if self.op.os_name and not self.op.force:
       _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
@@ -13164,44 +13891,7 @@ class LUInstanceSetParams(LogicalUnit):
       "Can't modify disk template and apply disk changes at the same time"
 
     if self.op.disk_template:
-      if instance.disk_template == self.op.disk_template:
-        raise errors.OpPrereqError("Instance already has disk template %s" %
-                                   instance.disk_template, errors.ECODE_INVAL)
-
-      if (instance.disk_template,
-          self.op.disk_template) not in self._DISK_CONVERSIONS:
-        raise errors.OpPrereqError("Unsupported disk template conversion from"
-                                   " %s to %s" % (instance.disk_template,
-                                                  self.op.disk_template),
-                                   errors.ECODE_INVAL)
-      _CheckInstanceState(self, instance, INSTANCE_DOWN,
-                          msg="cannot change disk template")
-      if self.op.disk_template in constants.DTS_INT_MIRROR:
-        if self.op.remote_node == pnode:
-          raise errors.OpPrereqError("Given new secondary node %s is the same"
-                                     " as the primary node of the instance" %
-                                     self.op.remote_node, errors.ECODE_STATE)
-        _CheckNodeOnline(self, self.op.remote_node)
-        _CheckNodeNotDrained(self, self.op.remote_node)
-        # FIXME: here we assume that the old instance type is DT_PLAIN
-        assert instance.disk_template == constants.DT_PLAIN
-        disks = [{constants.IDISK_SIZE: d.size,
-                  constants.IDISK_VG: d.logical_id[0]}
-                 for d in instance.disks]
-        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
-        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
-
-        snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
-        snode_group = self.cfg.GetNodeGroup(snode_info.group)
-        ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
-                                                                snode_group)
-        _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
-                                ignore=self.op.ignore_ipolicy)
-        if pnode_info.group != snode_info.group:
-          self.LogWarning("The primary and secondary nodes are in two"
-                          " different node groups; the disk parameters"
-                          " from the first disk's node group will be"
-                          " used")
+      self._PreCheckDiskTemplate(pnode_info)
 
     # hvparams processing
     if self.op.hvparams:
@@ -13211,7 +13901,7 @@ class LUInstanceSetParams(LogicalUnit):
       hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
 
       # local check
-      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
+      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
       _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
       self.hv_proposed = self.hv_new = hv_new # the new actual values
       self.hv_inst = i_hvdict # the new dict (without defaults)
@@ -13269,8 +13959,6 @@ class LUInstanceSetParams(LogicalUnit):
     else:
       self.os_inst = {}
 
-    self.warn = []
-
     #TODO(dynmem): do the appropriate check involving MINMEM
     if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
         be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
@@ -13281,7 +13969,7 @@ class LUInstanceSetParams(LogicalUnit):
       instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                   instance.hypervisor)
       nodeinfo = self.rpc.call_node_info(mem_check_list, None,
-                                         [instance.hypervisor])
+                                         [instance.hypervisor], False)
       pninfo = nodeinfo[pnode]
       msg = pninfo.fail_msg
       if msg:
@@ -13388,9 +14076,14 @@ class LUInstanceSetParams(LogicalUnit):
                                  " (%d), cannot add more" % constants.MAX_NICS,
                                  errors.ECODE_STATE)
 
+    def _PrepareDiskMod(_, disk, params, __):
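+      # Apply only the name here; utils.ValidateDeviceNames below checks
+      # the resulting set of names on the copied disks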
+      disk.name = params.get(constants.IDISK_NAME, None)
+
     # Verify disk changes (operating on a copy)
-    disks = instance.disks[:]
-    ApplyContainerMods("disk", disks, None, self.diskmod, None, None, None)
+    disks = copy.deepcopy(instance.disks)
+    ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
+                       None)
+    utils.ValidateDeviceNames("disk", disks)
     if len(disks) > constants.MAX_DISKS:
       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                  " more" % constants.MAX_DISKS,
@@ -13412,6 +14105,8 @@ class LUInstanceSetParams(LogicalUnit):
       nics = [nic.Copy() for nic in instance.nics]
       ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
+      # Verify that NIC names are unique and valid
+      utils.ValidateDeviceNames("NIC", nics)
       self._new_nics = nics
       ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
     else:
@@ -13429,14 +14124,20 @@ class LUInstanceSetParams(LogicalUnit):
                                                          None)
 
       # Copy ispec to verify parameters with min/max values separately
+      if self.op.disk_template:
+        new_disk_template = self.op.disk_template
+      else:
+        new_disk_template = instance.disk_template
       ispec_max = ispec.copy()
       ispec_max[constants.ISPEC_MEM_SIZE] = \
         self.be_new.get(constants.BE_MAXMEM, None)
-      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max)
+      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
+                                                     new_disk_template)
       ispec_min = ispec.copy()
       ispec_min[constants.ISPEC_MEM_SIZE] = \
         self.be_new.get(constants.BE_MINMEM, None)
-      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min)
+      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
+                                                     new_disk_template)
 
       if (res_max or res_min):
         # FIXME: Improve error message by including information about whether
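The copy-and-override steps above exist because memory is the one spec
with two bounds: the instance must satisfy the policy at both BE_MAXMEM
and BE_MINMEM. A compact restatement, where violations_fn stands in for
_ComputeIPolicyInstanceSpecViolation and the "mem-size" key is
illustrative (the real code uses constants.ISPEC_MEM_SIZE):

    def check_mem_bounds(violations_fn, ipolicy, ispec, be_new, template):
      results = []
      for be_key in ("maxmem", "minmem"):  # check each bound separately
        probe = ispec.copy()
        probe["mem-size"] = be_new.get(be_key)
        results.extend(violations_fn(ipolicy, probe, template))
      return results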
@@ -13459,7 +14160,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # create a fake disk info for _GenerateDiskTemplate
     disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
-                  constants.IDISK_VG: d.logical_id[0]}
+                  constants.IDISK_VG: d.logical_id[0],
+                  constants.IDISK_NAME: d.name}
                  for d in instance.disks]
     new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                       instance.name, pnode, [snode],
@@ -13467,15 +14169,18 @@ class LUInstanceSetParams(LogicalUnit):
                                       self.diskparams)
     anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                         self.diskparams)
+    p_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
+    s_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, snode)
     info = _GetInstanceInfoText(instance)
     feedback_fn("Creating additional volumes...")
     # first, create the missing data and meta devices
     for disk in anno_disks:
       # unfortunately this is... not too nice
       _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
-                            info, True)
+                            info, True, p_excl_stor)
       for child in disk.children:
-        _CreateSingleBlockDev(self, snode, instance, child, info, True)
+        _CreateSingleBlockDev(self, snode, instance, child, info, True,
+                              s_excl_stor)
     # at this stage, all new LVs have been created, we can rename the
     # old ones
     feedback_fn("Renaming original volumes...")
@@ -13487,9 +14192,10 @@ class LUInstanceSetParams(LogicalUnit):
     feedback_fn("Initializing DRBD devices...")
     # all child devices are in place, we can now create the DRBD devices
     for disk in anno_disks:
-      for node in [pnode, snode]:
+      for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
         f_create = node == pnode
-        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
+        _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
+                              excl_stor)
 
     # at this point, the instance has been modified
     instance.disk_template = constants.DT_DRBD8
@@ -13524,10 +14230,11 @@ class LUInstanceSetParams(LogicalUnit):
     old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
     new_disks = [d.children[0] for d in instance.disks]
 
-    # copy over size and mode
+    # copy over size, mode and name
     for parent, child in zip(old_disks, new_disks):
       child.size = parent.size
       child.mode = parent.mode
+      child.name = parent.name
 
     # this is a DRBD disk, return its port to the pool
     # NOTE: this must be done right before the call to cfg.Update!
@@ -13538,6 +14245,7 @@ class LUInstanceSetParams(LogicalUnit):
     # update instance structure
     instance.disks = new_disks
     instance.disk_template = constants.DT_PLAIN
+    _UpdateIvNames(0, instance.disks)
     self.cfg.Update(instance, feedback_fn)
 
     # Release locks in case removing disks takes a while
@@ -13593,6 +14301,11 @@ class LUInstanceSetParams(LogicalUnit):
         self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
                         disk.iv_name, disk, node, err)
 
+    if self.cluster.prealloc_wipe_disks:
+      # Wipe new disk
+      _WipeDisks(self, instance,
+                 disks=[(idx, disk, 0)])
+
     return (disk, [
       ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
       ])
@@ -13602,11 +14315,17 @@ class LUInstanceSetParams(LogicalUnit):
     """Modifies a disk.
 
     """
-    disk.mode = params[constants.IDISK_MODE]
+    changes = []
+    mode = params.get(constants.IDISK_MODE, None)
+    if mode:
+      disk.mode = mode
+      changes.append(("disk.mode/%d" % idx, disk.mode))
 
-    return [
-      ("disk.mode/%d" % idx, disk.mode),
-      ]
+    name = params.get(constants.IDISK_NAME, None)
+    if name is not None:
+      disk.name = name
+      changes.append(("disk.name/%d" % idx, disk.name))
+
+    return changes
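For job feedback, _ModifyDisk returns (field, value) pairs describing what
changed; a modification of disk 1 that sets both mode and name would, with
illustrative values, yield:

    [("disk.mode/1", "ro"), ("disk.name/1", "data")]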
 
   def _RemoveDisk(self, idx, root, _):
     """Removes a disk.
@@ -13624,18 +14343,22 @@ class LUInstanceSetParams(LogicalUnit):
     if root.dev_type in constants.LDS_DRBD:
       self.cfg.AddTcpUdpPort(root.logical_id[2])
 
-  @staticmethod
-  def _CreateNewNic(idx, params, private):
+  def _CreateNewNic(self, idx, params, private):
     """Creates data structure for a new network interface.
 
     """
     mac = params[constants.INIC_MAC]
     ip = params.get(constants.INIC_IP, None)
     net = params.get(constants.INIC_NETWORK, None)
+    name = params.get(constants.INIC_NAME, None)
+    net_uuid = self.cfg.LookupNetwork(net)
     #TODO: not private.filled?? can a nic have no nicparams??
     nicparams = private.filled
+    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
+                       nicparams=nicparams)
+    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
 
-    return (objects.NIC(mac=mac, ip=ip, network=net, nicparams=nicparams), [
+    return (nobj, [
       ("nic.%d" % idx,
        "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
        (mac, ip, private.filled[constants.NIC_MODE],
@@ -13643,18 +14366,23 @@ class LUInstanceSetParams(LogicalUnit):
        net)),
       ])
 
-  @staticmethod
-  def _ApplyNicMods(idx, nic, params, private):
+  def _ApplyNicMods(self, idx, nic, params, private):
     """Modifies a network interface.
 
     """
     changes = []
 
-    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NETWORK]:
+    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
       if key in params:
         changes.append(("nic.%s/%d" % (key, idx), params[key]))
         setattr(nic, key, params[key])
 
+    new_net = params.get(constants.INIC_NETWORK, nic.network)
+    new_net_uuid = self.cfg.LookupNetwork(new_net)
+    if new_net_uuid != nic.network:
+      changes.append(("nic.network/%d" % idx, new_net))
+      nic.network = new_net_uuid
+
     if private.filled:
       nic.nicparams = private.filled
 
@@ -13682,6 +14410,10 @@ class LUInstanceSetParams(LogicalUnit):
     result = []
     instance = self.instance
 
+    # New primary node
+    if self.op.pnode:
+      instance.primary_node = self.op.pnode
+
     # runtime memory
     if self.op.runtime_mem:
       rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
@@ -14858,7 +15590,7 @@ class LUGroupSetParams(LogicalUnit):
       violations = \
           _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                                   self.group),
-                                        new_ipolicy, instances)
+                                        new_ipolicy, instances, self.cfg)
 
       if violations:
         self.LogWarning("After the ipolicy change the following instances"
@@ -15678,7 +16410,8 @@ class LUTestAllocator(NoHooksLU):
                                           nics=self.op.nics,
                                           vcpus=self.op.vcpus,
                                           spindle_use=self.op.spindle_use,
-                                          hypervisor=self.op.hypervisor)
+                                          hypervisor=self.op.hypervisor,
+                                          node_whitelist=None)
     elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
       req = iallocator.IAReqRelocate(name=self.op.name,
                                      relocate_from=list(self.relocate_from))
@@ -15755,11 +16488,15 @@ class LUNetworkAdd(LogicalUnit):
       raise errors.OpPrereqError("Network must be given",
                                  errors.ECODE_INVAL)
 
-    uuid = self.cfg.LookupNetwork(self.op.network_name)
-
-    if uuid:
-      raise errors.OpPrereqError("Network '%s' already defined" %
-                                 self.op.network, errors.ECODE_EXISTS)
+    try:
+      existing_uuid = self.cfg.LookupNetwork(self.op.network_name)
+    except errors.OpPrereqError:
+      pass
+    else:
+      raise errors.OpPrereqError("Desired network name '%s' already exists as a"
+                                 " network (UUID: %s)" %
+                                 (self.op.network_name, existing_uuid),
+                                 errors.ECODE_EXISTS)
 
     # Check tag validity
     for tag in self.op.tags:
@@ -15776,7 +16513,6 @@ class LUNetworkAdd(LogicalUnit):
       "network6": self.op.network6,
       "gateway6": self.op.gateway6,
       "mac_prefix": self.op.mac_prefix,
-      "network_type": self.op.network_type,
       "tags": self.op.tags,
       }
     return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
@@ -15791,14 +16527,13 @@ class LUNetworkAdd(LogicalUnit):
                            network6=self.op.network6,
                            gateway6=self.op.gateway6,
                            mac_prefix=self.op.mac_prefix,
-                           network_type=self.op.network_type,
-                           uuid=self.network_uuid,
-                           family=constants.IP4_VERSION)
+                           uuid=self.network_uuid)
     # Initialize the associated address pool
     try:
       pool = network.AddressPool.InitializeNetwork(nobj)
-    except errors.AddressPoolError, e:
-      raise errors.OpExecError("Cannot create IP pool for this network. %s" % e)
+    except errors.AddressPoolError, err:
+      raise errors.OpExecError("Cannot create IP address pool for network"
+                               " '%s': %s" % (self.op.network_name, err))
 
     # Check if we need to reserve the nodes and the cluster master IP
     # These may not be allocated to any instances in routed mode, as
@@ -15811,25 +16546,26 @@ class LUNetworkAdd(LogicalUnit):
               pool.Reserve(ip)
               self.LogInfo("Reserved IP address of node '%s' (%s)",
                            node.name, ip)
-          except errors.AddressPoolError:
-            self.LogWarning("Cannot reserve IP address of node '%s' (%s)",
-                            node.name, ip)
+          except errors.AddressPoolError, err:
+            self.LogWarning("Cannot reserve IP address '%s' of node '%s': %s",
+                            ip, node.name, err)
 
       master_ip = self.cfg.GetClusterInfo().master_ip
       try:
         if pool.Contains(master_ip):
           pool.Reserve(master_ip)
           self.LogInfo("Reserved cluster master IP address (%s)", master_ip)
-      except errors.AddressPoolError:
-        self.LogWarning("Cannot reserve cluster master IP address (%s)",
-                        master_ip)
+      except errors.AddressPoolError, err:
+        self.LogWarning("Cannot reserve cluster master IP address (%s): %s",
+                        master_ip, err)
 
     if self.op.add_reserved_ips:
       for ip in self.op.add_reserved_ips:
         try:
           pool.Reserve(ip, external=True)
-        except errors.AddressPoolError, e:
-          raise errors.OpExecError("Cannot reserve IP %s. %s " % (ip, e))
+        except errors.AddressPoolError, err:
+          raise errors.OpExecError("Cannot reserve IP address '%s': %s" %
+                                   (ip, err))
 
     if self.op.tags:
       for tag in self.op.tags:
@@ -15847,11 +16583,6 @@ class LUNetworkRemove(LogicalUnit):
   def ExpandNames(self):
     self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
 
-    if not self.network_uuid:
-      raise errors.OpPrereqError(("Network '%s' not found" %
-                                  self.op.network_name),
-                                 errors.ECODE_INVAL)
-
     self.share_locks[locking.LEVEL_NODEGROUP] = 1
     self.needed_locks = {
       locking.LEVEL_NETWORK: [self.network_uuid],
@@ -15920,10 +16651,6 @@ class LUNetworkSetParams(LogicalUnit):
 
   def ExpandNames(self):
     self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
-    if self.network_uuid is None:
-      raise errors.OpPrereqError(("Network '%s' not found" %
-                                  self.op.network_name),
-                                 errors.ECODE_INVAL)
 
     self.needed_locks = {
       locking.LEVEL_NETWORK: [self.network_uuid],
@@ -15935,7 +16662,6 @@ class LUNetworkSetParams(LogicalUnit):
     """
     self.network = self.cfg.GetNetwork(self.network_uuid)
     self.gateway = self.network.gateway
-    self.network_type = self.network.network_type
     self.mac_prefix = self.network.mac_prefix
     self.network6 = self.network.network6
     self.gateway6 = self.network.gateway6
@@ -15949,14 +16675,9 @@ class LUNetworkSetParams(LogicalUnit):
       else:
         self.gateway = self.op.gateway
         if self.pool.IsReserved(self.gateway):
-          raise errors.OpPrereqError("%s is already reserved" %
-                                     self.gateway, errors.ECODE_INVAL)
-
-    if self.op.network_type:
-      if self.op.network_type == constants.VALUE_NONE:
-        self.network_type = None
-      else:
-        self.network_type = self.op.network_type
+          raise errors.OpPrereqError("Gateway IP address '%s' is already"
+                                     " reserved" % self.gateway,
+                                     errors.ECODE_STATE)
 
     if self.op.mac_prefix:
       if self.op.mac_prefix == constants.VALUE_NONE:
@@ -15988,7 +16709,6 @@ class LUNetworkSetParams(LogicalUnit):
       "network6": self.network6,
       "gateway6": self.gateway6,
       "mac_prefix": self.mac_prefix,
-      "network_type": self.network_type,
       "tags": self.tags,
       }
     return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
@@ -16048,9 +16768,6 @@ class LUNetworkSetParams(LogicalUnit):
     if self.op.gateway6:
       self.network.gateway6 = self.gateway6
 
-    if self.op.network_type:
-      self.network.network_type = self.network_type
-
     self.pool.Validate()
 
     self.cfg.Update(self.network, feedback_fn)
@@ -16061,23 +16778,19 @@ class _NetworkQuery(_QueryBase):
 
   def ExpandNames(self, lu):
     lu.needed_locks = {}
+    lu.share_locks = _ShareAll()
 
-    self._all_networks = lu.cfg.GetAllNetworksInfo()
-    name_to_uuid = dict((n.name, n.uuid) for n in self._all_networks.values())
+    self.do_locking = self.use_locking
 
-    if not self.names:
-      self.wanted = [name_to_uuid[name]
-                     for name in utils.NiceSort(name_to_uuid.keys())]
-    else:
-      # Accept names to be either names or UUIDs.
+    all_networks = lu.cfg.GetAllNetworksInfo()
+    name_to_uuid = dict((n.name, n.uuid) for n in all_networks.values())
+
+    if self.names:
       missing = []
       self.wanted = []
-      all_uuid = frozenset(self._all_networks.keys())
 
       for name in self.names:
-        if name in all_uuid:
-          self.wanted.append(name)
-        elif name in name_to_uuid:
+        if name in name_to_uuid:
           self.wanted.append(name_to_uuid[name])
         else:
           missing.append(name)
@@ -16085,6 +16798,15 @@ class _NetworkQuery(_QueryBase):
       if missing:
         raise errors.OpPrereqError("Some networks do not exist: %s" % missing,
                                    errors.ECODE_NOENT)
+    else:
+      self.wanted = locking.ALL_SET
+
+    if self.do_locking:
+      lu.needed_locks[locking.LEVEL_NETWORK] = self.wanted
+      if query.NETQ_INST in self.requested_data:
+        lu.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
+      if query.NETQ_GROUP in self.requested_data:
+        lu.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
 
   def DeclareLocks(self, lu, level):
     pass
@@ -16093,65 +16815,66 @@ class _NetworkQuery(_QueryBase):
     """Computes the list of networks and their attributes.
 
     """
+    all_networks = lu.cfg.GetAllNetworksInfo()
+
+    network_uuids = self._GetNames(lu, all_networks.keys(),
+                                   locking.LEVEL_NETWORK)
+
     do_instances = query.NETQ_INST in self.requested_data
-    do_groups = do_instances or (query.NETQ_GROUP in self.requested_data)
-    do_stats = query.NETQ_STATS in self.requested_data
+    do_groups = query.NETQ_GROUP in self.requested_data
 
-    network_to_groups = None
     network_to_instances = None
-    stats = None
+    network_to_groups = None
 
     # For NETQ_GROUP, we need to map network->[groups]
     if do_groups:
       all_groups = lu.cfg.GetAllNodeGroupsInfo()
-      network_to_groups = dict((uuid, []) for uuid in self.wanted)
+      network_to_groups = dict((uuid, []) for uuid in network_uuids)
+      for group in all_groups.values():
+        for net_uuid in network_uuids:
+          netparams = group.networks.get(net_uuid, None)
+          if netparams:
+            info = (group.name, netparams[constants.NIC_MODE],
+                    netparams[constants.NIC_LINK])
 
-      if do_instances:
-        all_instances = lu.cfg.GetAllInstancesInfo()
-        all_nodes = lu.cfg.GetAllNodesInfo()
-        network_to_instances = dict((uuid, []) for uuid in self.wanted)
-
-      for group in all_groups.values():
-        if do_instances:
-          group_nodes = [node.name for node in all_nodes.values() if
-                         node.group == group.uuid]
-          group_instances = [instance for instance in all_instances.values()
-                             if instance.primary_node in group_nodes]
-
-        for net_uuid in group.networks.keys():
-          if net_uuid in network_to_groups:
-            netparams = group.networks[net_uuid]
-            mode = netparams[constants.NIC_MODE]
-            link = netparams[constants.NIC_LINK]
-            info = group.name + "(" + mode + ", " + link + ")"
             network_to_groups[net_uuid].append(info)
 
-            if do_instances:
-              for instance in group_instances:
-                for nic in instance.nics:
-                  if nic.network == self._all_networks[net_uuid].name:
-                    network_to_instances[net_uuid].append(instance.name)
-                    break
-
-    if do_stats:
-      stats = {}
-      for uuid, net in self._all_networks.items():
-        if uuid in self.wanted:
-          pool = network.AddressPool(net)
-          stats[uuid] = {
-            "free_count": pool.GetFreeCount(),
-            "reserved_count": pool.GetReservedCount(),
-            "map": pool.GetMap(),
-            "external_reservations":
-              utils.CommaJoin(pool.GetExternalReservations()),
-            }
-
-    return query.NetworkQueryData([self._all_networks[uuid]
-                                   for uuid in self.wanted],
+    if do_instances:
+      all_instances = lu.cfg.GetAllInstancesInfo()
+      network_to_instances = dict((uuid, []) for uuid in network_uuids)
+      for instance in all_instances.values():
+        for nic in instance.nics:
+          if nic.network in network_uuids:
+            network_to_instances[nic.network].append(instance.name)
+            break
+
+    if query.NETQ_STATS in self.requested_data:
+      stats = \
+        dict((uuid,
+              self._GetStats(network.AddressPool(all_networks[uuid])))
+             for uuid in network_uuids)
+    else:
+      stats = None
+
+    return query.NetworkQueryData([all_networks[uuid]
+                                   for uuid in network_uuids],
                                    network_to_groups,
                                    network_to_instances,
                                    stats)
 
+  @staticmethod
+  def _GetStats(pool):
+    """Returns statistics for a network address pool.
+
+    """
+    return {
+      "free_count": pool.GetFreeCount(),
+      "reserved_count": pool.GetReservedCount(),
+      "map": pool.GetMap(),
+      "external_reservations":
+        utils.CommaJoin(pool.GetExternalReservations()),
+      }
+
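As an example, a pool of eight addresses with three reservations, one of
them external, might produce a stats dictionary like the following (all
values illustrative, including the map encoding):

    stats_example = {
      "free_count": 5,
      "reserved_count": 3,
      "map": "XX...X..",  # one character per address in the pool
      "external_reservations": "192.0.2.1",
      }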
 
 class LUNetworkQuery(NoHooksLU):
   """Logical unit for querying networks.
@@ -16161,7 +16884,7 @@ class LUNetworkQuery(NoHooksLU):
 
   def CheckArguments(self):
     self.nq = _NetworkQuery(qlang.MakeSimpleFilter("name", self.op.names),
-                            self.op.output_fields, False)
+                            self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.nq.ExpandNames(self)
@@ -16185,14 +16908,7 @@ class LUNetworkConnect(LogicalUnit):
     self.network_link = self.op.network_link
 
     self.network_uuid = self.cfg.LookupNetwork(self.network_name)
-    if self.network_uuid is None:
-      raise errors.OpPrereqError("Network %s does not exist" %
-                                 self.network_name, errors.ECODE_INVAL)
-
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
-    if self.group_uuid is None:
-      raise errors.OpPrereqError("Group %s does not exist" %
-                                 self.group_name, errors.ECODE_INVAL)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
@@ -16231,8 +16947,10 @@ class LUNetworkConnect(LogicalUnit):
 
     assert self.group_uuid in owned_groups
 
-    l = lambda value: utils.CommaJoin("%s: %s/%s" % (i[0], i[1], i[2])
-                                      for i in value)
+    # Check if locked instances are still correct
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    if self.op.conflicts_check:
+      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.netparams = {
       constants.NIC_MODE: self.network_mode,
@@ -16248,38 +16966,62 @@ class LUNetworkConnect(LogicalUnit):
       self.LogWarning("Network '%s' is already mapped to group '%s'" %
                       (self.network_name, self.group.name))
       self.connected = True
-      return
 
-    if self.op.conflicts_check:
-      # Check if locked instances are still correct
-      owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    # check for conflicts only if the network is not already connected
+    elif self.op.conflicts_check:
+      pool = network.AddressPool(self.cfg.GetNetwork(self.network_uuid))
 
-      nobj = self.cfg.GetNetwork(self.network_uuid)
-      pool = network.AddressPool(nobj)
-      conflicting_instances = []
-
-      for (_, instance) in self.cfg.GetMultiInstanceInfo(owned_instances):
-        for idx, nic in enumerate(instance.nics):
-          if pool.Contains(nic.ip):
-            conflicting_instances.append((instance.name, idx, nic.ip))
-
-      if conflicting_instances:
-        self.LogWarning("Following occurences use IPs from network %s"
-                        " that is about to connect to nodegroup %s: %s" %
-                        (self.network_name, self.group.name,
-                        l(conflicting_instances)))
-        raise errors.OpPrereqError("Conflicting IPs found."
-                                   " Please remove/modify"
-                                   " corresponding NICs",
-                                   errors.ECODE_INVAL)
+      _NetworkConflictCheck(self, lambda nic: pool.Contains(nic.ip),
+                            "connect to", owned_instances)
 
   def Exec(self, feedback_fn):
-    if self.connected:
-      return
+    # Connect the network and update the group only if not already connected
+    if not self.connected:
+      self.group.networks[self.network_uuid] = self.netparams
+      self.cfg.Update(self.group, feedback_fn)
 
-    self.group.networks[self.network_uuid] = self.netparams
-    self.cfg.Update(self.group, feedback_fn)
+
+def _NetworkConflictCheck(lu, check_fn, action, instances):
+  """Checks for network interface conflicts with a network.
+
+  @type lu: L{LogicalUnit}
+  @type check_fn: callable receiving one parameter (L{objects.NIC}) and
+    returning boolean
+  @param check_fn: Function checking for conflict
+  @type action: string
+  @param action: Part of error message (see code)
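+  @type instances: list of strings
+  @param instances: Names of the instances to check for conflicts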
+  @raise errors.OpPrereqError: If conflicting IP addresses are found.
+
+  """
+  conflicts = []
+
+  for (_, instance) in lu.cfg.GetMultiInstanceInfo(instances):
+    instconflicts = [(idx, nic.ip)
+                     for (idx, nic) in enumerate(instance.nics)
+                     if check_fn(nic)]
+
+    if instconflicts:
+      conflicts.append((instance.name, instconflicts))
+
+  if conflicts:
+    lu.LogWarning("IP addresses from network '%s', which is about to %s"
+                  " node group '%s', are in use: %s" %
+                  (lu.network_name, action, lu.group.name,
+                   utils.CommaJoin(("%s: %s" %
+                                    (name, _FmtNetworkConflict(details)))
+                                   for (name, details) in conflicts)))
+
+    raise errors.OpPrereqError("Conflicting IP addresses found; "
+                               " remove/modify the corresponding network"
+                               " interfaces", errors.ECODE_STATE)
+
+
+def _FmtNetworkConflict(details):
+  """Utility for L{_NetworkConflictCheck}.
+
+  """
+  return utils.CommaJoin("nic%s/%s" % (idx, ipaddr)
+                         for (idx, ipaddr) in details)
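The two callers differ only in their predicate: LUNetworkConnect passes
lambda nic: pool.Contains(nic.ip), while LUNetworkDisconnect passes
lambda nic: nic.network == self.network_uuid. The helper above then
renders each instance's hits, for example (illustrative values):

    # _FmtNetworkConflict([(0, "192.0.2.10"), (2, "192.0.2.11")])
    # -> "nic0/192.0.2.10, nic2/192.0.2.11"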
 
 
 class LUNetworkDisconnect(LogicalUnit):
@@ -16295,14 +17037,7 @@ class LUNetworkDisconnect(LogicalUnit):
     self.group_name = self.op.group_name
 
     self.network_uuid = self.cfg.LookupNetwork(self.network_name)
-    if self.network_uuid is None:
-      raise errors.OpPrereqError("Network %s does not exist" %
-                                 self.network_name, errors.ECODE_INVAL)
-
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
-    if self.group_uuid is None:
-      raise errors.OpPrereqError("Group %s does not exist" %
-                                 self.group_name, errors.ECODE_INVAL)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
@@ -16316,9 +17051,8 @@ class LUNetworkDisconnect(LogicalUnit):
 
       # Lock instances optimistically, needs verification once group lock has
       # been acquired
-      if self.op.conflicts_check:
-        self.needed_locks[locking.LEVEL_INSTANCE] = \
-          self.cfg.GetNodeGroupInstances(self.group_uuid)
+      self.needed_locks[locking.LEVEL_INSTANCE] = \
+        self.cfg.GetNodeGroupInstances(self.group_uuid)
 
   def BuildHooksEnv(self):
     ret = {
@@ -16335,8 +17069,9 @@ class LUNetworkDisconnect(LogicalUnit):
 
     assert self.group_uuid in owned_groups
 
-    l = lambda value: utils.CommaJoin("%s: %s/%s" % (i[0], i[1], i[2])
-                                      for i in value)
+    # Check if locked instances are still correct
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True
@@ -16344,37 +17079,17 @@ class LUNetworkDisconnect(LogicalUnit):
       self.LogWarning("Network '%s' is not mapped to group '%s'",
                       self.network_name, self.group.name)
       self.connected = False
-      return
-
-    if self.op.conflicts_check:
-      # Check if locked instances are still correct
-      owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
-      conflicting_instances = []
-
-      for (_, instance) in self.cfg.GetMultiInstanceInfo(owned_instances):
-        for idx, nic in enumerate(instance.nics):
-          if nic.network == self.network_name:
-            conflicting_instances.append((instance.name, idx, nic.ip))
-
-      if conflicting_instances:
-        self.LogWarning("Following occurences use IPs from network %s"
-                           " that is about to disconnected from the nodegroup"
-                           " %s: %s" %
-                           (self.network_name, self.group.name,
-                            l(conflicting_instances)))
-        raise errors.OpPrereqError("Conflicting IPs."
-                                   " Please remove/modify"
-                                   " corresponding NICS",
-                                   errors.ECODE_INVAL)
+    # We need this check only if the network is still connected
+    else:
+      _NetworkConflictCheck(self, lambda nic: nic.network == self.network_uuid,
+                            "disconnect from", owned_instances)
 
   def Exec(self, feedback_fn):
-    if not self.connected:
-      return
-
-    del self.group.networks[self.network_uuid]
-    self.cfg.Update(self.group, feedback_fn)
+    # Disconnect the network and update the group only if it is connected
+    if self.connected:
+      del self.group.networks[self.network_uuid]
+      self.cfg.Update(self.group, feedback_fn)
 
 
 #: Query type implementations
@@ -16385,6 +17100,7 @@ _QUERY_IMPL = {
   constants.QR_GROUP: _GroupQuery,
   constants.QR_NETWORK: _NetworkQuery,
   constants.QR_OS: _OsQuery,
+  constants.QR_EXTSTORAGE: _ExtStorageQuery,
   constants.QR_EXPORT: _ExportQuery,
   }
 
@@ -16405,18 +17121,18 @@ def _GetQueryImplementation(name):
 
 
 def _CheckForConflictingIp(lu, ip, node):
-  """In case of conflicting ip raise error.
+  """In case of conflicting IP address raise error.
 
   @type ip: string
-  @param ip: ip address
+  @param ip: IP address
   @type node: string
   @param node: node name
 
   """
   (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
   if conf_net is not None:
-    raise errors.OpPrereqError("Conflicting IP found:"
-                               " %s <> %s." % (ip, conf_net),
-                               errors.ECODE_INVAL)
+    raise errors.OpPrereqError(("Conflicting IP address found: '%s' != '%s'" %
+                                (ip, conf_net)),
+                               errors.ECODE_STATE)
 
   return (None, None)
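A usage sketch for the helper above, with made-up values:

    # _CheckForConflictingIp(self, "198.51.100.7", "node1.example.com")
    # raises OpPrereqError if the address falls inside a network that is
    # connected to node1's node group; otherwise it returns (None, None).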