cluster-verify checks uniformity of PV sizes
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index ae39560..f13b1c1 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -697,6 +697,39 @@ def _SupportsOob(cfg, node):
   return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
 
 
+def _IsExclusiveStorageEnabledNode(cfg, node):
+  """Whether exclusive_storage is in effect for the given node.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: The cluster configuration
+  @type node: L{objects.Node}
+  @param node: The node
+  @rtype: bool
+  @return: The effective value of exclusive_storage
+
+  """
+  return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
+
+
+def _IsExclusiveStorageEnabledNodeName(cfg, nodename):
+  """Whether exclusive_storage is in effect for the given node.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: The cluster configuration
+  @type nodename: string
+  @param nodename: The node name
+  @rtype: bool
+  @return: The effective value of exclusive_storage
+  @raise errors.OpPrereqError: if no node exists with the given name
+
+  """
+  ni = cfg.GetNodeInfo(nodename)
+  if ni is None:
+    raise errors.OpPrereqError("Invalid node name %s" % nodename,
+                               errors.ECODE_NOENT)
+  return _IsExclusiveStorageEnabledNode(cfg, ni)
+
+
 def _CopyLockList(names):
   """Makes a copy of a list of lock names.
 
@@ -1367,27 +1400,6 @@ def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
   return env
 
 
-def _BuildNetworkHookEnvByObject(net):
-  """Builds network related env varliables for hooks
-
-  @type net: L{objects.Network}
-  @param net: the network object
-
-  """
-  args = {
-    "name": net.name,
-    "subnet": net.network,
-    "gateway": net.gateway,
-    "network6": net.network6,
-    "gateway6": net.gateway6,
-    "network_type": net.network_type,
-    "mac_prefix": net.mac_prefix,
-    "tags": net.tags,
-  }
-
-  return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
-
-
 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                           minmem, maxmem, vcpus, nics, disk_template, disks,
                           bep, hvp, hypervisor_name, tags):
@@ -2081,7 +2093,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
       msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
              (item, hv_name))
       try:
-        hv_class = hypervisor.GetHypervisor(hv_name)
+        hv_class = hypervisor.GetHypervisorClass(hv_name)
         utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
         hv_class.CheckParameterSyntax(hv_params)
       except errors.GenericError, err:
@@ -2201,6 +2213,10 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @ivar oslist: list of OSes as diagnosed by DiagnoseOS
     @type vm_capable: boolean
     @ivar vm_capable: whether the node can host instances
+    @type pv_min: float
+    @ivar pv_min: size in MiB of the smallest PV
+    @type pv_max: float
+    @ivar pv_max: size in MiB of the biggest PV
 
     """
     def __init__(self, offline=False, name=None, vm_capable=True):
@@ -2220,6 +2236,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       self.ghost = False
       self.os_fail = False
       self.oslist = {}
+      self.pv_min = None
+      self.pv_max = None
 
   def ExpandNames(self):
     # This raises errors.OpPrereqError on its own:
@@ -2421,13 +2439,15 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
              "Node time diverges by at least %s from master node time",
              ntime_diff)
 
-  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
-    """Check the node LVM results.
+  def _UpdateVerifyNodeLVM(self, ninfo, nresult, vg_name, nimg):
+    """Check the node LVM results and update info for cross-node checks.
 
     @type ninfo: L{objects.Node}
     @param ninfo: the node to check
     @param nresult: the remote results for the node
     @param vg_name: the configured VG name
+    @type nimg: L{NodeImage}
+    @param nimg: node image
 
     """
     if vg_name is None:
@@ -2445,19 +2465,56 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                                             constants.MIN_VG_SIZE)
       _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
 
-    # check pv names
-    pvlist = nresult.get(constants.NV_PVLIST, None)
-    test = pvlist is None
+    # check pv names (and possibly sizes)
+    pvlist_dict = nresult.get(constants.NV_PVLIST, None)
+    test = pvlist_dict is None
     _ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
     if not test:
+      pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
       # check that ':' is not present in PV names, since it's a
       # special character for lvcreate (denotes the range of PEs to
       # use on the PV)
-      for _, pvname, owner_vg in pvlist:
-        test = ":" in pvname
+      for pv in pvlist:
+        test = ":" in pv.name
         _ErrorIf(test, constants.CV_ENODELVM, node,
                  "Invalid character ':' in PV '%s' of VG '%s'",
-                 pvname, owner_vg)
+                 pv.name, pv.vg_name)
+      if self._exclusive_storage:
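+        # Besides the per-node PV checks, record the smallest and biggest PV
+        # sizes seen on this node so that _VerifyGroupLVM can later check
+        # that they are (approximately) uniform across the whole group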
+        (errmsgs, (pvmin, pvmax)) = utils.LvmExclusiveCheckNodePvs(pvlist)
+        for msg in errmsgs:
+          self._Error(constants.CV_ENODELVM, node, msg)
+        nimg.pv_min = pvmin
+        nimg.pv_max = pvmax
+
+  def _VerifyGroupLVM(self, node_image, vg_name):
+    """Check cross-node consistency in LVM.
+
+    @type node_image: dict
+    @param node_image: info about nodes, mapping node names to
+      L{NodeImage} objects
+    @param vg_name: the configured VG name
+
+    """
+    if vg_name is None:
+      return
+
+    # Only exclusive storage needs this kind of check
+    if not self._exclusive_storage:
+      return
+
+    # exclusive_storage wants all PVs to have the same size (approximately),
+    # if the smallest and the biggest ones are okay, everything is fine.
+    # pv_min is None iff pv_max is None
+    vals = filter((lambda ni: ni.pv_min is not None), node_image.values())
+    if not vals:
+      return
+    (pvmin, minnode) = min((ni.pv_min, ni.name) for ni in vals)
+    (pvmax, maxnode) = max((ni.pv_max, ni.name) for ni in vals)
+    bad = utils.LvmExclusiveTestBadPvSizes(pvmin, pvmax)
+    self._ErrorIf(bad, constants.CV_EGROUPDIFFERENTPVSIZE, self.group_info.name,
+                  "PV sizes differ too much in the group; smallest (%s MB) is"
+                  " on %s, biggest (%s MB) is on %s",
+                  pvmin, minnode, pvmax, maxnode)
 
   def _VerifyNodeBridges(self, ninfo, nresult, bridges):
     """Check the node bridges.
@@ -3154,7 +3211,12 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                                  len(s) == 2 for s in statuses)
                       for inst, nnames in instdisk.items()
                       for nname, statuses in nnames.items())
-    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
+    if __debug__:
+      instdisk_keys = set(instdisk)
+      instanceinfo_keys = set(instanceinfo)
+      assert instdisk_keys == instanceinfo_keys, \
+        ("instdisk keys (%s) do not match instanceinfo keys (%s)" %
+         (instdisk_keys, instanceinfo_keys))
 
     return instdisk
 
@@ -3353,6 +3415,23 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
           nimg.sbp[pnode] = []
         nimg.sbp[pnode].append(instance)
 
+    es_flags = rpc.GetExclusiveStorageForNodeNames(self.cfg, self.my_node_names)
+    es_unset_nodes = []
+    # The value of exclusive_storage should be the same across the group, so if
+    # it's True for at least a node, we act as if it were set for all the nodes
+    self._exclusive_storage = compat.any(es_flags.values())
+    if self._exclusive_storage:
+      es_unset_nodes = [n for (n, es) in es_flags.items()
+                        if not es]
+
+    if es_unset_nodes:
+      self._Error(constants.CV_EGROUPMIXEDESFLAG, self.group_info.name,
+                  "The exclusive_storage flag should be uniform in a group,"
+                  " but these nodes have it unset: %s",
+                  utils.CommaJoin(utils.NiceSort(es_unset_nodes)))
+      self.LogWarning("Some checks required by exclusive storage will be"
+                      " performed also on nodes with the flag unset")
+
     # At this point, we have the in-memory data structures complete,
     # except for the runtime information, which we'll gather next
 
@@ -3456,7 +3535,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                                    node == master_node)
 
       if nimg.vm_capable:
-        self._VerifyNodeLVM(node_i, nresult, vg_name)
+        self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)
         self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
                              all_drbd_map)
 
@@ -3483,6 +3562,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
           _ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
                    "node is running unknown instance %s", inst)
 
+    self._VerifyGroupLVM(node_image, vg_name)
+
     for node, result in extra_lv_nvinfo.items():
       self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
                               node_image[node], vg_name)
@@ -4246,7 +4327,10 @@ class LUClusterSetParams(LogicalUnit):
           self.new_os_hvp[os_name] = hvs
         else:
           for hv_name, hv_dict in hvs.items():
-            if hv_name not in self.new_os_hvp[os_name]:
+            if hv_dict is None:
+              # Delete if it exists
+              self.new_os_hvp[os_name].pop(hv_name, None)
+            elif hv_name not in self.new_os_hvp[os_name]:
               self.new_os_hvp[os_name][hv_name] = hv_dict
             else:
               self.new_os_hvp[os_name][hv_name].update(hv_dict)
@@ -4292,7 +4376,7 @@ class LUClusterSetParams(LogicalUnit):
             (self.op.enabled_hypervisors and
              hv_name in self.op.enabled_hypervisors)):
           # either this is a new hypervisor, or its parameters have changed
-          hv_class = hypervisor.GetHypervisor(hv_name)
+          hv_class = hypervisor.GetHypervisorClass(hv_name)
           utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
           hv_class.CheckParameterSyntax(hv_params)
           _CheckHVParams(self, node_list, hv_name, hv_params)
@@ -4306,7 +4390,7 @@ class LUClusterSetParams(LogicalUnit):
           # we need to fill in the new os_hvp on top of the actual hv_p
           cluster_defaults = self.new_hvparams.get(hv_name, {})
           new_osp = objects.FillDict(cluster_defaults, hv_params)
-          hv_class = hypervisor.GetHypervisor(hv_name)
+          hv_class = hypervisor.GetHypervisorClass(hv_name)
           hv_class.CheckParameterSyntax(new_osp)
           _CheckHVParams(self, node_list, hv_name, new_osp)
 
@@ -4528,12 +4612,14 @@ def _ComputeAncillaryFiles(cluster, redist):
   files_vm = set(
     filename
     for hv_name in cluster.enabled_hypervisors
-    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
+    for filename in
+      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])
 
   files_opt |= set(
     filename
     for hv_name in cluster.enabled_hypervisors
-    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
+    for filename in
+      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])
 
   # Filenames in each category must be unique
   all_files_set = files_all | files_mc | files_vm
@@ -4814,10 +4900,11 @@ class LUOobCommand(NoHooksLU):
       locking.LEVEL_NODE: lock_names,
       }
 
+    self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
+
     if not self.op.node_names:
       # Acquire node allocation lock only if all nodes are affected
       self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
-      self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -5139,6 +5226,159 @@ class LUOsDiagnose(NoHooksLU):
     return self.oq.OldStyleQuery(self)
 
 
+class _ExtStorageQuery(_QueryBase):
+  FIELDS = query.EXTSTORAGE_FIELDS
+
+  def ExpandNames(self, lu):
+    # Lock all nodes in shared mode
+    # Temporary removal of locks, should be reverted later
+    # TODO: reintroduce locks when they are lighter-weight
+    lu.needed_locks = {}
+    #self.share_locks[locking.LEVEL_NODE] = 1
+    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+    # The following variables interact with _QueryBase._GetNames
+    if self.names:
+      self.wanted = self.names
+    else:
+      self.wanted = locking.ALL_SET
+
+    self.do_locking = self.use_locking
+
+  def DeclareLocks(self, lu, level):
+    pass
+
+  @staticmethod
+  def _DiagnoseByProvider(rlist):
+    """Remaps a per-node return list into an a per-provider per-node dictionary
+
+    @param rlist: a map with node names as keys and ExtStorage objects as values
+
+    @rtype: dict
+    @return: a dictionary with extstorage providers as keys and as
+        value another map, with nodes as keys and tuples of
+        (path, status, diagnose, parameters) as values, eg::
+
+          {"provider1": {"node1": [(/usr/lib/..., True, "", [])]
+                         "node2": [(/srv/..., False, "missing file")]
+                         "node3": [(/srv/..., True, "", [])]
+          }
+
+    """
+    all_es = {}
+    # we build here the list of nodes that didn't fail the RPC (at RPC
+    # level), so that nodes with a non-responding node daemon don't
+    # make all providers invalid
+    good_nodes = [node_name for node_name in rlist
+                  if not rlist[node_name].fail_msg]
+    for node_name, nr in rlist.items():
+      if nr.fail_msg or not nr.payload:
+        continue
+      for (name, path, status, diagnose, params) in nr.payload:
+        if name not in all_es:
+          # build a list of nodes for this provider containing empty lists
+          # for each node in node_list
+          all_es[name] = {}
+          for nname in good_nodes:
+            all_es[name][nname] = []
+        # convert params from [name, help] to (name, help)
+        params = [tuple(v) for v in params]
+        all_es[name][node_name].append((path, status, diagnose, params))
+    return all_es
+
+  def _GetQueryData(self, lu):
+    """Computes the list of nodes and their attributes.
+
+    """
+    # Locking is not used
+    assert not (compat.any(lu.glm.is_owned(level)
+                           for level in locking.LEVELS
+                           if level != locking.LEVEL_CLUSTER) or
+                self.do_locking or self.use_locking)
+
+    valid_nodes = [node.name
+                   for node in lu.cfg.GetAllNodesInfo().values()
+                   if not node.offline and node.vm_capable]
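+    # Diagnose the ExtStorage providers on every online, vm_capable node and
+    # regroup the per-node results into a per-provider view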
+    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))
+
+    data = {}
+
+    nodegroup_list = lu.cfg.GetNodeGroupList()
+
+    for (es_name, es_data) in pol.items():
+      # For every provider compute the nodegroup validity.
+      # To do this we need to check the validity of each node in es_data
+      # and then construct the corresponding nodegroup dict:
+      #      { nodegroup1: status
+      #        nodegroup2: status
+      #      }
+      ndgrp_data = {}
+      for nodegroup in nodegroup_list:
+        ndgrp = lu.cfg.GetNodeGroup(nodegroup)
+
+        nodegroup_nodes = ndgrp.members
+        nodegroup_name = ndgrp.name
+        node_statuses = []
+
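+        # A member node contributes True only if the provider was diagnosed
+        # successfully there; valid nodes without any result count as False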
+        for node in nodegroup_nodes:
+          if node in valid_nodes:
+            if es_data[node] != []:
+              node_status = es_data[node][0][1]
+              node_statuses.append(node_status)
+            else:
+              node_statuses.append(False)
+
+        if False in node_statuses:
+          ndgrp_data[nodegroup_name] = False
+        else:
+          ndgrp_data[nodegroup_name] = True
+
+      # Compute the provider's parameters
+      parameters = set()
+      for idx, esl in enumerate(es_data.values()):
+        valid = bool(esl and esl[0][1])
+        if not valid:
+          break
+
+        node_params = esl[0][3]
+        if idx == 0:
+          # First entry
+          parameters.update(node_params)
+        else:
+          # Filter out inconsistent values
+          parameters.intersection_update(node_params)
+
+      params = list(parameters)
+
+      # Now fill all the info for this provider
+      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
+                                  nodegroup_status=ndgrp_data,
+                                  parameters=params)
+
+      data[es_name] = info
+
+    # Prepare data in requested order
+    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
+            if name in data]
+
+
+class LUExtStorageDiagnose(NoHooksLU):
+  """Logical unit for ExtStorage diagnose/query.
+
+  """
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    self.eq = _ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                               self.op.output_fields, False)
+
+  def ExpandNames(self):
+    self.eq.ExpandNames(self)
+
+  def Exec(self, feedback_fn):
+    return self.eq.OldStyleQuery(self)
+
+
 class LUNodeRemove(LogicalUnit):
   """Logical unit for removing a node.
 
@@ -5269,8 +5509,9 @@ class _NodeQuery(_QueryBase):
       # filter out non-vm_capable nodes
       toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
 
+      es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, toquery_nodes)
       node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
-                                        [lu.cfg.GetHypervisorType()])
+                                        [lu.cfg.GetHypervisorType()], es_flags)
       live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload))
                        for (name, nresult) in node_data.items()
                        if not nresult.fail_msg and nresult.payload)
@@ -6787,9 +7028,9 @@ def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
   """Checks if a node has enough free memory.
 
-  This function check if a given node has the needed amount of free
+  This function checks if a given node has the needed amount of free
   memory. In case the node has less memory or we cannot get the
-  information from the node, this function raise an OpPrereqError
+  information from the node, this function raises an OpPrereqError
   exception.
 
   @type lu: C{LogicalUnit}
@@ -6808,7 +7049,7 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
       we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
+  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name], False)
   nodeinfo[node].Raise("Can't get data from node %s" % node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
   (_, _, (hv_info, )) = nodeinfo[node].payload
@@ -6827,11 +7068,11 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
 
 
 def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
-  """Checks if nodes have enough free disk space in the all VGs.
+  """Checks if nodes have enough free disk space in all the VGs.
 
-  This function check if all given nodes have the needed amount of
+  This function checks if all given nodes have the needed amount of
   free disk. In case any node has less disk or we cannot get the
-  information from the node, this function raise an OpPrereqError
+  information from the node, this function raises an OpPrereqError
   exception.
 
   @type lu: C{LogicalUnit}
@@ -6852,9 +7093,9 @@ def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
   """Checks if nodes have enough free disk space in the specified VG.
 
-  This function check if all given nodes have the needed amount of
+  This function checks if all given nodes have the needed amount of
   free disk. In case any node has less disk or we cannot get the
-  information from the node, this function raise an OpPrereqError
+  information from the node, this function raises an OpPrereqError
   exception.
 
   @type lu: C{LogicalUnit}
@@ -6869,7 +7110,8 @@ def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
+  es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, nodenames)
+  nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None, es_flags)
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
@@ -6905,7 +7147,7 @@ def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
+  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
@@ -6983,7 +7225,7 @@ class LUInstanceStartup(LogicalUnit):
       utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
       filled_hvp = cluster.FillHV(instance)
       filled_hvp.update(self.op.hvparams)
-      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
+      hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
       hv_type.CheckParameterSyntax(filled_hvp)
       _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
 
@@ -7176,7 +7418,10 @@ class LUInstanceShutdown(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
-    _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
+    if not self.op.force:
+      _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
+    else:
+      self.LogWarning("Ignoring offline instance check")
 
     self.primary_offline = \
       self.cfg.GetNodeInfo(self.instance.primary_node).offline
@@ -7194,7 +7439,9 @@ class LUInstanceShutdown(LogicalUnit):
     node_current = instance.primary_node
     timeout = self.op.timeout
 
-    if not self.op.no_remember:
+    # If the instance is offline we shouldn't mark it as down, as that
+    # resets the offline flag.
+    if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
       self.cfg.MarkInstanceDown(instance.name)
 
     if self.primary_offline:
@@ -7305,7 +7552,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
   HTYPE = constants.HTYPE_INSTANCE
   REQ_BGL = False
 
-  _MODIFYABLE = frozenset([
+  _MODIFYABLE = compat.UniqueFrozenset([
     constants.IDISK_SIZE,
     constants.IDISK_MODE,
     ])
@@ -7317,6 +7564,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     # TODO: Implement support changing VG while recreating
     constants.IDISK_VG,
     constants.IDISK_METAVG,
+    constants.IDISK_PROVIDER,
     ]))
 
   def _RunAllocator(self):
@@ -7355,7 +7603,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
                                         disks=[{constants.IDISK_SIZE: d.size,
                                                 constants.IDISK_MODE: d.mode}
                                                 for d in self.instance.disks],
-                                        hypervisor=self.instance.hypervisor)
+                                        hypervisor=self.instance.hypervisor,
+                                        node_whitelist=None)
     ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
     ial.Run(self.op.iallocator)
@@ -7873,6 +8122,8 @@ def _DeclareLocksForMigration(lu, level):
 
     instance = lu.cfg.GetInstanceInfo(lu.op.instance_name)
 
+    # Node locks are already declared here rather than at LEVEL_NODE as we need
+    # the instance object anyway to declare the node allocation lock.
     if instance.disk_template in constants.DTS_EXT_MIRROR:
       if lu.op.target_node is None:
         lu.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
@@ -7886,7 +8137,8 @@ def _DeclareLocksForMigration(lu, level):
 
   elif level == locking.LEVEL_NODE:
     # Node locks are declared together with the node allocation lock
-    assert lu.needed_locks[locking.LEVEL_NODE]
+    assert (lu.needed_locks[locking.LEVEL_NODE] or
+            lu.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET)
 
   elif level == locking.LEVEL_NODE_RES:
     # Copy node locks
@@ -8712,7 +8964,7 @@ class TLMigrateInstance(Tasklet):
 
     # Check for hypervisor version mismatch and warn the user.
     nodeinfo = self.rpc.call_node_info([source_node, target_node],
-                                       None, [self.instance.hypervisor])
+                                       None, [self.instance.hypervisor], False)
     for ninfo in nodeinfo.values():
       ninfo.Raise("Unable to retrieve node information from node '%s'" %
                   ninfo.node)
@@ -8861,9 +9113,9 @@ class TLMigrateInstance(Tasklet):
       self._GoReconnect(False)
       self._WaitUntilSync()
 
-    # If the instance's disk template is `rbd' and there was a successful
-    # migration, unmap the device from the source node.
-    if self.instance.disk_template == constants.DT_RBD:
+    # If the instance's disk template is `rbd' or `ext' and there was a
+    # successful migration, unmap the device from the source node.
+    if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
       disks = _ExpandCheckDisks(instance, instance.disks)
       self.feedback_fn("* unmapping instance's disks from %s" % source_node)
       for disk in disks:
@@ -8994,12 +9246,13 @@ def _CreateBlockDev(lu, node, instance, device, force_create, info,
 
   """
   (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
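+  # The node's exclusive_storage setting has to be passed down to the actual
+  # block device creation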
+  excl_stor = _IsExclusiveStorageEnabledNodeName(lu.cfg, node)
   return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
-                              force_open)
+                              force_open, excl_stor)
 
 
 def _CreateBlockDevInner(lu, node, instance, device, force_create,
-                         info, force_open):
+                         info, force_open, excl_stor):
   """Create a tree of block devices on a given node.
 
   If this device type has to be created on secondaries, create it and
@@ -9026,6 +9279,8 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
       L{backend.BlockdevCreate} function where it specifies
       whether we run on primary or not, and it affects both
       the child assembly and the device own Open() execution
+  @type excl_stor: boolean
+  @param excl_stor: Whether exclusive_storage is active for the node
 
   """
   if device.CreateOnSecondary():
@@ -9034,15 +9289,17 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
   if device.children:
     for child in device.children:
       _CreateBlockDevInner(lu, node, instance, child, force_create,
-                           info, force_open)
+                           info, force_open, excl_stor)
 
   if not force_create:
     return
 
-  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
+  _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+                        excl_stor)
 
 
-def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
+def _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+                          excl_stor):
   """Create a single block device on a given node.
 
   This will not recurse over children of the device, so they must be
@@ -9061,11 +9318,14 @@ def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
       L{backend.BlockdevCreate} function where it specifies
       whether we run on primary or not, and it affects both
       the child assembly and the device own Open() execution
+  @type excl_stor: boolean
+  @param excl_stor: Whether exclusive_storage is active for the node
 
   """
   lu.cfg.SetDiskID(device, node)
   result = lu.rpc.call_blockdev_create(node, device, device.size,
-                                       instance.name, force_open, info)
+                                       instance.name, force_open, info,
+                                       excl_stor)
   result.Raise("Can't create block device %s on"
                " node %s for instance %s" % (device, node, instance.name))
   if device.physical_id is None:
@@ -9113,6 +9373,7 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
 _DISK_TEMPLATE_NAME_PREFIX = {
   constants.DT_PLAIN: "",
   constants.DT_RBD: ".rbd",
+  constants.DT_EXT: ".ext",
   }
 
 
@@ -9122,6 +9383,7 @@ _DISK_TEMPLATE_DEVICE_TYPE = {
   constants.DT_SHARED_FILE: constants.LD_FILE,
   constants.DT_BLOCK: constants.LD_BLOCKDEV,
   constants.DT_RBD: constants.LD_RBD,
+  constants.DT_EXT: constants.LD_EXT,
   }
 
 
@@ -9133,8 +9395,6 @@ def _GenerateDiskTemplate(
   """Generate the entire disk layout for a given template type.
 
   """
-  #TODO: compute space requirements
-
   vgname = lu.cfg.GetVGName()
   disk_count = len(disk_info)
   disks = []
@@ -9203,12 +9463,27 @@ def _GenerateDiskTemplate(
                                        disk[constants.IDISK_ADOPT])
     elif template_name == constants.DT_RBD:
       logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
+    elif template_name == constants.DT_EXT:
+      def logical_id_fn(idx, _, disk):
+        provider = disk.get(constants.IDISK_PROVIDER, None)
+        if provider is None:
+          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
+                                       " not found", constants.DT_EXT,
+                                       constants.IDISK_PROVIDER)
+        return (provider, names[idx])
     else:
       raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
 
     dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
 
     for idx, disk in enumerate(disk_info):
+      params = {}
+      # Only for the Ext template add disk_info to params
+      if template_name == constants.DT_EXT:
+        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
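+        # Any key that is not a standard IDISK parameter is treated as a
+        # provider-specific (ext-params) setting and kept in params as-is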
+        for key in disk:
+          if key not in constants.IDISK_PARAMS:
+            params[key] = disk[key]
       disk_index = idx + base_index
       size = disk[constants.IDISK_SIZE]
       feedback_fn("* disk %s, size %s" %
@@ -9217,7 +9492,7 @@ def _GenerateDiskTemplate(
                                 logical_id=logical_id_fn(idx, disk_index, disk),
                                 iv_name="disk/%d" % disk_index,
                                 mode=disk[constants.IDISK_MODE],
-                                params={}))
+                                params=params))
 
   return disks
 
@@ -9667,7 +9942,7 @@ def _ComputeDisks(op, default_vg):
   @param op: The instance opcode
   @param default_vg: The default_vg to assume
 
-  @return: The computer disks
+  @return: The computed disks
 
   """
   disks = []
@@ -9685,16 +9960,37 @@ def _ComputeDisks(op, default_vg):
       raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                  errors.ECODE_INVAL)
 
+    ext_provider = disk.get(constants.IDISK_PROVIDER, None)
+    if ext_provider and op.disk_template != constants.DT_EXT:
+      raise errors.OpPrereqError("The '%s' option is only valid for the %s"
+                                 " disk template, not %s" %
+                                 (constants.IDISK_PROVIDER, constants.DT_EXT,
+                                 op.disk_template), errors.ECODE_INVAL)
+
     data_vg = disk.get(constants.IDISK_VG, default_vg)
     new_disk = {
       constants.IDISK_SIZE: size,
       constants.IDISK_MODE: mode,
       constants.IDISK_VG: data_vg,
       }
+
     if constants.IDISK_METAVG in disk:
       new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
     if constants.IDISK_ADOPT in disk:
       new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
+
+    # For extstorage, demand the `provider' option and add any
+    # additional parameters (ext-params) to the dict
+    if op.disk_template == constants.DT_EXT:
+      if ext_provider:
+        new_disk[constants.IDISK_PROVIDER] = ext_provider
+        for key in disk:
+          if key not in constants.IDISK_PARAMS:
+            new_disk[key] = disk[key]
+      else:
+        raise errors.OpPrereqError("Missing provider for template '%s'" %
+                                   constants.DT_EXT, errors.ECODE_INVAL)
+
     disks.append(new_disk)
 
   return disks
@@ -9718,6 +10014,16 @@ def _ComputeFullBeParams(op, cluster):
   return cluster.SimpleFillBE(op.beparams)
 
 
+def _CheckOpportunisticLocking(op):
+  """Generate error if opportunistic locking is not possible.
+
+  """
+  if op.opportunistic_locking and not op.iallocator:
+    raise errors.OpPrereqError("Opportunistic locking is only available in"
+                               " combination with an instance allocator",
+                               errors.ECODE_INVAL)
+
+
 class LUInstanceCreate(LogicalUnit):
   """Create an instance.
 
@@ -9751,7 +10057,8 @@ class LUInstanceCreate(LogicalUnit):
     # check disks. parameter names and consistent adopt/no-adopt strategy
     has_adopt = has_no_adopt = False
     for disk in self.op.disks:
-      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
+      if self.op.disk_template != constants.DT_EXT:
+        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
       if constants.IDISK_ADOPT in disk:
         has_adopt = True
       else:
@@ -9813,6 +10120,8 @@ class LUInstanceCreate(LogicalUnit):
                         " template")
         self.op.snode = None
 
+    _CheckOpportunisticLocking(self.op)
+
     self._cds = _GetClusterDomainSecret()
 
     if self.op.mode == constants.INSTANCE_IMPORT:
@@ -10255,7 +10564,7 @@ class LUInstanceCreate(LogicalUnit):
     utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
     filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                       self.op.hvparams)
-    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
+    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
     hv_type.CheckParameterSyntax(filled_hvp)
     self.hv_full = filled_hvp
     # check that we don't specify global parameters on an instance
@@ -10386,10 +10695,10 @@ class LUInstanceCreate(LogicalUnit):
                                          " or does not belong to network %s" %
                                          (nic.ip, net),
                                          errors.ECODE_NOTUNIQUE)
-      else:
-        # net is None, ip None or given
-        if self.op.conflicts_check:
-          _CheckForConflictingIp(self, nic.ip, self.pnode.name)
+
+      # net is None, ip None or given
+      elif self.op.conflicts_check:
+        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
 
     # mirror node verification
     if self.op.disk_template in constants.DTS_INT_MIRROR:
@@ -10408,6 +10717,16 @@ class LUInstanceCreate(LogicalUnit):
                         " from the first disk's node group will be"
                         " used")
 
+    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
+      nodes = [pnode]
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
+        nodes.append(snode)
+      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+      if compat.any(map(has_es, nodes)):
+        raise errors.OpPrereqError("Disk template %s not supported with"
+                                   " exclusive storage" % self.op.disk_template,
+                                   errors.ECODE_STATE)
+
     nodenames = [pnode.name] + self.secondaries
 
     # Verify instance specs
@@ -10435,6 +10754,9 @@ class LUInstanceCreate(LogicalUnit):
         # Any function that checks prerequisites can be placed here.
         # Check if there is enough space on the RADOS cluster.
         _CheckRADOSFreeSpace()
+      elif self.op.disk_template == constants.DT_EXT:
+        # FIXME: Function that checks prereqs if needed
+        pass
       else:
         # Check lv size requirements, if not adopting
         req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
@@ -10539,6 +10861,9 @@ class LUInstanceCreate(LogicalUnit):
 
     _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 
+    #TODO: _CheckExtParams (remotely)
+    # Check parameters for extstorage
+
     # memory check on primary node
     #TODO(dynmem): use MINMEM for checking
     if self.op.start:
@@ -10828,6 +11153,8 @@ class LUInstanceMultiAlloc(NoHooksLU):
                                    " or set a cluster-wide default iallocator",
                                    errors.ECODE_INVAL)
 
+    _CheckOpportunisticLocking(self.op)
+
     dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
     if dups:
       raise errors.OpPrereqError("There are duplicate instance names: %s" %
@@ -10900,7 +11227,7 @@ class LUInstanceMultiAlloc(NoHooksLU):
     self.ia_result = ial.result
 
     if self.op.dry_run:
-      self.dry_run_rsult = objects.FillDict(self._ConstructPartialResult(), {
+      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
         constants.JOB_IDS_KEY: [],
         })
 
@@ -11007,7 +11334,7 @@ def _GetInstanceConsole(cluster, instance):
   @rtype: dict
 
   """
-  hyper = hypervisor.GetHypervisor(instance.hypervisor)
+  hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
   # beparams and hvparams are passed separately, to avoid editing the
   # instance and then saving the defaults in the instance itself.
   hvparams = cluster.FillHV(instance)
@@ -11427,8 +11754,8 @@ class TLReplaceDisks(Tasklet):
 
     feedback_fn("Replacing disk(s) %s for instance '%s'" %
                 (utils.CommaJoin(self.disks), self.instance.name))
-    feedback_fn("Current primary node: %s", self.instance.primary_node)
-    feedback_fn("Current seconary node: %s",
+    feedback_fn("Current primary node: %s" % self.instance.primary_node)
+    feedback_fn("Current seconary node: %s" %
                 utils.CommaJoin(self.instance.secondary_nodes))
 
     activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
@@ -11549,11 +11876,13 @@ class TLReplaceDisks(Tasklet):
       new_lvs = [lv_data, lv_meta]
       old_lvs = [child.Copy() for child in dev.children]
       iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
+      excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
 
       # we pass force_create=True to force the LVM creation
       for new_lv in new_lvs:
         _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
-                             _GetInstanceInfoText(self.instance), False)
+                             _GetInstanceInfoText(self.instance), False,
+                             excl_stor)
 
     return iv_names
 
@@ -11762,13 +12091,15 @@ class TLReplaceDisks(Tasklet):
     # Step: create new storage
     self.lu.LogStep(3, steps_total, "Allocate new storage")
     disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+    excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
     for idx, dev in enumerate(disks):
       self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                       (self.new_node, idx))
       # we pass force_create=True to force LVM creation
       for new_lv in dev.children:
         _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
-                             True, _GetInstanceInfoText(self.instance), False)
+                             True, _GetInstanceInfoText(self.instance), False,
+                             excl_stor)
 
     # Step 4: dbrd minors and drbd setups changes
     # after this, we must manually remove the drbd minors on both the
@@ -11812,7 +12143,8 @@ class TLReplaceDisks(Tasklet):
       try:
         _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
                               anno_new_drbd,
-                              _GetInstanceInfoText(self.instance), False)
+                              _GetInstanceInfoText(self.instance), False,
+                              excl_stor)
       except errors.GenericError:
         self.cfg.ReleaseDRBDMinors(self.instance.name)
         raise
@@ -12316,13 +12648,22 @@ class LUInstanceGrowDisk(LogicalUnit):
                                    utils.FormatUnit(self.delta, "h"),
                                    errors.ECODE_INVAL)
 
-    if instance.disk_template not in (constants.DT_FILE,
-                                      constants.DT_SHARED_FILE,
-                                      constants.DT_RBD):
+    self._CheckDiskSpace(nodenames, self.disk.ComputeGrowth(self.delta))
+
+  def _CheckDiskSpace(self, nodenames, req_vgspace):
+    template = self.instance.disk_template
+    if template not in constants.DTS_NO_FREE_SPACE_CHECK:
       # TODO: check the free disk space for file, when that feature will be
       # supported
-      _CheckNodesFreeDiskPerVG(self, nodenames,
-                               self.disk.ComputeGrowth(self.delta))
+      nodes = map(self.cfg.GetNodeInfo, nodenames)
+      es_nodes = filter(lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n),
+                        nodes)
+      if es_nodes:
+        # With exclusive storage we need to do something smarter than just
+        # looking at free space; for now, let's simply abort the operation.
+        raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
+                                   " is enabled", errors.ECODE_STATE)
+      _CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
 
   def Exec(self, feedback_fn):
     """Execute disk grow.
@@ -12818,7 +13159,10 @@ class LUInstanceSetParams(LogicalUnit):
     for (op, _, params) in mods:
       assert ht.TDict(params)
 
-      utils.ForceDictType(params, key_types)
+      # If 'key_types' is an empty dict, we assume we have an
+      # 'ext' template and thus do not ForceDictType
+      if key_types:
+        utils.ForceDictType(params, key_types)
 
       if op == constants.DDM_REMOVE:
         if params:
@@ -12854,9 +13198,18 @@ class LUInstanceSetParams(LogicalUnit):
 
       params[constants.IDISK_SIZE] = size
 
-    elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
-      raise errors.OpPrereqError("Disk size change not possible, use"
-                                 " grow-disk", errors.ECODE_INVAL)
+    elif op == constants.DDM_MODIFY:
+      if constants.IDISK_SIZE in params:
+        raise errors.OpPrereqError("Disk size change not possible, use"
+                                   " grow-disk", errors.ECODE_INVAL)
+      if constants.IDISK_MODE not in params:
+        raise errors.OpPrereqError("Disk 'mode' is the only kind of"
+                                   " modification supported, but missing",
+                                   errors.ECODE_NOENT)
+      if len(params) > 1:
+        raise errors.OpPrereqError("Disk modification doesn't support"
+                                   " additional arbitrary parameters",
+                                   errors.ECODE_INVAL)
 
   @staticmethod
   def _VerifyNicModification(op, params):
@@ -12920,10 +13273,6 @@ class LUInstanceSetParams(LogicalUnit):
     self.op.nics = self._UpgradeDiskNicMods(
       "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
 
-    # Check disk modifications
-    self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
-                    self._VerifyDiskModification)
-
     if self.op.disks and self.op.disk_template is not None:
       raise errors.OpPrereqError("Disk template conversion and other disk"
                                  " changes not supported at the same time",
@@ -13115,10 +13464,10 @@ class LUInstanceSetParams(LogicalUnit):
         elif new_ip.lower() == constants.NIC_IP_POOL:
           raise errors.OpPrereqError("ip=pool, but no network found",
                                      errors.ECODE_INVAL)
-        else:
-          # new net is None
-          if self.op.conflicts_check:
-            _CheckForConflictingIp(self, new_ip, pnode)
+
+        # new net is None
+        elif self.op.conflicts_check:
+          _CheckForConflictingIp(self, new_ip, pnode)
 
       if old_ip:
         if old_net:
@@ -13138,6 +13487,64 @@ class LUInstanceSetParams(LogicalUnit):
     private.params = new_params
     private.filled = new_filled_params
 
+  def _PreCheckDiskTemplate(self, pnode_info):
+    """CheckPrereq checks related to a new disk template."""
+    # Arguments are passed to avoid configuration lookups
+    instance = self.instance
+    pnode = instance.primary_node
+    cluster = self.cluster
+    if instance.disk_template == self.op.disk_template:
+      raise errors.OpPrereqError("Instance already has disk template %s" %
+                                 instance.disk_template, errors.ECODE_INVAL)
+
+    if (instance.disk_template,
+        self.op.disk_template) not in self._DISK_CONVERSIONS:
+      raise errors.OpPrereqError("Unsupported disk template conversion from"
+                                 " %s to %s" % (instance.disk_template,
+                                                self.op.disk_template),
+                                 errors.ECODE_INVAL)
+    _CheckInstanceState(self, instance, INSTANCE_DOWN,
+                        msg="cannot change disk template")
+    if self.op.disk_template in constants.DTS_INT_MIRROR:
+      if self.op.remote_node == pnode:
+        raise errors.OpPrereqError("Given new secondary node %s is the same"
+                                   " as the primary node of the instance" %
+                                   self.op.remote_node, errors.ECODE_STATE)
+      _CheckNodeOnline(self, self.op.remote_node)
+      _CheckNodeNotDrained(self, self.op.remote_node)
+      # FIXME: here we assume that the old instance type is DT_PLAIN
+      assert instance.disk_template == constants.DT_PLAIN
+      disks = [{constants.IDISK_SIZE: d.size,
+                constants.IDISK_VG: d.logical_id[0]}
+               for d in instance.disks]
+      required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
+      _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
+
+      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
+      snode_group = self.cfg.GetNodeGroup(snode_info.group)
+      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
+                                                              snode_group)
+      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
+                              ignore=self.op.ignore_ipolicy)
+      if pnode_info.group != snode_info.group:
+        self.LogWarning("The primary and secondary nodes are in two"
+                        " different node groups; the disk parameters"
+                        " from the first disk's node group will be"
+                        " used")
+
+    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
+      # Make sure none of the nodes require exclusive storage
+      nodes = [pnode_info]
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
+        assert snode_info
+        nodes.append(snode_info)
+      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+      if compat.any(map(has_es, nodes)):
+        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
+                  " storage is enabled" % (instance.disk_template,
+                                           self.op.disk_template))
+        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
+
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -13164,10 +13571,46 @@ class LUInstanceSetParams(LogicalUnit):
     # dictionary with instance information after the modification
     ispec = {}
 
+    # Check disk modifications. This is done here and not in CheckArguments
+    # (as with NICs), because we need to know the instance's disk template
+    if instance.disk_template == constants.DT_EXT:
+      self._CheckMods("disk", self.op.disks, {},
+                      self._VerifyDiskModification)
+    else:
+      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
+                      self._VerifyDiskModification)
+
     # Prepare disk/NIC modifications
     self.diskmod = PrepareContainerMods(self.op.disks, None)
     self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
 
+    # Check the validity of the `provider' parameter
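+    # (required when adding a disk to an 'ext' instance, not allowed when
+    # modifying a disk, and invalid for every other disk template)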
+    if instance.disk_template == constants.DT_EXT:
+      for mod in self.diskmod:
+        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
+        if mod[0] == constants.DDM_ADD:
+          if ext_provider is None:
+            raise errors.OpPrereqError("Instance template is '%s' and parameter"
+                                       " '%s' missing, during disk add" %
+                                       (constants.DT_EXT,
+                                        constants.IDISK_PROVIDER),
+                                       errors.ECODE_NOENT)
+        elif mod[0] == constants.DDM_MODIFY:
+          if ext_provider:
+            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
+                                       " modification" %
+                                       constants.IDISK_PROVIDER,
+                                       errors.ECODE_INVAL)
+    else:
+      for mod in self.diskmod:
+        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
+        if ext_provider is not None:
+          raise errors.OpPrereqError("Parameter '%s' is only valid for"
+                                     " instances of type '%s'" %
+                                     (constants.IDISK_PROVIDER,
+                                      constants.DT_EXT),
+                                     errors.ECODE_INVAL)
+
     # OS change
     if self.op.os_name and not self.op.force:
       _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
@@ -13180,44 +13623,7 @@ class LUInstanceSetParams(LogicalUnit):
       "Can't modify disk template and apply disk changes at the same time"
 
     if self.op.disk_template:
-      if instance.disk_template == self.op.disk_template:
-        raise errors.OpPrereqError("Instance already has disk template %s" %
-                                   instance.disk_template, errors.ECODE_INVAL)
-
-      if (instance.disk_template,
-          self.op.disk_template) not in self._DISK_CONVERSIONS:
-        raise errors.OpPrereqError("Unsupported disk template conversion from"
-                                   " %s to %s" % (instance.disk_template,
-                                                  self.op.disk_template),
-                                   errors.ECODE_INVAL)
-      _CheckInstanceState(self, instance, INSTANCE_DOWN,
-                          msg="cannot change disk template")
-      if self.op.disk_template in constants.DTS_INT_MIRROR:
-        if self.op.remote_node == pnode:
-          raise errors.OpPrereqError("Given new secondary node %s is the same"
-                                     " as the primary node of the instance" %
-                                     self.op.remote_node, errors.ECODE_STATE)
-        _CheckNodeOnline(self, self.op.remote_node)
-        _CheckNodeNotDrained(self, self.op.remote_node)
-        # FIXME: here we assume that the old instance type is DT_PLAIN
-        assert instance.disk_template == constants.DT_PLAIN
-        disks = [{constants.IDISK_SIZE: d.size,
-                  constants.IDISK_VG: d.logical_id[0]}
-                 for d in instance.disks]
-        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
-        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
-
-        snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
-        snode_group = self.cfg.GetNodeGroup(snode_info.group)
-        ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
-                                                                snode_group)
-        _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
-                                ignore=self.op.ignore_ipolicy)
-        if pnode_info.group != snode_info.group:
-          self.LogWarning("The primary and secondary nodes are in two"
-                          " different node groups; the disk parameters"
-                          " from the first disk's node group will be"
-                          " used")
+      self._PreCheckDiskTemplate(pnode_info)
 
     # hvparams processing
     if self.op.hvparams:
@@ -13227,7 +13633,7 @@ class LUInstanceSetParams(LogicalUnit):
       hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
 
       # local check
-      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
+      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
       _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
       self.hv_proposed = self.hv_new = hv_new # the new actual values
       self.hv_inst = i_hvdict # the new dict (without defaults)
@@ -13297,7 +13703,7 @@ class LUInstanceSetParams(LogicalUnit):
       instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                   instance.hypervisor)
       nodeinfo = self.rpc.call_node_info(mem_check_list, None,
-                                         [instance.hypervisor])
+                                         [instance.hypervisor], False)
       pninfo = nodeinfo[pnode]
       msg = pninfo.fail_msg
       if msg:
@@ -13417,12 +13823,9 @@ class LUInstanceSetParams(LogicalUnit):
     ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
     ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
 
-    if self.op.offline is not None:
-      if self.op.offline:
-        msg = "can't change to offline"
-      else:
-        msg = "can't change to online"
-      _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE, msg=msg)
+    if self.op.offline is not None and self.op.offline:
+      _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
+                          msg="can't change to offline")
 
     # Pre-compute NIC changes (necessary to use result in hooks)
     self._nic_chgdesc = []
@@ -13486,15 +13889,18 @@ class LUInstanceSetParams(LogicalUnit):
                                       self.diskparams)
     anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                         self.diskparams)
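+    # Block devices have to be created with the exclusive_storage flag of the
+    # node they end up on, so look it up once for both nodes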
+    p_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
+    s_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, snode)
     info = _GetInstanceInfoText(instance)
     feedback_fn("Creating additional volumes...")
     # first, create the missing data and meta devices
     for disk in anno_disks:
       # unfortunately this is... not too nice
       _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
-                            info, True)
+                            info, True, p_excl_stor)
       for child in disk.children:
-        _CreateSingleBlockDev(self, snode, instance, child, info, True)
+        _CreateSingleBlockDev(self, snode, instance, child, info, True,
+                              s_excl_stor)
     # at this stage, all new LVs have been created, we can rename the
     # old ones
     feedback_fn("Renaming original volumes...")
@@ -13506,9 +13912,10 @@ class LUInstanceSetParams(LogicalUnit):
     feedback_fn("Initializing DRBD devices...")
     # all child devices are in place, we can now create the DRBD devices
     for disk in anno_disks:
-      for node in [pnode, snode]:
+      for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
         f_create = node == pnode
-        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
+        _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
+                              excl_stor)
 
     # at this point, the instance has been modified
     instance.disk_template = constants.DT_DRBD8
@@ -15759,8 +16166,10 @@ class LUNetworkAdd(LogicalUnit):
 
     if self.op.conflicts_check:
       self.share_locks[locking.LEVEL_NODE] = 1
+      self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
       self.needed_locks = {
         locking.LEVEL_NODE: locking.ALL_SET,
+        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
         }
     else:
       self.needed_locks = {}
@@ -15775,8 +16184,8 @@ class LUNetworkAdd(LogicalUnit):
     uuid = self.cfg.LookupNetwork(self.op.network_name)
 
     if uuid:
-      raise errors.OpPrereqError("Network '%s' already defined" %
-                                 self.op.network, errors.ECODE_EXISTS)
+      raise errors.OpPrereqError(("Network with name '%s' already exists" %
+                                  self.op.network_name), errors.ECODE_EXISTS)
 
     # Check tag validity
     for tag in self.op.tags:
@@ -15815,7 +16224,7 @@ class LUNetworkAdd(LogicalUnit):
     try:
       pool = network.AddressPool.InitializeNetwork(nobj)
     except errors.AddressPoolError, e:
-      raise errors.OpExecError("Cannot create IP pool for this network. %s" % e)
+      raise errors.OpExecError("Cannot create IP pool for this network: %s" % e)
 
     # Check if we need to reserve the nodes and the cluster master IP
     # These may not be allocated to any instances in routed mode, as
@@ -15866,8 +16275,7 @@ class LUNetworkRemove(LogicalUnit):
 
     if not self.network_uuid:
       raise errors.OpPrereqError(("Network '%s' not found" %
-                                  self.op.network_name),
-                                 errors.ECODE_INVAL)
+                                  self.op.network_name), errors.ECODE_NOENT)
 
     self.share_locks[locking.LEVEL_NODEGROUP] = 1
     self.needed_locks = {
@@ -15939,8 +16347,7 @@ class LUNetworkSetParams(LogicalUnit):
     self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
     if self.network_uuid is None:
       raise errors.OpPrereqError(("Network '%s' not found" %
-                                  self.op.network_name),
-                                 errors.ECODE_INVAL)
+                                  self.op.network_name), errors.ECODE_NOENT)
 
     self.needed_locks = {
       locking.LEVEL_NETWORK: [self.network_uuid],
@@ -15966,8 +16373,9 @@ class LUNetworkSetParams(LogicalUnit):
       else:
         self.gateway = self.op.gateway
         if self.pool.IsReserved(self.gateway):
-          raise errors.OpPrereqError("%s is already reserved" %
-                                     self.gateway, errors.ECODE_INVAL)
+          raise errors.OpPrereqError("Gateway IP address '%s' is already"
+                                     " reserved" % self.gateway,
+                                     errors.ECODE_STATE)
 
     if self.op.network_type:
       if self.op.network_type == constants.VALUE_NONE:
@@ -16078,23 +16486,19 @@ class _NetworkQuery(_QueryBase):
 
   def ExpandNames(self, lu):
     lu.needed_locks = {}
+    lu.share_locks = _ShareAll()
 
-    self._all_networks = lu.cfg.GetAllNetworksInfo()
-    name_to_uuid = dict((n.name, n.uuid) for n in self._all_networks.values())
+    self.do_locking = self.use_locking
 
-    if not self.names:
-      self.wanted = [name_to_uuid[name]
-                     for name in utils.NiceSort(name_to_uuid.keys())]
-    else:
-      # Accept names to be either names or UUIDs.
+    all_networks = lu.cfg.GetAllNetworksInfo()
+    name_to_uuid = dict((n.name, n.uuid) for n in all_networks.values())
+
+    if self.names:
       missing = []
       self.wanted = []
-      all_uuid = frozenset(self._all_networks.keys())
 
       for name in self.names:
-        if name in all_uuid:
-          self.wanted.append(name)
-        elif name in name_to_uuid:
+        if name in name_to_uuid:
           self.wanted.append(name_to_uuid[name])
         else:
           missing.append(name)
@@ -16102,6 +16506,15 @@ class _NetworkQuery(_QueryBase):
       if missing:
         raise errors.OpPrereqError("Some networks do not exist: %s" % missing,
                                    errors.ECODE_NOENT)
+    else:
+      self.wanted = locking.ALL_SET
+
+    if self.do_locking:
+      lu.needed_locks[locking.LEVEL_NETWORK] = self.wanted
+      if query.NETQ_INST in self.requested_data:
+        lu.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
+      if query.NETQ_GROUP in self.requested_data:
+        lu.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
 
   def DeclareLocks(self, lu, level):
     pass
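The rewritten ExpandNames resolves the requested names to UUIDs up front and falls back to locking.ALL_SET when no names were given, so the lock declarations can reuse the result directly. A standalone sketch of the resolution step; resolve_wanted and the sample data are illustrative, and ALL_SET is a sentinel standing in for locking.ALL_SET:

ALL_SET = None  # stand-in for locking.ALL_SET

def resolve_wanted(names, name_to_uuid):
  """Map query names to UUIDs, collecting unknown names."""
  if not names:
    return ALL_SET
  wanted, missing = [], []
  for name in names:
    if name in name_to_uuid:
      wanted.append(name_to_uuid[name])
    else:
      missing.append(name)
  if missing:
    raise ValueError("Some networks do not exist: %s" % missing)
  return wanted

name_to_uuid = {"net-a": "uuid-1", "net-b": "uuid-2"}
assert resolve_wanted(["net-a"], name_to_uuid) == ["uuid-1"]
assert resolve_wanted([], name_to_uuid) is ALL_SET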
@@ -16110,65 +16523,70 @@ class _NetworkQuery(_QueryBase):
     """Computes the list of networks and their attributes.
 
     """
+    all_networks = lu.cfg.GetAllNetworksInfo()
+
+    network_uuids = self._GetNames(lu, all_networks.keys(),
+                                   locking.LEVEL_NETWORK)
+
+    name_to_uuid = dict((n.name, n.uuid) for n in all_networks.values())
+
     do_instances = query.NETQ_INST in self.requested_data
-    do_groups = do_instances or (query.NETQ_GROUP in self.requested_data)
-    do_stats = query.NETQ_STATS in self.requested_data
+    do_groups = query.NETQ_GROUP in self.requested_data
 
-    network_to_groups = None
     network_to_instances = None
-    stats = None
+    network_to_groups = None
 
     # For NETQ_GROUP, we need to map network->[groups]
     if do_groups:
       all_groups = lu.cfg.GetAllNodeGroupsInfo()
-      network_to_groups = dict((uuid, []) for uuid in self.wanted)
+      network_to_groups = dict((uuid, []) for uuid in network_uuids)
+      for _, group in all_groups.iteritems():
+        for net_uuid in network_uuids:
+          netparams = group.networks.get(net_uuid, None)
+          if netparams:
+            info = (group.name, netparams[constants.NIC_MODE],
+                    netparams[constants.NIC_LINK])
 
-      if do_instances:
-        all_instances = lu.cfg.GetAllInstancesInfo()
-        all_nodes = lu.cfg.GetAllNodesInfo()
-        network_to_instances = dict((uuid, []) for uuid in self.wanted)
-
-      for group in all_groups.values():
-        if do_instances:
-          group_nodes = [node.name for node in all_nodes.values() if
-                         node.group == group.uuid]
-          group_instances = [instance for instance in all_instances.values()
-                             if instance.primary_node in group_nodes]
-
-        for net_uuid in group.networks.keys():
-          if net_uuid in network_to_groups:
-            netparams = group.networks[net_uuid]
-            mode = netparams[constants.NIC_MODE]
-            link = netparams[constants.NIC_LINK]
-            info = group.name + "(" + mode + ", " + link + ")"
             network_to_groups[net_uuid].append(info)
 
-            if do_instances:
-              for instance in group_instances:
-                for nic in instance.nics:
-                  if nic.network == self._all_networks[net_uuid].name:
-                    network_to_instances[net_uuid].append(instance.name)
-                    break
-
-    if do_stats:
-      stats = {}
-      for uuid, net in self._all_networks.items():
-        if uuid in self.wanted:
-          pool = network.AddressPool(net)
-          stats[uuid] = {
-            "free_count": pool.GetFreeCount(),
-            "reserved_count": pool.GetReservedCount(),
-            "map": pool.GetMap(),
-            "external_reservations":
-              utils.CommaJoin(pool.GetExternalReservations()),
-            }
-
-    return query.NetworkQueryData([self._all_networks[uuid]
-                                   for uuid in self.wanted],
+    if do_instances:
+      all_instances = lu.cfg.GetAllInstancesInfo()
+      network_to_instances = dict((uuid, []) for uuid in network_uuids)
+      for instance in all_instances.values():
+        for nic in instance.nics:
+          if nic.network:
+            net_uuid = name_to_uuid[nic.network]
+            if net_uuid in network_uuids:
+              network_to_instances[net_uuid].append(instance.name)
+            break
+
+    if query.NETQ_STATS in self.requested_data:
+      stats = \
+        dict((uuid,
+              self._GetStats(network.AddressPool(all_networks[uuid])))
+             for uuid in network_uuids)
+    else:
+      stats = None
+
+    return query.NetworkQueryData([all_networks[uuid]
+                                   for uuid in network_uuids],
                                    network_to_groups,
                                    network_to_instances,
                                    stats)
 
+  @staticmethod
+  def _GetStats(pool):
+    """Returns statistics for a network address pool.
+
+    """
+    return {
+      "free_count": pool.GetFreeCount(),
+      "reserved_count": pool.GetReservedCount(),
+      "map": pool.GetMap(),
+      "external_reservations":
+        utils.CommaJoin(pool.GetExternalReservations()),
+      }
+
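For reference, _GetStats returns a plain dictionary per pool; an illustrative result for an eight-address pool is shown below (the values are made up, and the exact map string depends on network.AddressPool.GetMap()):

example_stats = {
  "free_count": 5,
  "reserved_count": 3,
  "map": "XX.....X",          # one character per address, X = reserved
  "external_reservations": "192.0.2.0, 192.0.2.7",
}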
 
 class LUNetworkQuery(NoHooksLU):
   """Logical unit for querying networks.
@@ -16178,7 +16596,7 @@ class LUNetworkQuery(NoHooksLU):
 
   def CheckArguments(self):
     self.nq = _NetworkQuery(qlang.MakeSimpleFilter("name", self.op.names),
-                            self.op.output_fields, False)
+                            self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.nq.ExpandNames(self)
@@ -16203,13 +16621,13 @@ class LUNetworkConnect(LogicalUnit):
 
     self.network_uuid = self.cfg.LookupNetwork(self.network_name)
     if self.network_uuid is None:
-      raise errors.OpPrereqError("Network %s does not exist" %
-                                 self.network_name, errors.ECODE_INVAL)
+      raise errors.OpPrereqError("Network '%s' does not exist" %
+                                 self.network_name, errors.ECODE_NOENT)
 
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
     if self.group_uuid is None:
-      raise errors.OpPrereqError("Group %s does not exist" %
-                                 self.group_name, errors.ECODE_INVAL)
+      raise errors.OpPrereqError("Group '%s' does not exist" %
+                                 self.group_name, errors.ECODE_NOENT)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
@@ -16248,9 +16666,6 @@ class LUNetworkConnect(LogicalUnit):
 
     assert self.group_uuid in owned_groups
 
-    l = lambda value: utils.CommaJoin("%s: %s/%s" % (i[0], i[1], i[2])
-                                      for i in value)
-
     self.netparams = {
       constants.NIC_MODE: self.network_mode,
       constants.NIC_LINK: self.network_link,
@@ -16268,28 +16683,10 @@ class LUNetworkConnect(LogicalUnit):
       return
 
     if self.op.conflicts_check:
-      # Check if locked instances are still correct
-      owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
-
-      nobj = self.cfg.GetNetwork(self.network_uuid)
-      pool = network.AddressPool(nobj)
-      conflicting_instances = []
-
-      for (_, instance) in self.cfg.GetMultiInstanceInfo(owned_instances):
-        for idx, nic in enumerate(instance.nics):
-          if pool.Contains(nic.ip):
-            conflicting_instances.append((instance.name, idx, nic.ip))
-
-      if conflicting_instances:
-        self.LogWarning("Following occurences use IPs from network %s"
-                        " that is about to connect to nodegroup %s: %s" %
-                        (self.network_name, self.group.name,
-                        l(conflicting_instances)))
-        raise errors.OpPrereqError("Conflicting IPs found."
-                                   " Please remove/modify"
-                                   " corresponding NICs",
-                                   errors.ECODE_INVAL)
+      pool = network.AddressPool(self.cfg.GetNetwork(self.network_uuid))
+
+      _NetworkConflictCheck(self, lambda nic: pool.Contains(nic.ip),
+                            "connect to")
 
   def Exec(self, feedback_fn):
     if self.connected:
@@ -16299,6 +16696,53 @@ class LUNetworkConnect(LogicalUnit):
     self.cfg.Update(self.group, feedback_fn)
 
 
+def _NetworkConflictCheck(lu, check_fn, action):
+  """Checks for network interface conflicts with a network.
+
+  @type lu: L{LogicalUnit}
+  @type check_fn: callable receiving one parameter (L{objects.NIC}) and
+    returning boolean
+  @param check_fn: Function checking for conflict
+  @type action: string
+  @param action: Part of error message (see code)
+  @raise errors.OpPrereqError: If conflicting IP addresses are found.
+
+  """
+  # Check if locked instances are still correct
+  owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
+  _CheckNodeGroupInstances(lu.cfg, lu.group_uuid, owned_instances)
+
+  conflicts = []
+
+  for (_, instance) in lu.cfg.GetMultiInstanceInfo(owned_instances):
+    instconflicts = [(idx, nic.ip)
+                     for (idx, nic) in enumerate(instance.nics)
+                     if check_fn(nic)]
+
+    if instconflicts:
+      conflicts.append((instance.name, instconflicts))
+
+  if conflicts:
+    lu.LogWarning("IP addresses from network '%s', which is about to %s"
+                  " node group '%s', are in use: %s" %
+                  (lu.network_name, action, lu.group.name,
+                   utils.CommaJoin(("%s: %s" %
+                                    (name, _FmtNetworkConflict(details)))
+                                   for (name, details) in conflicts)))
+
+    raise errors.OpPrereqError("Conflicting IP addresses found; "
+                               " remove/modify the corresponding network"
+                               " interfaces", errors.ECODE_STATE)
+
+
+def _FmtNetworkConflict(details):
+  """Utility for L{_NetworkConflictCheck}.
+
+  """
+  return utils.CommaJoin("nic%s/%s" % (idx, ipaddr)
+                         for (idx, ipaddr) in details)
+
+
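The helper pair above produces the per-instance summary used in the warning message. A standalone sketch of the formatting, with utils.CommaJoin approximated by str.join and made-up instance data:

def fmt_conflict(details):
  # Mirrors _FmtNetworkConflict: "nic<index>/<ip>" entries joined by commas
  return ", ".join("nic%s/%s" % (idx, ipaddr) for (idx, ipaddr) in details)

conflicts = [("instance1", [(0, "192.0.2.10"), (2, "192.0.2.12")])]
summary = ", ".join("%s: %s" % (name, fmt_conflict(details))
                    for (name, details) in conflicts)
assert summary == "instance1: nic0/192.0.2.10, nic2/192.0.2.12"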
 class LUNetworkDisconnect(LogicalUnit):
   """Disconnect a network to a nodegroup
 
@@ -16313,13 +16757,13 @@ class LUNetworkDisconnect(LogicalUnit):
 
     self.network_uuid = self.cfg.LookupNetwork(self.network_name)
     if self.network_uuid is None:
-      raise errors.OpPrereqError("Network %s does not exist" %
-                                 self.network_name, errors.ECODE_INVAL)
+      raise errors.OpPrereqError("Network '%s' does not exist" %
+                                 self.network_name, errors.ECODE_NOENT)
 
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
     if self.group_uuid is None:
-      raise errors.OpPrereqError("Group %s does not exist" %
-                                 self.group_name, errors.ECODE_INVAL)
+      raise errors.OpPrereqError("Group '%s' does not exist" %
+                                 self.group_name, errors.ECODE_NOENT)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
@@ -16352,9 +16796,6 @@ class LUNetworkDisconnect(LogicalUnit):
 
     assert self.group_uuid in owned_groups
 
-    l = lambda value: utils.CommaJoin("%s: %s/%s" % (i[0], i[1], i[2])
-                                      for i in value)
-
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True
     if self.network_uuid not in self.group.networks:
@@ -16364,27 +16805,8 @@ class LUNetworkDisconnect(LogicalUnit):
       return
 
     if self.op.conflicts_check:
-      # Check if locked instances are still correct
-      owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
-
-      conflicting_instances = []
-
-      for (_, instance) in self.cfg.GetMultiInstanceInfo(owned_instances):
-        for idx, nic in enumerate(instance.nics):
-          if nic.network == self.network_name:
-            conflicting_instances.append((instance.name, idx, nic.ip))
-
-      if conflicting_instances:
-        self.LogWarning("Following occurences use IPs from network %s"
-                           " that is about to disconnected from the nodegroup"
-                           " %s: %s" %
-                           (self.network_name, self.group.name,
-                            l(conflicting_instances)))
-        raise errors.OpPrereqError("Conflicting IPs."
-                                   " Please remove/modify"
-                                   " corresponding NICS",
-                                   errors.ECODE_INVAL)
+      _NetworkConflictCheck(self, lambda nic: nic.network == self.network_name,
+                            "disconnect from")
 
   def Exec(self, feedback_fn):
     if not self.connected:
@@ -16402,6 +16824,7 @@ _QUERY_IMPL = {
   constants.QR_GROUP: _GroupQuery,
   constants.QR_NETWORK: _NetworkQuery,
   constants.QR_OS: _OsQuery,
+  constants.QR_EXTSTORAGE: _ExtStorageQuery,
   constants.QR_EXPORT: _ExportQuery,
   }
 
@@ -16422,18 +16845,18 @@ def _GetQueryImplementation(name):
 
 
 def _CheckForConflictingIp(lu, ip, node):
-  """In case of conflicting ip raise error.
+  """In case of conflicting IP address raise error.
 
   @type ip: string
-  @param ip: ip address
+  @param ip: IP address
   @type node: string
   @param node: node name
 
   """
   (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
   if conf_net is not None:
-    raise errors.OpPrereqError("Conflicting IP found:"
-                               " %s <> %s." % (ip, conf_net),
-                               errors.ECODE_INVAL)
+    raise errors.OpPrereqError(("Conflicting IP address found: '%s' != '%s'" %
+                                (ip, conf_net)),
+                               errors.ECODE_STATE)
 
   return (None, None)
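Finally, a standalone sketch of the check just above; check_ip_in_node_group is an illustrative stand-in for cfg.CheckIPInNodeGroup, returning the name of a conflicting network (or None) and an unused second element:

_RESERVED = {"192.0.2.10": "net-a"}  # illustrative address -> network map

def check_ip_in_node_group(ip, node):
  # Stand-in for cfg.CheckIPInNodeGroup(ip, node)
  return (_RESERVED.get(ip), None)

def check_for_conflicting_ip(ip, node):
  (conf_net, _) = check_ip_in_node_group(ip, node)
  if conf_net is not None:
    raise ValueError("Conflicting IP address found: '%s' != '%s'"
                     % (ip, conf_net))
  return (None, None)

assert check_for_conflicting_ip("192.0.2.99", "node1") == (None, None)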