Check for running instances when disabling disk templates
lib/cmdlib/cluster.py
index 6a4dd5e..8d527ba 100644
@@ -56,7 +56,9 @@ from ganeti.cmdlib.common import ShareAll, RunPostHook, \
   GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
   GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
   CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
-  ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob
+  ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
+  CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
+  CheckDiskAccessModeConsistency
 
 import ganeti.masterd.instance
 
@@ -290,6 +292,7 @@ class LUClusterQuery(NoHooksLU):
       "config_version": constants.CONFIG_VERSION,
       "os_api_version": max(constants.OS_API_VERSIONS),
       "export_version": constants.EXPORT_VERSION,
+      "vcs_version": constants.VCS_VERSION,
       "architecture": runtime.GetArchInfo(),
       "name": cluster.cluster_name,
       "master": self.cfg.GetMasterNodeName(),
@@ -493,7 +496,7 @@ class LUClusterRepairDiskSizes(NoHooksLU):
     @param disk: an L{ganeti.objects.Disk} object
 
     """
-    if disk.dev_type == constants.LD_DRBD8:
+    if disk.dev_type == constants.DT_DRBD8:
       assert disk.children, "Empty children for DRBD8?"
       fchild = disk.children[0]
       mismatch = fchild.size < disk.size
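
The constant rename (LD_DRBD8 to DT_DRBD8) follows the consolidation of
logical-device types into the disk-template namespace; the size check itself
is unchanged. A minimal standalone sketch of that rule, where the Disk class
below is an illustrative stand-in for ganeti.objects.Disk rather than the
real implementation:

    DT_DRBD8 = "drbd"

    class Disk(object):
      def __init__(self, dev_type, size, children=None):
        self.dev_type = dev_type
        self.size = size          # sizes in MiB
        self.children = children or []

    def drbd_child_undersized(disk):
      """Return True if a DRBD8 disk's backing (child) device is smaller
      than the DRBD device itself and must be grown first."""
      if disk.dev_type != DT_DRBD8:
        return False
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      return fchild.size < disk.size

    drbd = Disk(DT_DRBD8, 1024, children=[Disk("plain", 1000)])
    assert drbd_child_undersized(drbd)  # 1000 MiB backing a 1024 MiB device
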
@@ -531,9 +534,11 @@ class LUClusterRepairDiskSizes(NoHooksLU):
 
     changed = []
     for node_uuid, dskl in per_node_disks.items():
-      newl = [v[2].Copy() for v in dskl]
-      for dsk in newl:
-        self.cfg.SetDiskID(dsk, node_uuid)
+      if not dskl:
+        # no disks on the node
+        continue
+
+      newl = [([v[2].Copy()], v[0]) for v in dskl]
       node_name = self.cfg.GetNodeName(node_uuid)
       result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
       if result.fail_msg:
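
With SetDiskID gone, the getdimensions payload now carries the owning
instance next to each disk copy, and nodes without disks are skipped before
the RPC. A sketch of the new payload shape, assuming per_node_disks maps a
node UUID to (instance, index, disk) tuples as in the loop above; plain
dicts and dict() stand in for objects.Disk and its Copy() method:

    per_node_disks = {
        "node-uuid-1": [("inst1", 0, {"size": 1024}),
                        ("inst1", 1, {"size": 512})],
        "node-uuid-2": [],
    }

    for node_uuid, dskl in per_node_disks.items():
      if not dskl:
        # no disks on the node, so no RPC is issued for it
        continue
      # each entry pairs a one-element list holding the disk copy with the
      # instance that owns it
      newl = [([dict(v[2])], v[0]) for v in dskl]
      print("%s -> %r" % (node_uuid, newl))
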
@@ -609,6 +614,68 @@ def _ValidateNetmask(cfg, netmask):
                                (netmask), errors.ECODE_INVAL)
 
 
+def CheckFileBasedStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates,
+    file_disk_template):
+  """Checks whether the given file-based storage directory is acceptable.
+
+  Note: This function is public, because it is also used in bootstrap.py.
+
+  @type logging_warn_fn: function
+  @param logging_warn_fn: function which accepts a string and logs it
+  @type file_storage_dir: string
+  @param file_storage_dir: the directory to be used for file-based instances
+  @type enabled_disk_templates: list of string
+  @param enabled_disk_templates: the list of enabled disk templates
+  @type file_disk_template: string
+  @param file_disk_template: the file-based disk template for which the
+      path should be checked
+
+  """
+  assert (file_disk_template in
+          utils.storage.GetDiskTemplatesOfStorageType(constants.ST_FILE))
+  file_storage_enabled = file_disk_template in enabled_disk_templates
+  if file_storage_dir is not None:
+    if file_storage_dir == "":
+      if file_storage_enabled:
+        raise errors.OpPrereqError(
+            "Unsetting the '%s' storage directory while having '%s' storage"
+            " enabled is not permitted." %
+            (file_disk_template, file_disk_template))
+    else:
+      if not file_storage_enabled:
+        logging_warn_fn(
+            "Specified a %s storage directory, although %s storage is not"
+            " enabled." % (file_disk_template, file_disk_template))
+  else:
+    raise errors.ProgrammerError("Received %s storage dir with value"
+                                 " 'None'." % file_disk_template)
+
+
+def CheckFileStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given file storage directory is acceptable.
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_FILE)
+
+
+def CheckSharedFileStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given shared file storage directory is acceptable.
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_SHARED_FILE)
+
+
 class LUClusterSetParams(LogicalUnit):
   """Change the parameters of the cluster.
 
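
The three helpers above encode a small decision table: unsetting the
directory while the template is enabled is an error, setting a directory
while the template is disabled only warns, and None is a programmer error.
A standalone sketch of the same rules, with stock exceptions standing in
for errors.OpPrereqError and errors.ProgrammerError:

    def warn(msg):
      print(msg)

    def check_storage_dir(warn_fn, storage_dir, enabled_templates, template):
      enabled = template in enabled_templates
      if storage_dir is None:
        raise AssertionError("Received %s storage dir with value 'None'."
                             % template)
      if storage_dir == "":
        if enabled:
          raise ValueError("Unsetting the '%s' storage directory while"
                           " having '%s' storage enabled is not permitted."
                           % (template, template))
      elif not enabled:
        warn_fn("Specified a %s storage directory, although %s storage is"
                " not enabled." % (template, template))

    # set directory, template disabled: warning only
    check_storage_dir(warn, "/srv/ganeti/file-storage", ["drbd"], "file")
    # unset directory, template enabled: rejected
    try:
      check_storage_dir(warn, "", ["file"], "file")
    except ValueError as err:
      print(err)
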
@@ -638,6 +705,7 @@ class LUClusterSetParams(LogicalUnit):
         utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
       try:
         utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
+        CheckDiskAccessModeValidity(self.op.diskparams)
       except errors.OpPrereqError, err:
         raise errors.OpPrereqError("While verify diskparams options: %s" % err,
                                    errors.ECODE_INVAL)
@@ -677,16 +745,28 @@ class LUClusterSetParams(LogicalUnit):
        unset whether there are instances still using it.
 
     """
+    lvm_is_enabled = utils.IsLvmEnabled(enabled_disk_templates)
+    lvm_gets_enabled = utils.LvmGetsEnabled(enabled_disk_templates,
+                                            new_enabled_disk_templates)
+    current_vg_name = self.cfg.GetVGName()
+
+    if self.op.vg_name == '':
+      if lvm_is_enabled:
+        raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
+                                   " disk templates are or get enabled.")
+
+    if self.op.vg_name is None:
+      if current_vg_name is None and lvm_is_enabled:
+        raise errors.OpPrereqError("Please specify a volume group when"
+                                   " enabling lvm-based disk-templates.")
+
     if self.op.vg_name is not None and not self.op.vg_name:
-      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
+      if self.cfg.HasAnyDiskOfType(constants.DT_PLAIN):
         raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                    " instances exist", errors.ECODE_INVAL)
 
-    if (self.op.vg_name is not None and
-        utils.IsLvmEnabled(enabled_disk_templates)) or \
-           (self.cfg.GetVGName() is not None and
-            utils.LvmGetsEnabled(enabled_disk_templates,
-                                 new_enabled_disk_templates)):
+    if (self.op.vg_name is not None and lvm_is_enabled) or \
+        (self.cfg.GetVGName() is not None and lvm_gets_enabled):
       self._CheckVgNameOnNodes(node_uuids)
 
   def _CheckVgNameOnNodes(self, node_uuids):
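
Hoisting lvm_is_enabled and lvm_gets_enabled makes the volume-group rules
explicit: the VG may not be unset while any lvm-based template remains
enabled, and enabling an lvm-based template requires a configured or newly
given VG. Restated in isolation (None means "leave unchanged", '' means
"unset"; ValueError stands in for errors.OpPrereqError):

    def check_vg_name(op_vg_name, current_vg_name, lvm_is_enabled):
      if op_vg_name == '' and lvm_is_enabled:
        raise ValueError("Cannot unset volume group if lvm-based"
                         " disk templates are or get enabled.")
      if op_vg_name is None and current_vg_name is None and lvm_is_enabled:
        raise ValueError("Please specify a volume group when"
                         " enabling lvm-based disk-templates.")

    check_vg_name("xenvg", None, True)   # ok: a VG is being set
    check_vg_name(None, "xenvg", True)   # ok: a VG is already configured
    check_vg_name("", "xenvg", False)    # ok: no lvm template is enabled
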
@@ -710,35 +790,154 @@ class LUClusterSetParams(LogicalUnit):
                                    (self.cfg.GetNodeName(node_uuid), vgstatus),
                                    errors.ECODE_ENVIRON)
 
-  def _GetEnabledDiskTemplates(self, cluster):
+  @staticmethod
+  def _GetEnabledDiskTemplatesInner(op_enabled_disk_templates,
+                                    old_enabled_disk_templates):
     """Determines the enabled disk templates and the subset of disk templates
        that are newly enabled by this operation.
 
     """
     enabled_disk_templates = None
     new_enabled_disk_templates = []
-    if self.op.enabled_disk_templates:
-      enabled_disk_templates = self.op.enabled_disk_templates
+    if op_enabled_disk_templates:
+      enabled_disk_templates = op_enabled_disk_templates
       new_enabled_disk_templates = \
         list(set(enabled_disk_templates)
-             - set(cluster.enabled_disk_templates))
+             - set(old_enabled_disk_templates))
     else:
-      enabled_disk_templates = cluster.enabled_disk_templates
+      enabled_disk_templates = old_enabled_disk_templates
     return (enabled_disk_templates, new_enabled_disk_templates)
 
-  def CheckPrereq(self):
-    """Check prerequisites.
+  def _GetEnabledDiskTemplates(self, cluster):
+    """Determines the enabled disk templates and the subset of disk templates
+       that are newly enabled by this operation.
 
-    This checks whether the given params don't conflict and
-    if the given volume group is valid.
+    """
+    return self._GetEnabledDiskTemplatesInner(self.op.enabled_disk_templates,
+                                              cluster.enabled_disk_templates)
+
+  def _CheckIpolicy(self, cluster, enabled_disk_templates):
+    """Checks the ipolicy.
+
+    @type cluster: C{objects.Cluster}
+    @param cluster: the cluster's configuration
+    @type enabled_disk_templates: list of string
+    @param enabled_disk_templates: list of (possibly newly) enabled disk
+      templates
+
+    """
+    # FIXME: write unit tests for this
+    if self.op.ipolicy:
+      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
+                                           group_policy=False)
+
+      CheckIpolicyVsDiskTemplates(self.new_ipolicy,
+                                  enabled_disk_templates)
+
+      all_instances = self.cfg.GetAllInstancesInfo().values()
+      violations = set()
+      for group in self.cfg.GetAllNodeGroupsInfo().values():
+        instances = frozenset([inst for inst in all_instances
+                               if compat.any(nuuid in group.members
+                                             for nuuid in inst.all_nodes)])
+        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
+        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
+        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
+                                           self.cfg)
+        if new:
+          violations.update(new)
+
+      if violations:
+        self.LogWarning("After the ipolicy change the following instances"
+                        " violate them: %s",
+                        utils.CommaJoin(utils.NiceSort(violations)))
+    else:
+      CheckIpolicyVsDiskTemplates(cluster.ipolicy,
+                                  enabled_disk_templates)
+
+  def _CheckDrbdHelperOnNodes(self, drbd_helper, node_uuids):
+    """Checks whether the set DRBD helper actually exists on the nodes.
+
+    @type drbd_helper: string
+    @param drbd_helper: path of the drbd usermode helper binary
+    @type node_uuids: list of strings
+    @param node_uuids: list of node UUIDs to check for the helper
 
     """
-    if self.op.drbd_helper is not None and not self.op.drbd_helper:
-      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
+    # checks given drbd helper on all nodes
+    helpers = self.rpc.call_drbd_helper(node_uuids)
+    for (_, ninfo) in self.cfg.GetMultiNodeInfo(node_uuids):
+      if ninfo.offline:
+        self.LogInfo("Not checking drbd helper on offline node %s",
+                     ninfo.name)
+        continue
+      msg = helpers[ninfo.uuid].fail_msg
+      if msg:
+        raise errors.OpPrereqError("Error checking drbd helper on node"
+                                   " '%s': %s" % (ninfo.name, msg),
+                                   errors.ECODE_ENVIRON)
+      node_helper = helpers[ninfo.uuid].payload
+      if node_helper != drbd_helper:
+        raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
+                                   (ninfo.name, node_helper),
+                                   errors.ECODE_ENVIRON)
+
+  def _CheckDrbdHelper(self, node_uuids, drbd_enabled, drbd_gets_enabled):
+    """Check the DRBD usermode helper.
+
+    @type node_uuids: list of strings
+    @param node_uuids: a list of nodes' UUIDs
+    @type drbd_enabled: boolean
+    @param drbd_enabled: whether DRBD will be enabled after this operation
+      (no matter if it was disabled before or not)
+  @type drbd_gets_enabled: boolean
+    @param drbd_gets_enabled: true if DRBD was disabled before this
+      operation, but will be enabled afterwards
+
+    """
+    if self.op.drbd_helper == '':
+      if drbd_enabled:
+        raise errors.OpPrereqError("Cannot disable drbd helper while"
+                                   " DRBD is enabled.")
+      if self.cfg.HasAnyDiskOfType(constants.DT_DRBD8):
         raise errors.OpPrereqError("Cannot disable drbd helper while"
                                    " drbd-based instances exist",
                                    errors.ECODE_INVAL)
 
+    else:
+      if self.op.drbd_helper is not None and drbd_enabled:
+        self._CheckDrbdHelperOnNodes(self.op.drbd_helper, node_uuids)
+      else:
+        if drbd_gets_enabled:
+          current_drbd_helper = self.cfg.GetClusterInfo().drbd_usermode_helper
+          if current_drbd_helper is not None:
+            self._CheckDrbdHelperOnNodes(current_drbd_helper, node_uuids)
+          else:
+            raise errors.OpPrereqError("Cannot enable DRBD without a"
+                                       " DRBD usermode helper set.")
+
+  def _CheckInstancesOfDisabledDiskTemplates(
+      self, disabled_disk_templates):
+    """Check whether we try to a disk template that is in use.
+
+    @type disabled_disk_templates: list of string
+    @param disabled_disk_templates: list of disk templates that are going to
+      be disabled by this operation
+
+    """
+    for disk_template in disabled_disk_templates:
+      if self.cfg.HasAnyDiskOfType(disk_template):
+        raise errors.OpPrereqError(
+            "Cannot disable disk template '%s', because there is at least one"
+            " instance using it." % disk_template)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks whether the given params don't conflict and
+    if the given volume group is valid.
+
+    """
     node_uuids = self.owned_locks(locking.LEVEL_NODE)
     self.cluster = cluster = self.cfg.GetClusterInfo()
 
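
_CheckInstancesOfDisabledDiskTemplates is the change advertised in the
subject line: before a disk template may be disabled, the configuration is
scanned for disks of that type. A minimal sketch, with a predicate standing
in for self.cfg.HasAnyDiskOfType and ValueError for errors.OpPrereqError:

    def check_disabled_templates(disabled_templates, has_any_disk_of_type):
      for disk_template in disabled_templates:
        if has_any_disk_of_type(disk_template):
          raise ValueError(
              "Cannot disable disk template '%s', because there is at"
              " least one instance using it." % disk_template)

    templates_in_use = set(["drbd"])
    check_disabled_templates(["file"],
                             lambda t: t in templates_in_use)  # passes
    try:
      check_disabled_templates(["drbd"], lambda t: t in templates_in_use)
    except ValueError as err:
      print(err)  # 'drbd' still backs an instance, so it cannot go away
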
@@ -752,24 +951,18 @@ class LUClusterSetParams(LogicalUnit):
     self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
                       new_enabled_disk_templates)
 
-    if self.op.drbd_helper:
-      # checks given drbd helper on all nodes
-      helpers = self.rpc.call_drbd_helper(node_uuids)
-      for (_, ninfo) in self.cfg.GetMultiNodeInfo(node_uuids):
-        if ninfo.offline:
-          self.LogInfo("Not checking drbd helper on offline node %s",
-                       ninfo.name)
-          continue
-        msg = helpers[ninfo.uuid].fail_msg
-        if msg:
-          raise errors.OpPrereqError("Error checking drbd helper on node"
-                                     " '%s': %s" % (ninfo.name, msg),
-                                     errors.ECODE_ENVIRON)
-        node_helper = helpers[ninfo.uuid].payload
-        if node_helper != self.op.drbd_helper:
-          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
-                                     (ninfo.name, node_helper),
-                                     errors.ECODE_ENVIRON)
+    if self.op.file_storage_dir is not None:
+      CheckFileStoragePathVsEnabledDiskTemplates(
+          self.LogWarning, self.op.file_storage_dir, enabled_disk_templates)
+
+    if self.op.shared_file_storage_dir is not None:
+      CheckSharedFileStoragePathVsEnabledDiskTemplates(
+          self.LogWarning, self.op.shared_file_storage_dir,
+          enabled_disk_templates)
+
+    drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
+    drbd_gets_enabled = constants.DT_DRBD8 in new_enabled_disk_templates
+    self._CheckDrbdHelper(node_uuids, drbd_enabled, drbd_gets_enabled)
 
     # validate params changes
     if self.op.beparams:
@@ -801,27 +994,7 @@ class LUClusterSetParams(LogicalUnit):
                             for name, values in svalues.items()))
              for storage, svalues in new_disk_state.items())
 
-    if self.op.ipolicy:
-      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
-                                           group_policy=False)
-
-      all_instances = self.cfg.GetAllInstancesInfo().values()
-      violations = set()
-      for group in self.cfg.GetAllNodeGroupsInfo().values():
-        instances = frozenset([inst for inst in all_instances
-                               if compat.any(nuuid in group.members
-                                             for nuuid in inst.all_nodes)])
-        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
-        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
-        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
-                                           self.cfg)
-        if new:
-          violations.update(new)
-
-      if violations:
-        self.LogWarning("After the ipolicy change the following instances"
-                        " violate them: %s",
-                        utils.CommaJoin(utils.NiceSort(violations)))
+    self._CheckIpolicy(cluster, enabled_disk_templates)
 
     if self.op.nicparams:
       utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
@@ -864,10 +1037,11 @@ class LUClusterSetParams(LogicalUnit):
     self.new_diskparams = objects.FillDict(cluster.diskparams, {})
     if self.op.diskparams:
       for dt_name, dt_params in self.op.diskparams.items():
-        if dt_name not in self.op.diskparams:
+        if dt_name not in self.new_diskparams:
           self.new_diskparams[dt_name] = dt_params
         else:
           self.new_diskparams[dt_name].update(dt_params)
+      CheckDiskAccessModeConsistency(self.op.diskparams, self.cfg)
 
     # os hypervisor parameters
     self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
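
The one-word fix above is easy to miss: the old test read "dt_name not in
self.op.diskparams", comparing the key against the very dict it was taken
from, so it was never true, and parameters for a template missing from
new_diskparams crashed with KeyError on update(). A sketch of the corrected
merge:

    def merge_diskparams(current, overrides):
      # membership is tested against the merged dict (the fix), not
      # against 'overrides' (the old, always-false test)
      merged = dict((k, dict(v)) for k, v in current.items())
      for dt_name, dt_params in overrides.items():
        if dt_name not in merged:
          merged[dt_name] = dt_params
        else:
          merged[dt_name].update(dt_params)
      return merged

    current = {"drbd": {"metavg": "xenvg"}}
    new = merge_diskparams(current, {"drbd": {"resync-rate": 2048},
                                     "rbd": {"pool": "rbd"}})
    assert new["drbd"] == {"metavg": "xenvg", "resync-rate": 2048}
    assert new["rbd"] == {"pool": "rbd"}  # new key no longer raises
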
@@ -977,41 +1151,34 @@ class LUClusterSetParams(LogicalUnit):
 
     """
     if self.op.vg_name is not None:
-      if self.op.vg_name and not \
-           utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
-        feedback_fn("Note that you specified a volume group, but did not"
-                    " enable any lvm disk template.")
       new_volume = self.op.vg_name
       if not new_volume:
-        if utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
-          raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
-                                     " disk templates are enabled.")
         new_volume = None
       if new_volume != self.cfg.GetVGName():
         self.cfg.SetVGName(new_volume)
       else:
         feedback_fn("Cluster LVM configuration already in desired"
                     " state, not changing")
-    else:
-      if utils.IsLvmEnabled(self.cluster.enabled_disk_templates) and \
-          not self.cfg.GetVGName():
-        raise errors.OpPrereqError("Please specify a volume group when"
-                                   " enabling lvm-based disk-templates.")
 
-  def Exec(self, feedback_fn):
-    """Change the parameters of the cluster.
+  def _SetFileStorageDir(self, feedback_fn):
+    """Set the file storage directory.
 
     """
-    if self.op.enabled_disk_templates:
-      self.cluster.enabled_disk_templates = \
-        list(set(self.op.enabled_disk_templates))
+    if self.op.file_storage_dir is not None:
+      if self.cluster.file_storage_dir == self.op.file_storage_dir:
+        feedback_fn("Global file storage dir already set to value '%s'"
+                    % self.cluster.file_storage_dir)
+      else:
+        self.cluster.file_storage_dir = self.op.file_storage_dir
 
-    self._SetVgName(feedback_fn)
+  def _SetDrbdHelper(self, feedback_fn):
+    """Set the DRBD usermode helper.
 
+    """
     if self.op.drbd_helper is not None:
       if not constants.DT_DRBD8 in self.cluster.enabled_disk_templates:
-        feedback_fn("Note that you specified a drbd user helper, but did"
-                    " enabled the drbd disk template.")
+        feedback_fn("Note that you specified a drbd user helper, but did not"
+                    " enable the drbd disk template.")
       new_helper = self.op.drbd_helper
       if not new_helper:
         new_helper = None
@@ -1020,6 +1187,19 @@ class LUClusterSetParams(LogicalUnit):
       else:
         feedback_fn("Cluster DRBD helper already in desired state,"
                     " not changing")
+
+  def Exec(self, feedback_fn):
+    """Change the parameters of the cluster.
+
+    """
+    if self.op.enabled_disk_templates:
+      self.cluster.enabled_disk_templates = \
+        list(set(self.op.enabled_disk_templates))
+
+    self._SetVgName(feedback_fn)
+    self._SetFileStorageDir(feedback_fn)
+    self._SetDrbdHelper(feedback_fn)
+
     if self.op.hvparams:
       self.cluster.hvparams = self.new_hvparams
     if self.op.os_hvp:
@@ -1358,12 +1538,12 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
       (errcode, msg) = _VerifyCertificate(cert_filename)
       self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
 
-    self._ErrorIf(not utils.CanRead(constants.CONFD_USER,
+    self._ErrorIf(not utils.CanRead(constants.LUXID_USER,
                                     pathutils.NODED_CERT_FILE),
                   constants.CV_ECLUSTERCERT,
                   None,
                   pathutils.NODED_CERT_FILE + " must be accessible by the " +
-                    constants.CONFD_USER + " user")
+                    constants.LUXID_USER + " user")
 
     feedback_fn("* Verifying hypervisor parameters")
 
@@ -1391,9 +1571,8 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
     pretty_dangling = [
         "%s (%s)" %
         (node.name,
-         utils.CommaJoin(
-           self.cfg.GetInstanceNames(
-             dangling_instances.get(node.uuid, ["no instances"]))))
+         utils.CommaJoin(inst.name for
+                         inst in dangling_instances.get(node.uuid, [])))
         for node in dangling_nodes]
 
     self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
@@ -1404,8 +1583,8 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
     self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
                   None,
                   "the following instances have a non-existing primary-node:"
-                  " %s", utils.CommaJoin(
-                           self.cfg.GetInstanceNames(no_node_instances)))
+                  " %s", utils.CommaJoin(inst.name for
+                                         inst in no_node_instances))
 
     return not self.bad
 
@@ -2023,7 +2202,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
          self.all_node_info[node_uuid].group != self.group_uuid:
         # we're skipping nodes marked offline and nodes in other groups from
         # the N+1 warning, since most likely we don't have good memory
-        # infromation from them; we already list instances living on such
+        # information from them; we already list instances living on such
         # nodes, and that's enough warning
         continue
       #TODO(dynmem): also consider ballooning out other instances
@@ -2154,17 +2333,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                     "File %s found with %s different checksums (%s)",
                     filename, len(checksums), "; ".join(variants))
 
-  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
-                      drbd_map):
-    """Verifies and the node DRBD status.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @param instanceinfo: the dict of instances
-    @param drbd_helper: the configured DRBD usermode helper
-    @param drbd_map: the DRBD map as returned by
-        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
+  def _VerifyNodeDrbdHelper(self, ninfo, nresult, drbd_helper):
+    """Verify the drbd helper.
 
     """
     if drbd_helper:
@@ -2181,6 +2351,21 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
                       "wrong drbd usermode helper: %s", payload)
 
+  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
+                      drbd_map):
+    """Verifies and the node DRBD status.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @param instanceinfo: the dict of instances
+    @param drbd_helper: the configured DRBD usermode helper
+    @param drbd_map: the DRBD map as returned by
+        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
+
+    """
+    self._VerifyNodeDrbdHelper(ninfo, nresult, drbd_helper)
+
     # compute the DRBD minors
     node_drbd = {}
     for minor, inst_uuid in drbd_map[ninfo.uuid].items():
@@ -2334,21 +2519,54 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                     "Node should not have returned forbidden file storage"
                     " paths")
 
-  def _VerifyStoragePaths(self, ninfo, nresult):
+  def _VerifyStoragePaths(self, ninfo, nresult, file_disk_template,
+                          verify_key, error_key):
     """Verifies (file) storage paths.
 
     @type ninfo: L{objects.Node}
     @param ninfo: the node to check
     @param nresult: the remote results for the node
+    @type file_disk_template: string
+    @param file_disk_template: file-based disk template, whose directory
+        is supposed to be verified
+    @type verify_key: string
+    @param verify_key: key for the verification map of this file
+        verification step
+    @param error_key: error key to be added to the verification results
+        in case something goes wrong in this verification step
 
     """
+    assert (file_disk_template in
+            utils.storage.GetDiskTemplatesOfStorageType(constants.ST_FILE))
     cluster = self.cfg.GetClusterInfo()
-    if cluster.IsFileStorageEnabled():
+    if cluster.IsDiskTemplateEnabled(file_disk_template):
       self._ErrorIf(
-          constants.NV_FILE_STORAGE_PATH in nresult,
-          constants.CV_ENODEFILESTORAGEPATHUNUSABLE, ninfo.name,
-          "The configured file storage path is unusable: %s" %
-          nresult.get(constants.NV_FILE_STORAGE_PATH))
+          verify_key in nresult,
+          error_key, ninfo.name,
+          "The configured %s storage path is unusable: %s" %
+          (file_disk_template, nresult.get(verify_key)))
+
+  def _VerifyFileStoragePaths(self, ninfo, nresult):
+    """Verifies (file) storage paths.
+
+    @see: C{_VerifyStoragePaths}
+
+    """
+    self._VerifyStoragePaths(
+        ninfo, nresult, constants.DT_FILE,
+        constants.NV_FILE_STORAGE_PATH,
+        constants.CV_ENODEFILESTORAGEPATHUNUSABLE)
+
+  def _VerifySharedFileStoragePaths(self, ninfo, nresult):
+    """Verifies (file) storage paths.
+
+    @see: C{_VerifyStoragePaths}
+
+    """
+    self._VerifyStoragePaths(
+        ninfo, nresult, constants.DT_SHARED_FILE,
+        constants.NV_SHARED_FILE_STORAGE_PATH,
+        constants.CV_ENODESHAREDFILESTORAGEPATHUNUSABLE)
 
   def _VerifyOob(self, ninfo, nresult):
     """Verifies out of band functionality of a node.
@@ -2469,7 +2687,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     node_disks = {}
-    node_disks_devonly = {}
+    node_disks_dev_inst_only = {}
     diskless_instances = set()
     diskless = constants.DT_DISKLESS
 
@@ -2489,20 +2707,19 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       node_disks[nuuid] = disks
 
       # _AnnotateDiskParams makes already copies of the disks
-      devonly = []
+      dev_inst_only = []
       for (inst_uuid, dev) in disks:
         (anno_disk,) = AnnotateDiskParams(instanceinfo[inst_uuid], [dev],
                                           self.cfg)
-        self.cfg.SetDiskID(anno_disk, nuuid)
-        devonly.append(anno_disk)
+        dev_inst_only.append((anno_disk, instanceinfo[inst_uuid]))
 
-      node_disks_devonly[nuuid] = devonly
+      node_disks_dev_inst_only[nuuid] = dev_inst_only
 
-    assert len(node_disks) == len(node_disks_devonly)
+    assert len(node_disks) == len(node_disks_dev_inst_only)
 
     # Collect data from all nodes with disks
-    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
-                                                          node_disks_devonly)
+    result = self.rpc.call_blockdev_getmirrorstatus_multi(
+               node_disks.keys(), node_disks_dev_inst_only)
 
     assert len(result) == len(node_disks)
 
@@ -2687,10 +2904,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       node_verify_param[constants.NV_LVLIST] = vg_name
       node_verify_param[constants.NV_PVLIST] = [vg_name]
 
-    if drbd_helper:
-      node_verify_param[constants.NV_DRBDVERSION] = None
-      node_verify_param[constants.NV_DRBDLIST] = None
-      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
+    if cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
+      if drbd_helper:
+        node_verify_param[constants.NV_DRBDVERSION] = None
+        node_verify_param[constants.NV_DRBDLIST] = None
+        node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
 
     if cluster.IsFileStorageEnabled() or \
         cluster.IsSharedFileStorageEnabled():
@@ -2865,7 +3083,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       self._VerifyOob(node_i, nresult)
       self._VerifyAcceptedFileStoragePaths(node_i, nresult,
                                            node_i.uuid == master_node_uuid)
-      self._VerifyStoragePaths(node_i, nresult)
+      self._VerifyFileStoragePaths(node_i, nresult)
+      self._VerifySharedFileStoragePaths(node_i, nresult)
 
       if nimg.vm_capable:
         self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)