ClusterSetParams: move vg-name checks to CheckPrereq
[ganeti-local] / lib / cmdlib / cluster.py
index 64121e7..32e24de 100644 (file)
@@ -56,7 +56,8 @@ from ganeti.cmdlib.common import ShareAll, RunPostHook, \
   GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
   GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
   CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
-  ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob
+  ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
+  CheckIpolicyVsDiskTemplates
 
 import ganeti.masterd.instance
 
@@ -290,6 +291,7 @@ class LUClusterQuery(NoHooksLU):
       "config_version": constants.CONFIG_VERSION,
       "os_api_version": max(constants.OS_API_VERSIONS),
       "export_version": constants.EXPORT_VERSION,
+      "vcs_version": constants.VCS_VERSION,
       "architecture": runtime.GetArchInfo(),
       "name": cluster.cluster_name,
       "master": self.cfg.GetMasterNodeName(),
@@ -609,6 +611,68 @@ def _ValidateNetmask(cfg, netmask):
                                (netmask), errors.ECODE_INVAL)
 
 
+def CheckFileBasedStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates,
+    file_disk_template):
+  """Checks whether the given file-based storage directory is acceptable.
+
+  Note: This function is public, because it is also used in bootstrap.py.
+
+  @type logging_warn_fn: function
+  @param logging_warn_fn: function which accepts a string and logs it
+  @type file_storage_dir: string
+  @param file_storage_dir: the directory to be used for file-based instances
+  @type enabled_disk_templates: list of string
+  @param enabled_disk_templates: the list of enabled disk templates
+  @type file_disk_template: string
+  @param file_disk_template: the file-based disk template for which the
+      path should be checked
+
+  @raise errors.OpPrereqError: if the directory is unset (empty string)
+      while the given file-based disk template is enabled
+  @raise errors.ProgrammerError: if the directory is C{None}
+
+  """
+  assert (file_disk_template in
+          utils.storage.GetDiskTemplatesOfStorageType(constants.ST_FILE))
+  file_storage_enabled = file_disk_template in enabled_disk_templates
+  if file_storage_dir is not None:
+    if file_storage_dir == "":
+      if file_storage_enabled:
+        raise errors.OpPrereqError(
+            "Unsetting the '%s' storage directory while having '%s' storage"
+            " enabled is not permitted." %
+            (file_disk_template, file_disk_template),
+            errors.ECODE_INVAL)
+    else:
+      if not file_storage_enabled:
+        logging_warn_fn(
+            "Specified a %s storage directory, although %s storage is not"
+            " enabled." % (file_disk_template, file_disk_template))
+  else:
+    raise errors.ProgrammerError("Received %s storage dir with value"
+                                 " 'None'." % file_disk_template)
+
+
+def CheckFileStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given file storage directory is acceptable.
+
+  Convenience wrapper that checks the directory against the C{DT_FILE}
+  disk template; see the referenced function for parameter documentation
+  and the errors raised.
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_FILE)
+
+
+def CheckSharedFileStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given shared file storage directory is acceptable.
+
+  Convenience wrapper that checks the directory against the
+  C{DT_SHARED_FILE} disk template; see the referenced function for
+  parameter documentation and the errors raised.
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_SHARED_FILE)
+
+
 class LUClusterSetParams(LogicalUnit):
   """Change the parameters of the cluster.
 
@@ -677,16 +741,28 @@ class LUClusterSetParams(LogicalUnit):
        unset whether there are instances still using it.
 
     """
+    lvm_is_enabled = utils.IsLvmEnabled(enabled_disk_templates)
+    lvm_gets_enabled = utils.LvmGetsEnabled(enabled_disk_templates,
+                                            new_enabled_disk_templates)
+    current_vg_name = self.cfg.GetVGName()
+
+    if self.op.vg_name == '':
+      if lvm_is_enabled:
+        raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
+                                   " disk templates are or get enabled.")
+
+    if self.op.vg_name is None:
+      if current_vg_name is None and lvm_is_enabled:
+        raise errors.OpPrereqError("Please specify a volume group when"
+                                   " enabling lvm-based disk-templates.")
+
     if self.op.vg_name is not None and not self.op.vg_name:
       if self.cfg.HasAnyDiskOfType(constants.LD_LV):
         raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                    " instances exist", errors.ECODE_INVAL)
 
-    if (self.op.vg_name is not None and
-        utils.IsLvmEnabled(enabled_disk_templates)) or \
-           (self.cfg.GetVGName() is not None and
-            utils.LvmGetsEnabled(enabled_disk_templates,
-                                 new_enabled_disk_templates)):
+    if (self.op.vg_name is not None and lvm_is_enabled) or \
+        (self.cfg.GetVGName() is not None and lvm_gets_enabled):
       self._CheckVgNameOnNodes(node_uuids)
 
   def _CheckVgNameOnNodes(self, node_uuids):
@@ -710,22 +786,71 @@ class LUClusterSetParams(LogicalUnit):
                                    (self.cfg.GetNodeName(node_uuid), vgstatus),
                                    errors.ECODE_ENVIRON)
 
-  def _GetEnabledDiskTemplates(self, cluster):
+  @staticmethod
+  def _GetEnabledDiskTemplatesInner(op_enabled_disk_templates,
+                                    old_enabled_disk_templates):
     """Determines the enabled disk templates and the subset of disk templates
        that are newly enabled by this operation.
 
+    @param op_enabled_disk_templates: list of disk templates requested by the
+        opcode, or a false value if the opcode does not change them
+    @param old_enabled_disk_templates: list of currently enabled disk
+        templates
+    @rtype: (list of string, list of string)
+    @return: tuple of the resulting enabled disk templates and the disk
+        templates that are newly enabled by this operation
+
     """
     enabled_disk_templates = None
     new_enabled_disk_templates = []
-    if self.op.enabled_disk_templates:
-      enabled_disk_templates = self.op.enabled_disk_templates
+    if op_enabled_disk_templates:
+      enabled_disk_templates = op_enabled_disk_templates
       new_enabled_disk_templates = \
         list(set(enabled_disk_templates)
-             - set(cluster.enabled_disk_templates))
+             - set(old_enabled_disk_templates))
     else:
-      enabled_disk_templates = cluster.enabled_disk_templates
+      enabled_disk_templates = old_enabled_disk_templates
     return (enabled_disk_templates, new_enabled_disk_templates)
 
+  def _GetEnabledDiskTemplates(self, cluster):
+    """Determines the enabled disk templates and the subset of disk templates
+       that are newly enabled by this operation.
+
+    @type cluster: C{objects.Cluster}
+    @param cluster: the cluster's configuration
+    @rtype: (list of string, list of string)
+    @return: tuple of the resulting enabled disk templates and the disk
+        templates that are newly enabled by this operation
+
+    """
+    return self._GetEnabledDiskTemplatesInner(self.op.enabled_disk_templates,
+                                              cluster.enabled_disk_templates)
+
+  def _CheckIpolicy(self, cluster, enabled_disk_templates):
+    """Checks the ipolicy.
+
+    @type cluster: C{objects.Cluster}
+    @param cluster: the cluster's configuration
+    @type enabled_disk_templates: list of string
+    @param enabled_disk_templates: list of (possibly newly) enabled disk
+      templates
+
+    """
+    # FIXME: write unit tests for this
+    if self.op.ipolicy:
+      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
+                                           group_policy=False)
+
+      CheckIpolicyVsDiskTemplates(self.new_ipolicy,
+                                  enabled_disk_templates)
+
+      # Collect, per node group, the instances that would newly violate the
+      # updated policy, and only warn about them (do not fail the operation).
+      all_instances = self.cfg.GetAllInstancesInfo().values()
+      violations = set()
+      for group in self.cfg.GetAllNodeGroupsInfo().values():
+        instances = frozenset([inst for inst in all_instances
+                               if compat.any(nuuid in group.members
+                                             for nuuid in inst.all_nodes)])
+        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
+        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
+        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
+                                           self.cfg)
+        if new:
+          violations.update(new)
+
+      if violations:
+        self.LogWarning("After the ipolicy change the following instances"
+                        " violate them: %s",
+                        utils.CommaJoin(utils.NiceSort(violations)))
+    else:
+      # No ipolicy change requested: still verify the existing policy
+      # against the (possibly changed) set of enabled disk templates.
+      CheckIpolicyVsDiskTemplates(cluster.ipolicy,
+                                  enabled_disk_templates)
+
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -752,6 +877,15 @@ class LUClusterSetParams(LogicalUnit):
     self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
                       new_enabled_disk_templates)
 
+    if self.op.file_storage_dir is not None:
+      CheckFileStoragePathVsEnabledDiskTemplates(
+          self.LogWarning, self.op.file_storage_dir, enabled_disk_templates)
+
+    if self.op.shared_file_storage_dir is not None:
+      CheckSharedFileStoragePathVsEnabledDiskTemplates(
+          self.LogWarning, self.op.shared_file_storage_dir,
+          enabled_disk_templates)
+
     if self.op.drbd_helper:
       # checks given drbd helper on all nodes
       helpers = self.rpc.call_drbd_helper(node_uuids)
@@ -801,27 +935,7 @@ class LUClusterSetParams(LogicalUnit):
                             for name, values in svalues.items()))
              for storage, svalues in new_disk_state.items())
 
-    if self.op.ipolicy:
-      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
-                                           group_policy=False)
-
-      all_instances = self.cfg.GetAllInstancesInfo().values()
-      violations = set()
-      for group in self.cfg.GetAllNodeGroupsInfo().values():
-        instances = frozenset([inst for inst in all_instances
-                               if compat.any(nuuid in group.members
-                                             for nuuid in inst.all_nodes)])
-        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
-        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
-        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
-                                           self.cfg)
-        if new:
-          violations.update(new)
-
-      if violations:
-        self.LogWarning("After the ipolicy change the following instances"
-                        " violate them: %s",
-                        utils.CommaJoin(utils.NiceSort(violations)))
+    self._CheckIpolicy(cluster, enabled_disk_templates)
 
     if self.op.nicparams:
       utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
@@ -864,7 +978,7 @@ class LUClusterSetParams(LogicalUnit):
     self.new_diskparams = objects.FillDict(cluster.diskparams, {})
     if self.op.diskparams:
       for dt_name, dt_params in self.op.diskparams.items():
-        if dt_name not in self.op.diskparams:
+        if dt_name not in self.new_diskparams:
           self.new_diskparams[dt_name] = dt_params
         else:
           self.new_diskparams[dt_name].update(dt_params)
@@ -977,26 +1091,25 @@ class LUClusterSetParams(LogicalUnit):
 
     """
     if self.op.vg_name is not None:
-      if self.op.vg_name and not \
-           utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
-        feedback_fn("Note that you specified a volume group, but did not"
-                    " enable any lvm disk template.")
       new_volume = self.op.vg_name
       if not new_volume:
-        if utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
-          raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
-                                     " disk templates are enabled.")
         new_volume = None
       if new_volume != self.cfg.GetVGName():
         self.cfg.SetVGName(new_volume)
       else:
         feedback_fn("Cluster LVM configuration already in desired"
                     " state, not changing")
-    else:
-      if utils.IsLvmEnabled(self.cluster.enabled_disk_templates) and \
-          not self.cfg.GetVGName():
-        raise errors.OpPrereqError("Please specify a volume group when"
-                                   " enabling lvm-based disk-templates.")
+
+  def _SetFileStorageDir(self, feedback_fn):
+    """Set the file storage directory.
+
+    @type feedback_fn: function
+    @param feedback_fn: function which accepts a string and feeds it back
+        to the user
+
+    """
+    if self.op.file_storage_dir is not None:
+      if self.cluster.file_storage_dir == self.op.file_storage_dir:
+        feedback_fn("Global file storage dir already set to value '%s'"
+                    % self.cluster.file_storage_dir)
+      else:
+        self.cluster.file_storage_dir = self.op.file_storage_dir
 
   def Exec(self, feedback_fn):
     """Change the parameters of the cluster.
@@ -1007,6 +1120,7 @@ class LUClusterSetParams(LogicalUnit):
         list(set(self.op.enabled_disk_templates))
 
     self._SetVgName(feedback_fn)
+    self._SetFileStorageDir(feedback_fn)
 
     if self.op.drbd_helper is not None:
       if not constants.DT_DRBD8 in self.cluster.enabled_disk_templates:
@@ -1055,6 +1169,9 @@ class LUClusterSetParams(LogicalUnit):
                     " maintenance is not useful (still enabling it)")
       self.cluster.maintain_node_health = self.op.maintain_node_health
 
+    if self.op.modify_etc_hosts is not None:
+      self.cluster.modify_etc_hosts = self.op.modify_etc_hosts
+
     if self.op.prealloc_wipe_disks is not None:
       self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
 
@@ -1355,6 +1472,13 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
       (errcode, msg) = _VerifyCertificate(cert_filename)
       self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
 
+    self._ErrorIf(not utils.CanRead(constants.LUXID_USER,
+                                    pathutils.NODED_CERT_FILE),
+                  constants.CV_ECLUSTERCERT,
+                  None,
+                  pathutils.NODED_CERT_FILE + " must be accessible by the " +
+                    constants.LUXID_USER + " user")
+
     feedback_fn("* Verifying hypervisor parameters")
 
     self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
@@ -1381,9 +1505,8 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
     pretty_dangling = [
         "%s (%s)" %
         (node.name,
-         utils.CommaJoin(
-           self.cfg.GetInstanceNames(
-             dangling_instances.get(node.uuid, ["no instances"]))))
+         utils.CommaJoin(inst.name for
+                         inst in dangling_instances.get(node.uuid, [])))
         for node in dangling_nodes]
 
     self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
@@ -1394,8 +1517,8 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
     self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
                   None,
                   "the following instances have a non-existing primary-node:"
-                  " %s", utils.CommaJoin(
-                           self.cfg.GetInstanceNames(no_node_instances)))
+                  " %s", utils.CommaJoin(inst.name for
+                                         inst in no_node_instances))
 
     return not self.bad
 
@@ -2294,8 +2417,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                   " but missing on this node: %s",
                   self.cfg.GetNodeName(base.uuid), utils.CommaJoin(missing))
 
-  def _VerifyFileStoragePaths(self, ninfo, nresult, is_master,
-                              enabled_disk_templates):
+  def _VerifyAcceptedFileStoragePaths(self, ninfo, nresult, is_master):
     """Verifies paths in L{pathutils.FILE_STORAGE_PATHS_FILE}.
 
     @type ninfo: L{objects.Node}
@@ -2305,11 +2427,12 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param is_master: Whether node is the master node
 
     """
+    cluster = self.cfg.GetClusterInfo()
     if (is_master and
-        (utils.storage.IsFileStorageEnabled(enabled_disk_templates) or
-         utils.storage.IsSharedFileStorageEnabled(enabled_disk_templates))):
+        (cluster.IsFileStorageEnabled() or
+         cluster.IsSharedFileStorageEnabled())):
       try:
-        fspaths = nresult[constants.NV_FILE_STORAGE_PATHS]
+        fspaths = nresult[constants.NV_ACCEPTED_STORAGE_PATHS]
       except KeyError:
         # This should never happen
         self._ErrorIf(True, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
@@ -2319,11 +2442,60 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                       "Found forbidden file storage paths: %s",
                       utils.CommaJoin(fspaths))
     else:
-      self._ErrorIf(constants.NV_FILE_STORAGE_PATHS in nresult,
+      self._ErrorIf(constants.NV_ACCEPTED_STORAGE_PATHS in nresult,
                     constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
                     "Node should not have returned forbidden file storage"
                     " paths")
 
+  def _VerifyStoragePaths(self, ninfo, nresult, file_disk_template,
+                          verify_key, error_key):
+    """Verifies (file) storage paths.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @type file_disk_template: string
+    @param file_disk_template: file-based disk template, whose directory
+        is supposed to be verified
+    @type verify_key: string
+    @param verify_key: key for the verification map of this file
+        verification step
+    @type error_key: string
+    @param error_key: error key to be added to the verification results
+        in case something goes wrong in this verification step
+
+    """
+    assert (file_disk_template in
+            utils.storage.GetDiskTemplatesOfStorageType(constants.ST_FILE))
+    cluster = self.cfg.GetClusterInfo()
+    if cluster.IsDiskTemplateEnabled(file_disk_template):
+      # Presence of the verify key in the node result indicates that the
+      # path check failed; its value carries the failure description.
+      self._ErrorIf(
+          verify_key in nresult,
+          error_key, ninfo.name,
+          "The configured %s storage path is unusable: %s" %
+          (file_disk_template, nresult.get(verify_key)))
+
+  def _VerifyFileStoragePaths(self, ninfo, nresult):
+    """Verifies the file storage path on the given node.
+
+    @see: C{_VerifyStoragePaths}
+
+    """
+    self._VerifyStoragePaths(
+        ninfo, nresult, constants.DT_FILE,
+        constants.NV_FILE_STORAGE_PATH,
+        constants.CV_ENODEFILESTORAGEPATHUNUSABLE)
+
+  def _VerifySharedFileStoragePaths(self, ninfo, nresult):
+    """Verifies the shared file storage path on the given node.
+
+    @see: C{_VerifyStoragePaths}
+
+    """
+    self._VerifyStoragePaths(
+        ninfo, nresult, constants.DT_SHARED_FILE,
+        constants.NV_SHARED_FILE_STORAGE_PATH,
+        constants.CV_ENODESHAREDFILESTORAGEPATHUNUSABLE)
+
   def _VerifyOob(self, ninfo, nresult):
     """Verifies out of band functionality of a node.
 
@@ -2666,10 +2838,14 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       node_verify_param[constants.NV_DRBDLIST] = None
       node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
 
-    if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
+    if cluster.IsFileStorageEnabled() or \
+        cluster.IsSharedFileStorageEnabled():
       # Load file storage paths only from master node
-      node_verify_param[constants.NV_FILE_STORAGE_PATHS] = \
+      node_verify_param[constants.NV_ACCEPTED_STORAGE_PATHS] = \
         self.cfg.GetMasterNodeName()
+      if cluster.IsFileStorageEnabled():
+        node_verify_param[constants.NV_FILE_STORAGE_PATH] = \
+          cluster.file_storage_dir
 
     # bridge checks
     # FIXME: this needs to be changed per node-group, not cluster-wide
@@ -2833,9 +3009,10 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       self._VerifyNodeNetwork(node_i, nresult)
       self._VerifyNodeUserScripts(node_i, nresult)
       self._VerifyOob(node_i, nresult)
-      self._VerifyFileStoragePaths(node_i, nresult,
-                                   node_i.uuid == master_node_uuid,
-                                   cluster.enabled_disk_templates)
+      self._VerifyAcceptedFileStoragePaths(node_i, nresult,
+                                           node_i.uuid == master_node_uuid)
+      self._VerifyFileStoragePaths(node_i, nresult)
+      self._VerifySharedFileStoragePaths(node_i, nresult)
 
       if nimg.vm_capable:
         self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)