GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
- ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob
+ ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
+ CheckIpolicyVsDiskTemplates
import ganeti.masterd.instance
"config_version": constants.CONFIG_VERSION,
"os_api_version": max(constants.OS_API_VERSIONS),
"export_version": constants.EXPORT_VERSION,
+ "vcs_version": constants.VCS_VERSION,
"architecture": runtime.GetArchInfo(),
"name": cluster.cluster_name,
"master": self.cfg.GetMasterNodeName(),
(netmask), errors.ECODE_INVAL)
+def CheckFileBasedStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates,
+    file_disk_template):
+  """Checks whether the given file-based storage directory is acceptable.
+
+  Note: This function is public, because it is also used in bootstrap.py.
+
+  @type logging_warn_fn: function
+  @param logging_warn_fn: function which accepts a string and logs it
+  @type file_storage_dir: string
+  @param file_storage_dir: the directory to be used for file-based instances
+  @type enabled_disk_templates: list of string
+  @param enabled_disk_templates: the list of enabled disk templates
+  @type file_disk_template: string
+  @param file_disk_template: the file-based disk template for which the
+      path should be checked
+
+  @raise errors.OpPrereqError: if the directory is unset ("") while the
+      corresponding disk template is enabled
+  @raise errors.ProgrammerError: if file_storage_dir is C{None}
+
+  """
+  assert (file_disk_template in
+          utils.storage.GetDiskTemplatesOfStorageType(constants.ST_FILE))
+  file_storage_enabled = file_disk_template in enabled_disk_templates
+  if file_storage_dir is not None:
+    if file_storage_dir == "":
+      if file_storage_enabled:
+        # an empty string means "unset the directory"; refuse that while
+        # the matching disk template is still enabled
+        raise errors.OpPrereqError(
+            "Unsetting the '%s' storage directory while having '%s' storage"
+            " enabled is not permitted." %
+            (file_disk_template, file_disk_template),
+            errors.ECODE_INVAL)
+    else:
+      if not file_storage_enabled:
+        # not fatal: the directory is simply unused until the template
+        # gets enabled
+        logging_warn_fn(
+            "Specified a %s storage directory, although %s storage is not"
+            " enabled." % (file_disk_template, file_disk_template))
+  else:
+    raise errors.ProgrammerError("Received %s storage dir with value"
+                                 " 'None'." % file_disk_template)
+
+def CheckFileStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given file storage directory is acceptable.
+
+  @type logging_warn_fn: function
+  @param logging_warn_fn: function which accepts a string and logs it
+  @type file_storage_dir: string
+  @param file_storage_dir: the directory to be used for file-based instances
+  @type enabled_disk_templates: list of string
+  @param enabled_disk_templates: the list of enabled disk templates
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_FILE)
+
+
+def CheckSharedFileStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given shared file storage directory is acceptable.
+
+  @type logging_warn_fn: function
+  @param logging_warn_fn: function which accepts a string and logs it
+  @type file_storage_dir: string
+  @param file_storage_dir: the directory to be used for shared file-based
+      instances
+  @type enabled_disk_templates: list of string
+  @param enabled_disk_templates: the list of enabled disk templates
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_SHARED_FILE)
+
+
class LUClusterSetParams(LogicalUnit):
"""Change the parameters of the cluster.
enabled_disk_templates = cluster.enabled_disk_templates
return (enabled_disk_templates, new_enabled_disk_templates)
+  def _CheckIpolicy(self, cluster, enabled_disk_templates):
+    """Checks the ipolicy.
+
+    @type cluster: C{objects.Cluster}
+    @param cluster: the cluster's configuration
+    @type enabled_disk_templates: list of string
+    @param enabled_disk_templates: list of (possibly newly) enabled disk
+      templates
+
+    """
+    # FIXME: write unit tests for this
+    if self.op.ipolicy:
+      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
+                                           group_policy=False)
+
+      CheckIpolicyVsDiskTemplates(self.new_ipolicy,
+                                  enabled_disk_templates)
+
+      # collect instances that would violate the new policy, per node group
+      # (the policy is filled with each group's overrides before checking)
+      all_instances = self.cfg.GetAllInstancesInfo().values()
+      violations = set()
+      for group in self.cfg.GetAllNodeGroupsInfo().values():
+        instances = frozenset([inst for inst in all_instances
+                               if compat.any(nuuid in group.members
+                                             for nuuid in inst.all_nodes)])
+        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
+        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
+        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
+                                           self.cfg)
+        if new:
+          violations.update(new)
+
+      # violations only produce a warning, not a prereq failure
+      if violations:
+        self.LogWarning("After the ipolicy change the following instances"
+                        " violate them: %s",
+                        utils.CommaJoin(utils.NiceSort(violations)))
+    else:
+      # no new ipolicy given: still check the current one against the
+      # (possibly changed) set of enabled disk templates
+      CheckIpolicyVsDiskTemplates(cluster.ipolicy,
+                                  enabled_disk_templates)
+
def CheckPrereq(self):
"""Check prerequisites.
self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
new_enabled_disk_templates)
+ if self.op.file_storage_dir is not None:
+ CheckFileStoragePathVsEnabledDiskTemplates(
+ self.LogWarning, self.op.file_storage_dir, enabled_disk_templates)
+
+ if self.op.shared_file_storage_dir is not None:
+ CheckSharedFileStoragePathVsEnabledDiskTemplates(
+ self.LogWarning, self.op.shared_file_storage_dir,
+ enabled_disk_templates)
+
if self.op.drbd_helper:
# checks given drbd helper on all nodes
helpers = self.rpc.call_drbd_helper(node_uuids)
for name, values in svalues.items()))
for storage, svalues in new_disk_state.items())
- if self.op.ipolicy:
- self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
- group_policy=False)
-
- all_instances = self.cfg.GetAllInstancesInfo().values()
- violations = set()
- for group in self.cfg.GetAllNodeGroupsInfo().values():
- instances = frozenset([inst for inst in all_instances
- if compat.any(nuuid in group.members
- for nuuid in inst.all_nodes)])
- new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
- ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
- new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
- self.cfg)
- if new:
- violations.update(new)
-
- if violations:
- self.LogWarning("After the ipolicy change the following instances"
- " violate them: %s",
- utils.CommaJoin(utils.NiceSort(violations)))
+ self._CheckIpolicy(cluster, enabled_disk_templates)
if self.op.nicparams:
utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
self.new_diskparams = objects.FillDict(cluster.diskparams, {})
if self.op.diskparams:
for dt_name, dt_params in self.op.diskparams.items():
- if dt_name not in self.op.diskparams:
+ if dt_name not in self.new_diskparams:
self.new_diskparams[dt_name] = dt_params
else:
self.new_diskparams[dt_name].update(dt_params)
raise errors.OpPrereqError("Please specify a volume group when"
" enabling lvm-based disk-templates.")
+  def _SetFileStorageDir(self, feedback_fn):
+    """Set the file storage directory.
+
+    @type feedback_fn: function
+    @param feedback_fn: function which accepts a string and reports it back
+        to the caller
+
+    """
+    if self.op.file_storage_dir is not None:
+      if self.cluster.file_storage_dir == self.op.file_storage_dir:
+        # nothing to do; just inform the user
+        feedback_fn("Global file storage dir already set to value '%s'"
+                    % self.cluster.file_storage_dir)
+      else:
+        self.cluster.file_storage_dir = self.op.file_storage_dir
+
def Exec(self, feedback_fn):
"""Change the parameters of the cluster.
list(set(self.op.enabled_disk_templates))
self._SetVgName(feedback_fn)
+ self._SetFileStorageDir(feedback_fn)
if self.op.drbd_helper is not None:
if not constants.DT_DRBD8 in self.cluster.enabled_disk_templates:
" maintenance is not useful (still enabling it)")
self.cluster.maintain_node_health = self.op.maintain_node_health
+ if self.op.modify_etc_hosts is not None:
+ self.cluster.modify_etc_hosts = self.op.modify_etc_hosts
+
if self.op.prealloc_wipe_disks is not None:
self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
(errcode, msg) = _VerifyCertificate(cert_filename)
self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
+ self._ErrorIf(not utils.CanRead(constants.LUXID_USER,
+ pathutils.NODED_CERT_FILE),
+ constants.CV_ECLUSTERCERT,
+ None,
+ pathutils.NODED_CERT_FILE + " must be accessible by the " +
+ constants.LUXID_USER + " user")
+
feedback_fn("* Verifying hypervisor parameters")
self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
pretty_dangling = [
"%s (%s)" %
(node.name,
- utils.CommaJoin(
- self.cfg.GetInstanceNames(
- dangling_instances.get(node.uuid, ["no instances"]))))
+ utils.CommaJoin(inst.name for
+ inst in dangling_instances.get(node.uuid, [])))
for node in dangling_nodes]
self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
None,
"the following instances have a non-existing primary-node:"
- " %s", utils.CommaJoin(
- self.cfg.GetInstanceNames(no_node_instances)))
+ " %s", utils.CommaJoin(inst.name for
+ inst in no_node_instances))
return not self.bad
"Node should not have returned forbidden file storage"
" paths")
+  def _VerifyStoragePaths(self, ninfo, nresult, file_disk_template,
+                          verify_key, error_key):
+    """Verifies (file) storage paths.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @type file_disk_template: string
+    @param file_disk_template: file-based disk template, whose directory
+      is supposed to be verified
+    @type verify_key: string
+    @param verify_key: key for the verification map of this file
+      verification step
+    @param error_key: error key to be added to the verification results
+      in case something goes wrong in this verification step
+
+    """
+    assert (file_disk_template in
+            utils.storage.GetDiskTemplatesOfStorageType(constants.ST_FILE))
+    cluster = self.cfg.GetClusterInfo()
+    # only verify the path when the matching template is actually enabled
+    if cluster.IsDiskTemplateEnabled(file_disk_template):
+      # the node signals a broken path by putting verify_key into its
+      # result map; the value is the error description
+      self._ErrorIf(
+          verify_key in nresult,
+          error_key, ninfo.name,
+          "The configured %s storage path is unusable: %s" %
+          (file_disk_template, nresult.get(verify_key)))
+
+  def _VerifyFileStoragePaths(self, ninfo, nresult):
+    """Verifies the file storage path on the given node.
+
+    @see: C{_VerifyStoragePaths}
+
+    """
+    self._VerifyStoragePaths(
+        ninfo, nresult, constants.DT_FILE,
+        constants.NV_FILE_STORAGE_PATH,
+        constants.CV_ENODEFILESTORAGEPATHUNUSABLE)
+
+  def _VerifySharedFileStoragePaths(self, ninfo, nresult):
+    """Verifies the shared file storage path on the given node.
+
+    @see: C{_VerifyStoragePaths}
+
+    """
+    self._VerifyStoragePaths(
+        ninfo, nresult, constants.DT_SHARED_FILE,
+        constants.NV_SHARED_FILE_STORAGE_PATH,
+        constants.CV_ENODESHAREDFILESTORAGEPATHUNUSABLE)
+
def _VerifyOob(self, ninfo, nresult):
"""Verifies out of band functionality of a node.
self._VerifyOob(node_i, nresult)
self._VerifyAcceptedFileStoragePaths(node_i, nresult,
node_i.uuid == master_node_uuid)
+ self._VerifyFileStoragePaths(node_i, nresult)
+ self._VerifySharedFileStoragePaths(node_i, nresult)
if nimg.vm_capable:
self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)