if master.secondary:
cmd.append("--secondary-ip=%s" % master.secondary)
- vgname = qa_config.get("vg-name", None)
- if vgname:
- cmd.append("--vg-name=%s" % vgname)
+ if utils.IsLvmEnabled(qa_config.GetEnabledDiskTemplates()):
+ vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
+ if vgname:
+ cmd.append("--vg-name=%s" % vgname)
+ else:
+ raise qa_error.Error("Please specify a volume group if you enable"
+ " lvm-based disk templates in the QA.")
master_netdev = qa_config.get("master-netdev", None)
if master_netdev:
AssertCommand(["gnt-cluster", "verify-disks"])
+# pylint: disable=W0613
+def TestClusterVerifyDisksBrokenDRBD(instance, inst_nodes):
+  """gnt-cluster verify-disks with broken DRBD"""
+  # Intentional no-op placeholder: both arguments are deliberately unused
+  # (hence the W0613 unused-argument pylint disable above). The real test
+  # body is kept commented out below until issue 516 is resolved.
+  pass
+
+# FIXME (thomasth): reenable once it works (see issue 516!)
+# qa_daemon.TestPauseWatcher()
+#
+# try:
+#   info = qa_instance.GetInstanceInfo(instance.name)
+#   snode = inst_nodes[1]
+#   for idx, minor in enumerate(info["drbd-minors"][snode.primary]):
+#     if idx % 2 == 0:
+#       break_drbd_cmd = \
+#         "(drbdsetup %d down >/dev/null 2>&1;" \
+#         " drbdsetup down resource%d >/dev/null 2>&1) || /bin/true" % \
+#         (minor, minor)
+#     else:
+#       break_drbd_cmd = \
+#         "(drbdsetup %d detach >/dev/null 2>&1;" \
+#         " drbdsetup detach %d >/dev/null 2>&1) || /bin/true" % \
+#         (minor, minor)
+#     AssertCommand(break_drbd_cmd, node=snode)
+#
+#   verify_output = GetCommandOutput(qa_config.GetMasterNode().primary,
+#                                    "gnt-cluster verify-disks")
+#   activation_msg = "Activating disks for instance '%s'" % instance.name
+#   if activation_msg not in verify_output:
+#     raise qa_error.Error("gnt-cluster verify-disks did not activate broken"
+#                          " DRBD disks:\n%s" % verify_output)
+#
+#   verify_output = GetCommandOutput(qa_config.GetMasterNode().primary,
+#                                    "gnt-cluster verify-disks")
+#   if activation_msg in verify_output:
+#     raise qa_error.Error("gnt-cluster verify-disks wants to activate broken"
+#                          " DRBD disks on second attempt:\n%s" % verify_output)
+#
+#   AssertCommand(_CLUSTER_VERIFY)
+# finally:
+#   qa_daemon.TestResumeWatcher()
+
+
def TestJobqueue():
"""gnt-debug test-jobqueue"""
AssertCommand(["gnt-debug", "test-jobqueue"])
_TestClusterModifyDiskTemplatesArguments(default_disk_template,
enabled_disk_templates)
+ _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates)
_RestoreEnabledDiskTemplates()
nodes = qa_config.AcquireManyNodes(2)
other tests.
"""
- AssertCommand(
- ["gnt-cluster", "modify",
- "--enabled-disk-template=%s" %
- ",".join(qa_config.GetEnabledDiskTemplates())],
- fail=False)
+ cmd = ["gnt-cluster", "modify", "--enabled-disk-templates=%s" %
+ ",".join(qa_config.GetEnabledDiskTemplates())]
+
+ if utils.IsLvmEnabled(qa_config.GetEnabledDiskTemplates()):
+ vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
+ cmd.append("--vg-name=%s" % vgname)
+
+ AssertCommand(cmd, fail=False)
def _TestClusterModifyDiskTemplatesArguments(default_disk_template,
of instances.
"""
- AssertCommand(
- ["gnt-cluster", "modify",
- "--enabled-disk-template=%s" %
- ",".join(enabled_disk_templates)],
- fail=False)
+ _RestoreEnabledDiskTemplates()
# bogus templates
AssertCommand(["gnt-cluster", "modify",
(default_disk_template, default_disk_template)],
fail=False)
+ if constants.DT_DRBD8 in enabled_disk_templates:
+ # interaction with --drbd-usermode-helper option
+ drbd_usermode_helper = qa_config.get("drbd-usermode-helper", None)
+ if not drbd_usermode_helper:
+ drbd_usermode_helper = "/bin/true"
+ # specifying a helper when drbd gets disabled is ok. Note that drbd still
+ # has to be installed on the nodes in this case
+ AssertCommand(["gnt-cluster", "modify",
+ "--drbd-usermode-helper=%s" % drbd_usermode_helper,
+ "--enabled-disk-templates=%s" % constants.DT_DISKLESS],
+ fail=False)
+ # specifying a helper when drbd is re-enabled
+ AssertCommand(["gnt-cluster", "modify",
+ "--drbd-usermode-helper=%s" % drbd_usermode_helper,
+ "--enabled-disk-templates=%s" %
+ ",".join(enabled_disk_templates)],
+ fail=False)
+
+
+def _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates):
+  """Tests argument handling of 'gnt-cluster modify' with respect to
+  the parameter '--enabled-disk-templates' and '--vg-name'. This test is
+  independent of instances.
+
+  @param enabled_disk_templates: list of disk templates enabled for this QA run
+
+  """
+  if not utils.IsLvmEnabled(enabled_disk_templates):
+    # These tests only make sense if lvm is enabled for QA
+    return
+
+  # determine an LVM and a non-LVM disk template for the tests
+  non_lvm_templates = list(set(enabled_disk_templates)
+                           - set(utils.GetLvmDiskTemplates()))
+  lvm_template = list(set(enabled_disk_templates)
+                      .intersection(set(utils.GetLvmDiskTemplates())))[0]
+  non_lvm_template = None
+  if non_lvm_templates:
+    non_lvm_template = non_lvm_templates[0]
+  else:
+    # If no non-lvm disk template is available for QA, choose 'diskless' and
+    # hope for the best. Note: use the disk template constant (DT_DISKLESS),
+    # not the storage type constant (ST_DISKLESS), since this value is passed
+    # to '--enabled-disk-templates' below.
+    non_lvm_template = constants.DT_DISKLESS
+
+  vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
+
+  # Clean start: unset volume group name, disable lvm storage
+  AssertCommand(
+    ["gnt-cluster", "modify",
+     "--enabled-disk-templates=%s" % non_lvm_template,
+     "--vg-name="],
+    fail=False)
+
+  # Try to enable lvm, when no volume group is given
+  AssertCommand(
+    ["gnt-cluster", "modify",
+     "--enabled-disk-templates=%s" % lvm_template],
+    fail=True)
+
+  # Set volume group, with lvm still disabled: just a warning
+  AssertCommand(["gnt-cluster", "modify", "--vg-name=%s" % vgname], fail=False)
+
+  # Try unsetting vg name and enabling lvm at the same time
+  AssertCommand(
+    ["gnt-cluster", "modify",
+     "--enabled-disk-templates=%s" % lvm_template,
+     "--vg-name="],
+    fail=True)
+
+  # Enable lvm with vg name present
+  AssertCommand(
+    ["gnt-cluster", "modify",
+     "--enabled-disk-templates=%s" % lvm_template],
+    fail=False)
+
+  # Try unsetting vg name with lvm still enabled
+  AssertCommand(["gnt-cluster", "modify", "--vg-name="], fail=True)
+
+  # Disable lvm with vg name still set
+  AssertCommand(
+    ["gnt-cluster", "modify", "--enabled-disk-templates=%s" % non_lvm_template],
+    fail=False)
+
+  # Try unsetting vg name with lvm disabled
+  AssertCommand(["gnt-cluster", "modify", "--vg-name="], fail=False)
+
+  # Set vg name and enable lvm at the same time
+  AssertCommand(
+    ["gnt-cluster", "modify",
+     "--enabled-disk-templates=%s" % lvm_template,
+     "--vg-name=%s" % vgname],
+    fail=False)
+
+  # Unset vg name and disable lvm at the same time
+  AssertCommand(
+    ["gnt-cluster", "modify",
+     "--enabled-disk-templates=%s" % non_lvm_template,
+     "--vg-name="],
+    fail=False)
+
+  _RestoreEnabledDiskTemplates()
+
+
def _TestClusterModifyUsedDiskTemplate(instance_template,
enabled_disk_templates):
new_disk_templates = list(set(enabled_disk_templates)
- set([instance_template]))
if not new_disk_templates:
- new_disk_templates = list(set(constants.DISK_TEMPLATES)
+ new_disk_templates = list(set([constants.DT_DISKLESS, constants.DT_BLOCK])
- set([instance_template]))
AssertCommand(
["gnt-cluster", "modify",
def _TestClusterModifyUnusedDiskTemplate(instance_template):
"""Tests that unused disk templates can be disabled safely."""
all_disk_templates = constants.DISK_TEMPLATES
+ if not utils.IsLvmEnabled(qa_config.GetEnabledDiskTemplates()):
+ all_disk_templates = list(set(all_disk_templates) -
+ set(utils.GetLvmDiskTemplates()))
+
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" %
@rtype: tuple
@return: (policy, specs), where:
- policy is a dictionary of the policy values, instance specs excluded
- - specs is dict of dict, specs[par][key] is a spec value, where key is
- "min", "max", or "std"
+ - specs is a dictionary containing only the specs, using the internal
+ format (see L{constants.IPOLICY_DEFAULTS} for an example)
"""
info = qa_utils.GetObjectInfo(["gnt-cluster", "info"])
policy = info["Instance policy - limits for instances"]
- ret_specs = {}
- ret_policy = {}
- ispec_keys = constants.ISPECS_MINMAX_KEYS | frozenset([constants.ISPECS_STD])
- for (key, val) in policy.items():
- if key in ispec_keys:
- for (par, pval) in val.items():
- if par == "memory-size":
- par = "mem-size"
- d = ret_specs.setdefault(par, {})
- d[key] = pval
- else:
- ret_policy[key] = val
+ (ret_policy, ret_specs) = qa_utils.ParseIPolicy(policy)
# Sanity checks
- assert len(ret_specs) > 0
- good = all("min" in d and "std" in d and "max" in d
- for d in ret_specs.values())
- assert good, "Missing item in specs: %s" % ret_specs
+ assert "minmax" in ret_specs and "std" in ret_specs
+ assert len(ret_specs["minmax"]) > 0
assert len(ret_policy) > 0
return (ret_policy, ret_specs)
# Disk templates are treated slightly differently
par = "disk-templates"
- disp_str = "enabled disk templates"
+ disp_str = "allowed disk templates"
curr_val = old_policy[disp_str]
test_values = [
(True, constants.DT_PLAIN),
AssertEqual(eff_policy[p], old_policy[p])
-def TestClusterSetISpecs(new_specs, fail=False, old_values=None):
+def TestClusterSetISpecs(new_specs=None, diff_specs=None, fail=False,
+ old_values=None):
"""Change instance specs.
- @type new_specs: dict of dict
- @param new_specs: new_specs[par][key], where key is "min", "max", "std". It
- can be an empty dictionary.
+ At most one of new_specs or diff_specs can be specified.
+
+ @type new_specs: dict
+ @param new_specs: new complete specs, in the same format returned by
+ L{_GetClusterIPolicy}
+ @type diff_specs: dict
+  @param diff_specs: partial specs; it can be an incomplete specification, but
+ if min/max specs are specified, their number must match the number of the
+ existing specs
@type fail: bool
@param fail: if the change is expected to fail
@type old_values: tuple
@param old_values: (old_policy, old_specs), as returned by
- L{_GetClusterIPolicy}
+ L{_GetClusterIPolicy}
@return: same as L{_GetClusterIPolicy}
"""
- if old_values:
- (old_policy, old_specs) = old_values
- else:
- (old_policy, old_specs) = _GetClusterIPolicy()
- if new_specs:
- cmd = ["gnt-cluster", "modify"]
- for (par, keyvals) in new_specs.items():
- if par == "spindle-use":
- # ignore spindle-use, which is not settable
- continue
- cmd += [
- "--specs-%s" % par,
- ",".join(["%s=%s" % (k, v) for (k, v) in keyvals.items()]),
- ]
- AssertCommand(cmd, fail=fail)
- # Check the new state
- (eff_policy, eff_specs) = _GetClusterIPolicy()
- AssertEqual(eff_policy, old_policy)
- if fail:
- AssertEqual(eff_specs, old_specs)
- else:
- for par in eff_specs:
- for key in eff_specs[par]:
- if par in new_specs and key in new_specs[par]:
- AssertEqual(int(eff_specs[par][key]), int(new_specs[par][key]))
- else:
- AssertEqual(int(eff_specs[par][key]), int(old_specs[par][key]))
- return (eff_policy, eff_specs)
+ build_cmd = lambda opts: ["gnt-cluster", "modify"] + opts
+ return qa_utils.TestSetISpecs(
+ new_specs=new_specs, diff_specs=diff_specs,
+ get_policy_fn=_GetClusterIPolicy, build_cmd_fn=build_cmd,
+ fail=fail, old_values=old_values)
def TestClusterModifyISpecs():
"""gnt-cluster modify --specs-*"""
- params = ["mem-size", "disk-size", "disk-count", "cpu-count", "nic-count"]
+ params = ["memory-size", "disk-size", "disk-count", "cpu-count", "nic-count"]
(cur_policy, cur_specs) = _GetClusterIPolicy()
+ # This test assumes that there is only one min/max bound
+ assert len(cur_specs[constants.ISPECS_MINMAX]) == 1
for par in params:
test_values = [
(True, 0, 4, 12),
(False, 0, 4, "a"),
# This is to restore the old values
(True,
- cur_specs[par]["min"], cur_specs[par]["std"], cur_specs[par]["max"])
+ cur_specs[constants.ISPECS_MINMAX][0][constants.ISPECS_MIN][par],
+ cur_specs[constants.ISPECS_STD][par],
+ cur_specs[constants.ISPECS_MINMAX][0][constants.ISPECS_MAX][par])
]
for (good, mn, st, mx) in test_values:
- new_vals = {par: {"min": str(mn), "std": str(st), "max": str(mx)}}
+ new_vals = {
+ constants.ISPECS_MINMAX: [{
+ constants.ISPECS_MIN: {par: mn},
+ constants.ISPECS_MAX: {par: mx}
+ }],
+ constants.ISPECS_STD: {par: st}
+ }
cur_state = (cur_policy, cur_specs)
# We update cur_specs, as we've copied the values to restore already
- (cur_policy, cur_specs) = TestClusterSetISpecs(new_vals, fail=not good,
- old_values=cur_state)
+ (cur_policy, cur_specs) = TestClusterSetISpecs(
+ diff_specs=new_vals, fail=not good, old_values=cur_state)
# Get the ipolicy command
mnode = qa_config.GetMasterNode()