from ganeti import pathutils
import qa_config
+import qa_daemon
import qa_utils
import qa_error
+import qa_instance
from qa_utils import AssertEqual, AssertCommand, GetCommandOutput
"""
cmd = utils.ShellQuoteArgs(["cat", filename])
for node in qa_config.get("nodes"):
- AssertEqual(qa_utils.GetCommandOutput(node["primary"], cmd), content)
+ AssertEqual(qa_utils.GetCommandOutput(node.primary, cmd), content)
-# "gnt-cluster info" fields
-_CIFIELD_RE = re.compile(r"^[-\s]*(?P<field>[^\s:]+):\s*(?P<value>\S.*)$")
+def _GetClusterField(field_path):
+ """Get the value of a cluster field.
-
-def _GetBoolClusterField(field):
- """Get the Boolean value of a cluster field.
-
- This function currently assumes that the field name is unique in the cluster
- configuration. An assertion checks this assumption.
-
- @type field: string
- @param field: Name of the field
- @rtype: bool
- @return: The effective value of the field
+ @type field_path: list of strings
+ @param field_path: Names of the groups/fields to navigate to get the desired
+ value, e.g. C{["Default node parameters", "oob_program"]}
+ @return: The effective value of the field (the actual type depends on the
+ chosen field)
"""
- master = qa_config.GetMasterNode()
- infocmd = "gnt-cluster info"
- info_out = qa_utils.GetCommandOutput(master["primary"], infocmd)
- ret = None
- for l in info_out.splitlines():
- m = _CIFIELD_RE.match(l)
- # FIXME: There should be a way to specify a field through a hierarchy
- if m and m.group("field") == field:
- # Make sure that ignoring the hierarchy doesn't cause a double match
- assert ret is None
- ret = (m.group("value").lower() == "true")
- if ret is not None:
- return ret
- raise qa_error.Error("Field not found in cluster configuration: %s" % field)
+ assert isinstance(field_path, list)
+ assert field_path
+ ret = qa_utils.GetObjectInfo(["gnt-cluster", "info"])
+ for key in field_path:
+ ret = ret[key]
+ return ret
# Cluster-verify errors (date, "ERROR", then error code)
-_CVERROR_RE = re.compile(r"^[\w\s:]+\s+- ERROR:([A-Z0-9_-]+):")
+_CVERROR_RE = re.compile(r"^[\w\s:]+\s+- (ERROR|WARNING):([A-Z0-9_-]+):")
def _GetCVErrorCodes(cvout):
- ret = set()
+ errs = set()
+ warns = set()
for l in cvout.splitlines():
m = _CVERROR_RE.match(l)
if m:
- ecode = m.group(1)
- ret.add(ecode)
- return ret
+ etype = m.group(1)
+ ecode = m.group(2)
+ if etype == "ERROR":
+ errs.add(ecode)
+ elif etype == "WARNING":
+ warns.add(ecode)
+ return (errs, warns)
+
+
def _CheckVerifyErrors(actual, expected, etype):
  """Checks that all expected cluster-verify codes were reported.

  @type actual: set of strings
  @param actual: codes actually present in the cluster-verify output
  @type expected: list of tuples
  @param expected: expected C{CV_XXX} entries; the code is the second item
  @type etype: string
  @param etype: human-readable kind of code (e.g. "error" or "warning")
  @raise qa_error.Error: if any expected code is missing from C{actual}

  """
  exp_codes = compat.UniqueFrozenset(code for (_, code, _) in expected)
  missing = exp_codes.difference(actual)
  if missing:
    raise qa_error.Error("Cluster-verify didn't return these expected"
                         " %ss: %s" % (etype, utils.CommaJoin(missing)))
def AssertClusterVerify(fail=False, errors=None, warnings=None):
  """Run cluster-verify and check the result

  @type fail: bool
  @param fail: if cluster-verify is expected to fail instead of succeeding
  @type errors: list of tuples
  @param errors: List of CV_XXX errors that are expected; if specified, all the
    errors listed must appear in cluster-verify output. A non-empty value
    implies C{fail=True}.
  @type warnings: list of tuples
  @param warnings: Same as C{errors} but for warnings.

  """
  cvcmd = "gnt-cluster verify"
  mnode = qa_config.GetMasterNode()
  if errors or warnings:
    # --error-codes makes the output machine-parseable; expecting errors
    # implies the command itself fails, expecting only warnings does not
    cvout = GetCommandOutput(mnode.primary, cvcmd + " --error-codes",
                             fail=(fail or errors))
    (act_errs, act_warns) = _GetCVErrorCodes(cvout)
    if errors:
      _CheckVerifyErrors(act_errs, errors, "error")
    if warnings:
      _CheckVerifyErrors(act_warns, warnings, "warning")
  else:
    AssertCommand(cvcmd, fail=fail, node=mnode)
"""gnt-cluster init"""
master = qa_config.GetMasterNode()
- rapi_dir = os.path.dirname(pathutils.RAPI_USERS_FILE)
+ rapi_users_path = qa_utils.MakeNodePath(master, pathutils.RAPI_USERS_FILE)
+ rapi_dir = os.path.dirname(rapi_users_path)
# First create the RAPI credentials
fh = tempfile.NamedTemporaryFile()
fh.write("%s %s write\n" % (rapi_user, rapi_secret))
fh.flush()
- tmpru = qa_utils.UploadFile(master["primary"], fh.name)
+ tmpru = qa_utils.UploadFile(master.primary, fh.name)
try:
AssertCommand(["mkdir", "-p", rapi_dir])
- AssertCommand(["mv", tmpru, pathutils.RAPI_USERS_FILE])
+ AssertCommand(["mv", tmpru, rapi_users_path])
finally:
AssertCommand(["rm", "-f", tmpru])
finally:
fh.close()
# Initialize cluster
+ enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
cmd = [
"gnt-cluster", "init",
"--primary-ip-version=%d" % qa_config.get("primary_ip_version", 4),
"--enabled-hypervisors=%s" % ",".join(qa_config.GetEnabledHypervisors()),
+ "--enabled-disk-templates=%s" %
+ ",".join(enabled_disk_templates),
]
+ if constants.DT_FILE in enabled_disk_templates:
+ cmd.append(
+ "--file-storage-dir=%s" %
+ qa_config.get("default-file-storage-dir",
+ pathutils.DEFAULT_FILE_STORAGE_DIR))
for spec_type in ("mem-size", "disk-size", "disk-count", "cpu-count",
"nic-count"):
for spec_val in ("min", "max", "std"):
spec = qa_config.get("ispec_%s_%s" %
(spec_type.replace("-", "_"), spec_val), None)
- if spec:
+ if spec is not None:
cmd.append("--specs-%s=%s=%d" % (spec_type, spec_val, spec))
- if master.get("secondary", None):
- cmd.append("--secondary-ip=%s" % master["secondary"])
+ if master.secondary:
+ cmd.append("--secondary-ip=%s" % master.secondary)
- vgname = qa_config.get("vg-name", None)
- if vgname:
- cmd.append("--vg-name=%s" % vgname)
+ if utils.IsLvmEnabled(qa_config.GetEnabledDiskTemplates()):
+ vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
+ if vgname:
+ cmd.append("--vg-name=%s" % vgname)
+ else:
+ raise qa_error.Error("Please specify a volume group if you enable"
+ " lvm-based disk templates in the QA.")
master_netdev = qa_config.get("master-netdev", None)
if master_netdev:
master = qa_config.GetMasterNode()
# Assert that OOB is unavailable for all nodes
- result_output = GetCommandOutput(master["primary"],
+ result_output = GetCommandOutput(master.primary,
"gnt-node list --verbose --no-headers -o"
" powered")
AssertEqual(compat.all(powered == "(unavail)"
AssertCommand(["gnt-cluster", "epo", "--all", "some_arg"], fail=True)
# Unless --all is given master is not allowed to be in the list
- AssertCommand(["gnt-cluster", "epo", "-f", master["primary"]], fail=True)
+ AssertCommand(["gnt-cluster", "epo", "-f", master.primary], fail=True)
# This shouldn't fail
AssertCommand(["gnt-cluster", "epo", "-f", "--all"])
# All instances should have been stopped now
- result_output = GetCommandOutput(master["primary"],
+ result_output = GetCommandOutput(master.primary,
"gnt-instance list --no-headers -o status")
# ERROR_down because the instance is stopped but not recorded as such
AssertEqual(compat.all(status == "ERROR_down"
AssertCommand(["gnt-cluster", "epo", "--on", "-f", "--all"])
# All instances should have been started now
- result_output = GetCommandOutput(master["primary"],
+ result_output = GetCommandOutput(master.primary,
"gnt-instance list --no-headers -o status")
AssertEqual(compat.all(status == "running"
for status in result_output.splitlines()), True)
AssertCommand(["gnt-cluster", "verify-disks"])
def TestClusterVerifyDisksBrokenDRBD(instance, inst_nodes):
  """gnt-cluster verify-disks with broken DRBD

  @param instance: a DRBD-based instance whose disks get broken on purpose
  @param inst_nodes: the instance's nodes; the second entry is used as the
    secondary whose DRBD devices are broken

  """
  # Pause the watcher so it does not repair the disks behind our back
  qa_daemon.TestPauseWatcher()

  try:
    info = qa_instance.GetInstanceInfo(instance.name)
    snode = inst_nodes[1]
    for idx, minor in enumerate(info["drbd-minors"][snode.primary]):
      # Alternate between taking the device fully down and merely detaching
      # it from its backing storage; two drbdsetup invocation syntaxes are
      # tried each time, hence the "|| /bin/true" so the command never fails
      if idx % 2 == 0:
        break_drbd_cmd = \
          "(drbdsetup %d down >/dev/null 2>&1;" \
          " drbdsetup down resource%d >/dev/null 2>&1) || /bin/true" % \
          (minor, minor)
      else:
        break_drbd_cmd = \
          "(drbdsetup %d detach >/dev/null 2>&1;" \
          " drbdsetup detach %d >/dev/null 2>&1) || /bin/true" % \
          (minor, minor)
      AssertCommand(break_drbd_cmd, node=snode)

    # First run must notice the broken disks and re-activate them
    verify_output = GetCommandOutput(qa_config.GetMasterNode().primary,
                                     "gnt-cluster verify-disks")
    activation_msg = "Activating disks for instance '%s'" % instance.name
    if activation_msg not in verify_output:
      raise qa_error.Error("gnt-cluster verify-disks did not activate broken"
                           " DRBD disks:\n%s" % verify_output)

    # Second run must find nothing left to repair
    verify_output = GetCommandOutput(qa_config.GetMasterNode().primary,
                                     "gnt-cluster verify-disks")
    if activation_msg in verify_output:
      raise qa_error.Error("gnt-cluster verify-disks wants to activate broken"
                           " DRBD disks on second attempt:\n%s" % verify_output)

    AssertCommand(_CLUSTER_VERIFY)
  finally:
    qa_daemon.TestResumeWatcher()
+
+
def TestJobqueue():
  """gnt-debug test-jobqueue"""
  # Exercise the job queue through the debug command
  cmd = ["gnt-debug", "test-jobqueue"]
  AssertCommand(cmd)
AssertCommand(["gnt-debug", "delay", "1"])
AssertCommand(["gnt-debug", "delay", "--no-master", "1"])
AssertCommand(["gnt-debug", "delay", "--no-master",
- "-n", node["primary"], "1"])
+ "-n", node.primary, "1"])
def TestClusterReservedLvs():
"""gnt-cluster reserved lvs"""
+ # if no lvm-based templates are supported, skip the test
+ if not qa_config.IsStorageTypeSupported(constants.ST_LVM_VG):
+ return
vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
lvname = _QA_LV_PREFIX + "test"
lvfullname = "/".join([vgname, lvname])
AssertCommand(["gnt-cluster", "modify", "-D", param], fail=True)
+def _GetOtherEnabledDiskTemplate(undesired_disk_templates,
+ enabled_disk_templates):
+ """Returns one template that is not in the undesired set.
+
+ @type undesired_disk_templates: list of string
+ @param undesired_disk_templates: a list of disk templates that we want to
+ exclude when drawing one disk template from the list of enabled
+ disk templates
+ @type enabled_disk_templates: list of string
+ @param enabled_disk_templates: list of enabled disk templates (in QA)
+
+ """
+ desired_templates = list(set(enabled_disk_templates)
+ - set(undesired_disk_templates))
+ if desired_templates:
+ template = desired_templates[0]
+ else:
+ # If no desired disk template is available for QA, choose 'diskless' and
+ # hope for the best.
+ template = constants.ST_DISKLESS
+
+ return template
+
+
def TestClusterModifyFileBasedStorageDir(
    file_disk_template, dir_config_key, default_dir, option_name):
  """Tests 'gnt-cluster modify' with respect to file-based directory options.

  @type file_disk_template: string
  @param file_disk_template: file-based disk template
  @type dir_config_key: string
  @param dir_config_key: key for the QA config to retrieve the default
     directory value
  @type default_dir: string
  @param default_dir: default directory, if the QA config does not specify
     it
  @type option_name: string
  @param option_name: name of the option of 'gnt-cluster modify' to
     change the directory

  """
  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
  assert file_disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]
  if not qa_config.IsTemplateSupported(file_disk_template):
    return

  # Get some non-file-based disk template to disable file storage
  other_disk_template = _GetOtherEnabledDiskTemplate(
    utils.storage.GetDiskTemplatesOfStorageType(constants.ST_FILE),
    enabled_disk_templates)

  file_storage_dir = qa_config.get(dir_config_key, default_dir)
  invalid_file_storage_dir = "/boot/"

  # Each entry is (expected-to-fail, command); the sequence ends by
  # restoring the directory and the originally enabled disk templates
  for fail, cmd in [
    (False, ["gnt-cluster", "modify",
             "--enabled-disk-templates=%s" % file_disk_template]),
    (False, ["gnt-cluster", "modify",
             "--%s=%s" % (option_name, file_storage_dir)]),
    (False, ["gnt-cluster", "modify",
             "--%s=%s" % (option_name, invalid_file_storage_dir)]),
    # file storage dir is set to an unacceptable path, thus verify
    # should fail
    (True, ["gnt-cluster", "verify"]),
    # unsetting the storage dir while file storage is enabled
    # should fail
    (True, ["gnt-cluster", "modify",
            "--%s=" % option_name]),
    (False, ["gnt-cluster", "modify",
             "--%s=%s" % (option_name, file_storage_dir)]),
    (False, ["gnt-cluster", "modify",
             "--enabled-disk-templates=%s" % other_disk_template]),
    (False, ["gnt-cluster", "modify",
             "--%s=%s" % (option_name, invalid_file_storage_dir)]),
    # file storage is set to an unacceptable path, but file storage
    # is disabled, thus verify should not fail
    (False, ["gnt-cluster", "verify"]),
    # unsetting the file storage dir while file storage is not enabled
    # should be fine
    (False, ["gnt-cluster", "modify",
             "--%s=" % option_name]),
    # resetting everything to sane values
    (False, ["gnt-cluster", "modify",
             "--%s=%s" % (option_name, file_storage_dir),
             "--enabled-disk-templates=%s" % ",".join(enabled_disk_templates)])
    ]:
    AssertCommand(cmd, fail=fail)
+
+
def TestClusterModifyFileStorageDir():
  """gnt-cluster modify --file-storage-dir=..."""
  # Delegate to the generic file-based storage-dir test, using the
  # plain-file disk template and its corresponding config/option names
  TestClusterModifyFileBasedStorageDir(
    file_disk_template=constants.DT_FILE,
    dir_config_key="default-file-storage-dir",
    default_dir=pathutils.DEFAULT_FILE_STORAGE_DIR,
    option_name="file-storage-dir")
+
+
def TestClusterModifySharedFileStorageDir():
  """gnt-cluster modify --shared-file-storage-dir=..."""
  # Delegate to the generic file-based storage-dir test, using the
  # shared-file disk template and its corresponding config/option names
  TestClusterModifyFileBasedStorageDir(
    file_disk_template=constants.DT_SHARED_FILE,
    dir_config_key="default-shared-file-storage-dir",
    default_dir=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
    option_name="shared-file-storage-dir")
+
+
def TestClusterModifyDiskTemplates():
  """gnt-cluster modify --enabled-disk-templates=...

  Runs the disk-template argument tests, then creates a throw-away instance
  to verify that used templates cannot be disabled while unused ones can.

  """
  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
  default_disk_template = qa_config.GetDefaultDiskTemplate()

  _TestClusterModifyDiskTemplatesArguments(default_disk_template,
                                           enabled_disk_templates)
  _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates)

  _RestoreEnabledDiskTemplates()
  nodes = qa_config.AcquireManyNodes(2)
  try:
    instance_template = enabled_disk_templates[0]
    instance = qa_instance.CreateInstanceByDiskTemplate(nodes,
                                                        instance_template)
    try:
      _TestClusterModifyUnusedDiskTemplate(instance_template)
      _TestClusterModifyUsedDiskTemplate(instance_template,
                                         enabled_disk_templates)

      qa_instance.TestInstanceRemove(instance)
    finally:
      instance.Release()
  finally:
    # Release the acquired nodes so later tests can use them; the previous
    # version leaked both the nodes and the instance object
    for node in nodes:
      node.Release()
  _RestoreEnabledDiskTemplates()
+
+
def _RestoreEnabledDiskTemplates():
  """Restores the enabled disk templates from the QA configuration.

  Sets the list of enabled disk templates back to the list of enabled disk
  templates from the QA configuration. This can be used to make sure that
  the tests that modify the list of disk templates do not interfere with
  other tests.

  """
  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
  cmd = ["gnt-cluster", "modify",
         "--enabled-disk-templates=%s" % ",".join(enabled_disk_templates),
         "--ipolicy-disk-templates=%s" % ",".join(enabled_disk_templates),
         ]

  # Reuse the list fetched above instead of querying the configuration a
  # second time (the original queried qa_config twice)
  if utils.IsLvmEnabled(enabled_disk_templates):
    vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
    cmd.append("--vg-name=%s" % vgname)

  AssertCommand(cmd, fail=False)
+
+
def _TestClusterModifyDiskTemplatesArguments(default_disk_template,
                                             enabled_disk_templates):
  """Tests argument handling of 'gnt-cluster modify' with respect to
  the parameter '--enabled-disk-templates'. This test is independent
  of instances.

  @type default_disk_template: string
  @param default_disk_template: the QA default disk template
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates (in QA)

  """
  _RestoreEnabledDiskTemplates()

  # bogus templates
  AssertCommand(["gnt-cluster", "modify",
                 "--enabled-disk-templates=pinkbunny"],
                fail=True)

  # duplicate entries do no harm
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s,%s" %
      (default_disk_template, default_disk_template),
     "--ipolicy-disk-templates=%s" % default_disk_template],
    fail=False)

  if constants.DT_DRBD8 in enabled_disk_templates:
    # interaction with --drbd-usermode-helper option
    drbd_usermode_helper = qa_config.get("drbd-usermode-helper", None)
    if not drbd_usermode_helper:
      drbd_usermode_helper = "/bin/true"
    # specifying a helper when drbd gets disabled is ok. Note that drbd still
    # has to be installed on the nodes in this case
    AssertCommand(["gnt-cluster", "modify",
                   "--drbd-usermode-helper=%s" % drbd_usermode_helper,
                   "--enabled-disk-templates=%s" % constants.DT_DISKLESS,
                   "--ipolicy-disk-templates=%s" % constants.DT_DISKLESS],
                  fail=False)
    # specifying a helper when drbd is re-enabled
    AssertCommand(["gnt-cluster", "modify",
                   "--drbd-usermode-helper=%s" % drbd_usermode_helper,
                   "--enabled-disk-templates=%s" %
                   ",".join(enabled_disk_templates),
                   "--ipolicy-disk-templates=%s" %
                   ",".join(enabled_disk_templates)],
                  fail=False)
+
+
def _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates):
  """Tests argument handling of 'gnt-cluster modify' with respect to
  the parameter '--enabled-disk-templates' and '--vg-name'. This test is
  independent of instances.

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates (in QA)

  """
  if not utils.IsLvmEnabled(enabled_disk_templates):
    # These tests only make sense if lvm is enabled for QA
    return

  # determine an LVM and a non-LVM disk template for the tests
  non_lvm_template = _GetOtherEnabledDiskTemplate(utils.GetLvmDiskTemplates(),
                                                  enabled_disk_templates)
  # NOTE(review): picks an arbitrary lvm-based template (set iteration
  # order); any enabled one works for these tests
  lvm_template = list(set(enabled_disk_templates)
                      .intersection(set(utils.GetLvmDiskTemplates())))[0]

  vgname = qa_config.get("vg-name", constants.DEFAULT_VG)

  # Clean start: unset volume group name, disable lvm storage
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % non_lvm_template,
     "--ipolicy-disk-templates=%s" % non_lvm_template,
     "--vg-name="],
    fail=False)

  # Try to enable lvm, when no volume group is given
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % lvm_template,
     "--ipolicy-disk-templates=%s" % lvm_template],
    fail=True)

  # Set volume group, with lvm still disabled: just a warning
  AssertCommand(["gnt-cluster", "modify", "--vg-name=%s" % vgname], fail=False)

  # Try unsetting vg name and enabling lvm at the same time
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % lvm_template,
     "--ipolicy-disk-templates=%s" % lvm_template,
     "--vg-name="],
    fail=True)

  # Enable lvm with vg name present
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % lvm_template,
     "--ipolicy-disk-templates=%s" % lvm_template],
    fail=False)

  # Try unsetting vg name with lvm still enabled
  AssertCommand(["gnt-cluster", "modify", "--vg-name="], fail=True)

  # Disable lvm with vg name still set
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % non_lvm_template,
     "--ipolicy-disk-templates=%s" % non_lvm_template,
     ],
    fail=False)

  # Try unsetting vg name with lvm disabled
  AssertCommand(["gnt-cluster", "modify", "--vg-name="], fail=False)

  # Set vg name and enable lvm at the same time
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % lvm_template,
     "--ipolicy-disk-templates=%s" % lvm_template,
     "--vg-name=%s" % vgname],
    fail=False)

  # Unset vg name and disable lvm at the same time
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % non_lvm_template,
     "--ipolicy-disk-templates=%s" % non_lvm_template,
     "--vg-name="],
    fail=False)

  _RestoreEnabledDiskTemplates()
+
+
def _TestClusterModifyUsedDiskTemplate(instance_template,
                                       enabled_disk_templates):
  """Tests that disk templates that are currently in use by instances cannot
  be disabled on the cluster.

  """
  # The list of enabled disk templates may never be empty, so if the
  # instance's template is the only enabled one, propose some substitutes
  # to switch to instead.
  candidates = list(set(enabled_disk_templates) - {instance_template})
  if not candidates:
    candidates = list({constants.DT_DISKLESS, constants.DT_BLOCK}
                      - {instance_template})
  templates_opt = ",".join(candidates)
  # Disabling the in-use template must be rejected
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % templates_opt,
     "--ipolicy-disk-templates=%s" % templates_opt],
    fail=True)
+
+
def _TestClusterModifyUnusedDiskTemplate(instance_template):
  """Tests that unused disk templates can be disabled safely."""
  usable_templates = constants.DISK_TEMPLATES
  if not utils.IsLvmEnabled(qa_config.GetEnabledDiskTemplates()):
    # Without LVM in QA, the lvm-based templates cannot be enabled
    usable_templates = list(set(usable_templates) -
                            set(utils.GetLvmDiskTemplates()))

  # First enable every usable template, then shrink the list down to just
  # the instance's template; both modifications must succeed
  for templates in (usable_templates, [instance_template]):
    AssertCommand(
      ["gnt-cluster", "modify",
       "--enabled-disk-templates=%s" % ",".join(templates),
       "--ipolicy-disk-templates=%s" % ",".join(templates)],
      fail=False)
+
+
def TestClusterModifyBe():
"""gnt-cluster modify -B"""
for fail, cmd in [
AssertCommand(["gnt-cluster", "modify", "-B", bep])
def _GetClusterIPolicy():
  """Return the run-time values of the cluster-level instance policy.

  @rtype: tuple
  @return: (policy, specs), where:
      - policy is a dictionary of the policy values, instance specs excluded
      - specs is a dictionary containing only the specs, using the internal
        format (see L{constants.IPOLICY_DEFAULTS} for an example)

  """
  cluster_info = qa_utils.GetObjectInfo(["gnt-cluster", "info"])
  raw_policy = cluster_info["Instance policy - limits for instances"]
  (policy, specs) = qa_utils.ParseIPolicy(raw_policy)

  # Sanity checks on the parsed structures
  assert "minmax" in specs and "std" in specs
  assert len(specs["minmax"]) > 0
  assert len(policy) > 0
  return (policy, specs)
+
+
def TestClusterModifyIPolicy():
  """gnt-cluster modify --ipolicy-*

  Exercises ratio-type ipolicy parameters and the allowed-disk-templates
  parameter; each sequence ends by restoring the original value.

  """
  basecmd = ["gnt-cluster", "modify"]
  (old_policy, old_specs) = _GetClusterIPolicy()
  for par in ["vcpu-ratio", "spindle-ratio"]:
    curr_val = float(old_policy[par])
    # (expected-to-succeed, value) pairs; non-numeric values must be rejected
    test_values = [
      (True, 1.0),
      (True, 1.5),
      (True, 2),
      (False, "a"),
      # Restore the old value
      (True, curr_val),
      ]
    for (good, val) in test_values:
      cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)]
      AssertCommand(cmd, fail=not good)
      if good:
        curr_val = val
      # Check the affected parameter
      (eff_policy, eff_specs) = _GetClusterIPolicy()
      AssertEqual(float(eff_policy[par]), curr_val)
      # Check everything else
      AssertEqual(eff_specs, old_specs)
      for p in eff_policy.keys():
        if p == par:
          continue
        AssertEqual(eff_policy[p], old_policy[p])

  # Disk templates are treated slightly differently
  par = "disk-templates"
  disp_str = "allowed disk templates"
  curr_val = old_policy[disp_str]
  test_values = [
    (True, constants.DT_PLAIN),
    (True, "%s,%s" % (constants.DT_PLAIN, constants.DT_DRBD8)),
    (False, "thisisnotadisktemplate"),
    (False, ""),
    # Restore the old value
    (True, curr_val.replace(" ", "")),
    ]
  for (good, val) in test_values:
    cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)]
    AssertCommand(cmd, fail=not good)
    if good:
      curr_val = val
    # Check the affected parameter; whitespace is stripped because the
    # displayed value is a comma-plus-space separated list
    (eff_policy, eff_specs) = _GetClusterIPolicy()
    AssertEqual(eff_policy[disp_str].replace(" ", ""), curr_val)
    # Check everything else
    AssertEqual(eff_specs, old_specs)
    for p in eff_policy.keys():
      if p == disp_str:
        continue
      AssertEqual(eff_policy[p], old_policy[p])
+
+
def TestClusterSetISpecs(new_specs=None, diff_specs=None, fail=False,
                         old_values=None):
  """Change instance specs.

  At most one of new_specs or diff_specs can be specified.

  @type new_specs: dict
  @param new_specs: new complete specs, in the same format returned by
    L{_GetClusterIPolicy}
  @type diff_specs: dict
  @param diff_specs: partial specs, it can be an incomplete specifications, but
    if min/max specs are specified, their number must match the number of the
    existing specs
  @type fail: bool
  @param fail: if the change is expected to fail
  @type old_values: tuple
  @param old_values: (old_policy, old_specs), as returned by
    L{_GetClusterIPolicy}
  @return: same as L{_GetClusterIPolicy}

  """
  # A named function instead of a lambda assigned to a name (PEP 8, E731)
  def build_cmd(opts):
    return ["gnt-cluster", "modify"] + opts

  return qa_utils.TestSetISpecs(
    new_specs=new_specs, diff_specs=diff_specs,
    get_policy_fn=_GetClusterIPolicy, build_cmd_fn=build_cmd,
    fail=fail, old_values=old_values)
+
+
def TestClusterModifyISpecs():
  """gnt-cluster modify --specs-*"""
  params = ["memory-size", "disk-size", "disk-count", "cpu-count", "nic-count"]
  (cur_policy, cur_specs) = _GetClusterIPolicy()
  # This test assumes that there is only one min/max bound
  assert len(cur_specs[constants.ISPECS_MINMAX]) == 1
  for par in params:
    # (expected-to-succeed, min, std, max); invalid combinations (std outside
    # [min, max], min > max, non-numeric values) must be rejected
    test_values = [
      (True, 0, 4, 12),
      (True, 4, 4, 12),
      (True, 4, 12, 12),
      (True, 4, 4, 4),
      (False, 4, 0, 12),
      (False, 4, 16, 12),
      (False, 4, 4, 0),
      (False, 12, 4, 4),
      (False, 12, 4, 0),
      (False, "a", 4, 12),
      (False, 0, "a", 12),
      (False, 0, 4, "a"),
      # This is to restore the old values
      (True,
       cur_specs[constants.ISPECS_MINMAX][0][constants.ISPECS_MIN][par],
       cur_specs[constants.ISPECS_STD][par],
       cur_specs[constants.ISPECS_MINMAX][0][constants.ISPECS_MAX][par])
      ]
    for (good, mn, st, mx) in test_values:
      new_vals = {
        constants.ISPECS_MINMAX: [{
          constants.ISPECS_MIN: {par: mn},
          constants.ISPECS_MAX: {par: mx}
          }],
        constants.ISPECS_STD: {par: st}
        }
      cur_state = (cur_policy, cur_specs)
      # We update cur_specs, as we've copied the values to restore already
      (cur_policy, cur_specs) = TestClusterSetISpecs(
        diff_specs=new_vals, fail=not good, old_values=cur_state)

  # Get the ipolicy command
  mnode = qa_config.GetMasterNode()
  initcmd = GetCommandOutput(mnode.primary, "gnt-cluster show-ispecs-cmd")
  modcmd = ["gnt-cluster", "modify"]
  opts = initcmd.split()
  assert opts[0:2] == ["gnt-cluster", "init"]
  # Collect every "--ipolicy-* <value>" option pair from the emitted
  # init command line (each option is followed by its value word)
  for k in range(2, len(opts) - 1):
    if opts[k].startswith("--ipolicy-"):
      assert k + 2 <= len(opts)
      modcmd.extend(opts[k:k + 2])
  # Re-apply the ipolicy (this should be a no-op)
  AssertCommand(modcmd)
  new_initcmd = GetCommandOutput(mnode.primary, "gnt-cluster show-ispecs-cmd")
  AssertEqual(initcmd, new_initcmd)
+
+
def TestClusterInfo():
"""gnt-cluster info"""
AssertCommand(["gnt-cluster", "info"])
"--rapi-certificate=/dev/null"]
AssertCommand(cmd, fail=True)
- rapi_cert_backup = qa_utils.BackupFile(master["primary"],
+ rapi_cert_backup = qa_utils.BackupFile(master.primary,
pathutils.RAPI_CERT_FILE)
try:
# Custom RAPI certificate
utils.GenerateSelfSignedSslCert(fh.name, validity=validity)
- tmpcert = qa_utils.UploadFile(master["primary"], fh.name)
+ tmpcert = qa_utils.UploadFile(master.primary, fh.name)
try:
AssertCommand(["gnt-cluster", "renew-crypto", "--force",
"--rapi-certificate=%s" % tmpcert])
cds_fh.write("\n")
cds_fh.flush()
- tmpcds = qa_utils.UploadFile(master["primary"], cds_fh.name)
+ tmpcds = qa_utils.UploadFile(master.primary, cds_fh.name)
try:
AssertCommand(["gnt-cluster", "renew-crypto", "--force",
"--cluster-domain-secret=%s" % tmpcds])
master = qa_config.GetMasterNode()
options = qa_config.get("options", {})
- disk_template = options.get("burnin-disk-template", "drbd")
+ disk_template = options.get("burnin-disk-template", constants.DT_DRBD8)
parallel = options.get("burnin-in-parallel", False)
check_inst = options.get("burnin-check-instances", False)
do_rename = options.get("burnin-rename", "")
if len(instances) < 1:
raise qa_error.Error("Burnin needs at least one instance")
- script = qa_utils.UploadFile(master["primary"], "../tools/burnin")
+ script = qa_utils.UploadFile(master.primary, "../tools/burnin")
try:
+ disks = qa_config.GetDiskOptions()
# Run burnin
cmd = [script,
"--os=%s" % qa_config.get("os"),
"--minmem-size=%s" % qa_config.get(constants.BE_MINMEM),
"--maxmem-size=%s" % qa_config.get(constants.BE_MAXMEM),
- "--disk-size=%s" % ",".join(qa_config.get("disk")),
- "--disk-growth=%s" % ",".join(qa_config.get("disk-growth")),
+ "--disk-size=%s" % ",".join([d.get("size") for d in disks]),
+ "--disk-growth=%s" % ",".join([d.get("growth") for d in disks]),
"--disk-template=%s" % disk_template]
if parallel:
cmd.append("--parallel")
cmd.append("--no-reboot")
else:
cmd.append("--reboot-types=%s" % ",".join(reboot_types))
- cmd += [inst["name"] for inst in instances]
+ cmd += [inst.name for inst in instances]
AssertCommand(cmd)
finally:
AssertCommand(["rm", "-f", script])
finally:
for inst in instances:
- qa_config.ReleaseInstance(inst)
+ inst.Release()
def TestClusterMasterFailover():
# Back to original master node
AssertCommand(cmd, node=master)
finally:
- qa_config.ReleaseNode(failovermaster)
+ failovermaster.Release()
+
+
def _NodeQueueDrainFile(node):
  """Returns the node-specific path of the job queue drain file.

  """
  drain_file = pathutils.JOB_QUEUE_DRAIN_FILE
  return qa_utils.MakeNodePath(node, drain_file)
+
+
def _AssertDrainFile(node, **kwargs):
  """Checks for the presence of the queue drain file on a node.

  Extra keyword arguments (e.g. C{fail=True}) are forwarded to
  L{AssertCommand}.

  """
  drain_path = _NodeQueueDrainFile(node)
  AssertCommand(["test", "-f", drain_path], node=node, **kwargs)
def TestClusterMasterFailoverWithDrainedQueue():
  """gnt-cluster master-failover with drained queue"""
  master = qa_config.GetMasterNode()
  failovermaster = qa_config.AcquireNode(exclude=master)
  # Ensure queue is not drained
  for node in [master, failovermaster]:
    _AssertDrainFile(node, fail=True)
  # Drain queue on failover master
  AssertCommand(["touch", _NodeQueueDrainFile(failovermaster)],
                node=failovermaster)
  cmd = ["gnt-cluster", "master-failover"]
  try:
    _AssertDrainFile(failovermaster)
    # Failing over must succeed despite the drained queue ...
    AssertCommand(cmd, node=failovermaster)
    # ... and must leave the drain flag removed on both nodes
    _AssertDrainFile(master, fail=True)
    _AssertDrainFile(failovermaster, fail=True)
    # Back to original master node
    AssertCommand(cmd, node=master)
  finally:
    failovermaster.Release()

  # Ensure queue is not drained
  for node in [master, failovermaster]:
    _AssertDrainFile(node, fail=True)
def TestClusterCopyfile():
f.seek(0)
# Upload file to master node
- testname = qa_utils.UploadFile(master["primary"], f.name)
+ testname = qa_utils.UploadFile(master.primary, f.name)
try:
# Copy file to all nodes
AssertCommand(["gnt-cluster", "copyfile", testname])
@return: The old value of exclusive_storage
"""
- oldvalue = _GetBoolClusterField("exclusive_storage")
+ es_path = ["Default node parameters", "exclusive_storage"]
+ oldvalue = _GetClusterField(es_path)
AssertCommand(["gnt-cluster", "modify", "--node-parameters",
"exclusive_storage=%s" % newvalue])
- effvalue = _GetBoolClusterField("exclusive_storage")
+ effvalue = _GetClusterField(es_path)
if effvalue != newvalue:
raise qa_error.Error("exclusive_storage has the wrong value: %s instead"
" of %s" % (effvalue, newvalue))
vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
lvname1 = _QA_LV_PREFIX + "vol1"
lvname2 = _QA_LV_PREFIX + "vol2"
- node_name = node["primary"]
+ node_name = node.primary
AssertCommand(["lvcreate", "-L1G", "-n", lvname1, vgname], node=node_name)
AssertClusterVerify(fail=True, errors=[constants.CV_ENODEORPHANLV])
AssertCommand(["lvcreate", "-L1G", "-n", lvname2, vgname], node=node_name)