X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/3e8b5a9cef576dbdab939651359f738fd5ad0ec0..9c265dd7fc7fd55a844d71bbc5483682b56a3b91:/qa/qa_cluster.py

diff --git a/qa/qa_cluster.py b/qa/qa_cluster.py
index 553c6ff..f532826 100644
--- a/qa/qa_cluster.py
+++ b/qa/qa_cluster.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2007, 2010, 2011 Google Inc.
+# Copyright (C) 2007, 2010, 2011, 2012, 2013 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -23,20 +23,30 @@
 
 """
 
+import re
 import tempfile
 import os.path
 
 from ganeti import constants
 from ganeti import compat
 from ganeti import utils
+from ganeti import pathutils
 
 import qa_config
 import qa_utils
 import qa_error
+import qa_instance
 
 from qa_utils import AssertEqual, AssertCommand, GetCommandOutput
 
 
+# Prefix for LVM volumes created by QA code during tests
+_QA_LV_PREFIX = "qa-"
+
+#: cluster verify command
+_CLUSTER_VERIFY = ["gnt-cluster", "verify"]
+
+
 def _RemoveFileFromAllNodes(filename):
   """Removes a file from all nodes.
 
@@ -51,14 +61,101 @@ def _CheckFileOnAllNodes(filename, content):
   """
   cmd = utils.ShellQuoteArgs(["cat", filename])
   for node in qa_config.get("nodes"):
-    AssertEqual(qa_utils.GetCommandOutput(node["primary"], cmd), content)
+    AssertEqual(qa_utils.GetCommandOutput(node.primary, cmd), content)
+
+
+def _GetClusterField(field_path):
+  """Get the value of a cluster field.
+
+  @type field_path: list of strings
+  @param field_path: Names of the groups/fields to navigate to get the desired
+      value, e.g. C{["Default node parameters", "oob_program"]}
+  @return: The effective value of the field (the actual type depends on the
+      chosen field)
+
+  """
+  assert isinstance(field_path, list)
+  assert field_path
+  ret = qa_utils.GetObjectInfo(["gnt-cluster", "info"])
+  for key in field_path:
+    ret = ret[key]
+  return ret
+
+
+# Cluster-verify errors (date, "ERROR", then error code)
+_CVERROR_RE = re.compile(r"^[\w\s:]+\s+- (ERROR|WARNING):([A-Z0-9_-]+):")
+
+
+def _GetCVErrorCodes(cvout):
+  errs = set()
+  warns = set()
+  for l in cvout.splitlines():
+    m = _CVERROR_RE.match(l)
+    if m:
+      etype = m.group(1)
+      ecode = m.group(2)
+      if etype == "ERROR":
+        errs.add(ecode)
+      elif etype == "WARNING":
+        warns.add(ecode)
+  return (errs, warns)
+
+
+def _CheckVerifyErrors(actual, expected, etype):
+  exp_codes = compat.UniqueFrozenset(e for (_, e, _) in expected)
+  if not actual.issuperset(exp_codes):
+    missing = exp_codes.difference(actual)
+    raise qa_error.Error("Cluster-verify didn't return these expected"
+                         " %ss: %s" % (etype, utils.CommaJoin(missing)))
+
+
+def AssertClusterVerify(fail=False, errors=None, warnings=None):
+  """Run cluster-verify and check the result
+
+  @type fail: bool
+  @param fail: if cluster-verify is expected to fail instead of succeeding
+  @type errors: list of tuples
+  @param errors: List of CV_XXX errors that are expected; if specified, all the
+      errors listed must appear in cluster-verify output. A non-empty value
+      implies C{fail=True}.
+  @type warnings: list of tuples
+  @param warnings: Same as C{errors} but for warnings.
+ + """ + cvcmd = "gnt-cluster verify" + mnode = qa_config.GetMasterNode() + if errors or warnings: + cvout = GetCommandOutput(mnode.primary, cvcmd + " --error-codes", + fail=(fail or errors)) + (act_errs, act_warns) = _GetCVErrorCodes(cvout) + if errors: + _CheckVerifyErrors(act_errs, errors, "error") + if warnings: + _CheckVerifyErrors(act_warns, warnings, "warning") + else: + AssertCommand(cvcmd, fail=fail, node=mnode) + + +# data for testing failures due to bad keys/values for disk parameters +_FAIL_PARAMS = ["nonexistent:resync-rate=1", + "drbd:nonexistent=1", + "drbd:resync-rate=invalid", + ] + + +def TestClusterInitDisk(): + """gnt-cluster init -D""" + name = qa_config.get("name") + for param in _FAIL_PARAMS: + AssertCommand(["gnt-cluster", "init", "-D", param, name], fail=True) def TestClusterInit(rapi_user, rapi_secret): """gnt-cluster init""" master = qa_config.GetMasterNode() - rapi_dir = os.path.dirname(constants.RAPI_USERS_FILE) + rapi_users_path = qa_utils.MakeNodePath(master, pathutils.RAPI_USERS_FILE) + rapi_dir = os.path.dirname(rapi_users_path) # First create the RAPI credentials fh = tempfile.NamedTemporaryFile() @@ -66,55 +163,105 @@ def TestClusterInit(rapi_user, rapi_secret): fh.write("%s %s write\n" % (rapi_user, rapi_secret)) fh.flush() - tmpru = qa_utils.UploadFile(master["primary"], fh.name) + tmpru = qa_utils.UploadFile(master.primary, fh.name) try: AssertCommand(["mkdir", "-p", rapi_dir]) - AssertCommand(["mv", tmpru, constants.RAPI_USERS_FILE]) + AssertCommand(["mv", tmpru, rapi_users_path]) finally: AssertCommand(["rm", "-f", tmpru]) finally: fh.close() # Initialize cluster - cmd = ['gnt-cluster', 'init'] + cmd = [ + "gnt-cluster", "init", + "--primary-ip-version=%d" % qa_config.get("primary_ip_version", 4), + "--enabled-hypervisors=%s" % ",".join(qa_config.GetEnabledHypervisors()), + "--enabled-disk-templates=%s" % + ",".join(qa_config.GetEnabledDiskTemplates()) + ] + + for spec_type in ("mem-size", "disk-size", "disk-count", "cpu-count", + "nic-count"): + for spec_val in ("min", "max", "std"): + spec = qa_config.get("ispec_%s_%s" % + (spec_type.replace("-", "_"), spec_val), None) + if spec is not None: + cmd.append("--specs-%s=%s=%d" % (spec_type, spec_val, spec)) + + if master.secondary: + cmd.append("--secondary-ip=%s" % master.secondary) - cmd.append("--primary-ip-version=%d" % - qa_config.get("primary_ip_version", 4)) + vgname = qa_config.get("vg-name", None) + if vgname: + cmd.append("--vg-name=%s" % vgname) - if master.get('secondary', None): - cmd.append('--secondary-ip=%s' % master['secondary']) + master_netdev = qa_config.get("master-netdev", None) + if master_netdev: + cmd.append("--master-netdev=%s" % master_netdev) - bridge = qa_config.get('bridge', None) - if bridge: - cmd.append('--bridge=%s' % bridge) - cmd.append('--master-netdev=%s' % bridge) + nicparams = qa_config.get("default-nicparams", None) + if nicparams: + cmd.append("--nic-parameters=%s" % + ",".join(utils.FormatKeyValue(nicparams))) - htype = qa_config.get('enabled-hypervisors', None) - if htype: - cmd.append('--enabled-hypervisors=%s' % htype) + # Cluster value of the exclusive-storage node parameter + e_s = qa_config.get("exclusive-storage") + if e_s is not None: + cmd.extend(["--node-parameters", "exclusive_storage=%s" % e_s]) + else: + e_s = False + qa_config.SetExclusiveStorage(e_s) - cmd.append(qa_config.get('name')) + extra_args = qa_config.get("cluster-init-args") + if extra_args: + cmd.extend(extra_args) + + cmd.append(qa_config.get("name")) AssertCommand(cmd) + cmd = 
["gnt-cluster", "modify"] + + # hypervisor parameter modifications + hvp = qa_config.get("hypervisor-parameters", {}) + for k, v in hvp.items(): + cmd.extend(["-H", "%s:%s" % (k, v)]) + # backend parameter modifications + bep = qa_config.get("backend-parameters", "") + if bep: + cmd.extend(["-B", bep]) + + if len(cmd) > 2: + AssertCommand(cmd) + + # OS parameters + osp = qa_config.get("os-parameters", {}) + for k, v in osp.items(): + AssertCommand(["gnt-os", "modify", "-O", v, k]) + + # OS hypervisor parameters + os_hvp = qa_config.get("os-hvp", {}) + for os_name in os_hvp: + for hv, hvp in os_hvp[os_name].items(): + AssertCommand(["gnt-os", "modify", "-H", "%s:%s" % (hv, hvp), os_name]) + def TestClusterRename(): """gnt-cluster rename""" - cmd = ['gnt-cluster', 'rename', '-f'] + cmd = ["gnt-cluster", "rename", "-f"] - original_name = qa_config.get('name') - rename_target = qa_config.get('rename', None) + original_name = qa_config.get("name") + rename_target = qa_config.get("rename", None) if rename_target is None: print qa_utils.FormatError('"rename" entry is missing') return - cmd_verify = ['gnt-cluster', 'verify'] - for data in [ cmd + [rename_target], - cmd_verify, + _CLUSTER_VERIFY, cmd + [original_name], - cmd_verify, + _CLUSTER_VERIFY, ]: AssertCommand(data) @@ -123,12 +270,12 @@ def TestClusterOob(): """out-of-band framework""" oob_path_exists = "/tmp/ganeti-qa-oob-does-exist-%s" % utils.NewUUID() - AssertCommand(["gnt-cluster", "verify"]) + AssertCommand(_CLUSTER_VERIFY) AssertCommand(["gnt-cluster", "modify", "--node-parameters", "oob_program=/tmp/ganeti-qa-oob-does-not-exist-%s" % utils.NewUUID()]) - AssertCommand(["gnt-cluster", "verify"], fail=True) + AssertCommand(_CLUSTER_VERIFY, fail=True) AssertCommand(["touch", oob_path_exists]) AssertCommand(["chmod", "0400", oob_path_exists]) @@ -138,12 +285,12 @@ def TestClusterOob(): AssertCommand(["gnt-cluster", "modify", "--node-parameters", "oob_program=%s" % oob_path_exists]) - AssertCommand(["gnt-cluster", "verify"], fail=True) + AssertCommand(_CLUSTER_VERIFY, fail=True) AssertCommand(["chmod", "0500", oob_path_exists]) AssertCommand(["gnt-cluster", "copyfile", oob_path_exists]) - AssertCommand(["gnt-cluster", "verify"]) + AssertCommand(_CLUSTER_VERIFY) finally: AssertCommand(["gnt-cluster", "command", "rm", oob_path_exists]) @@ -156,8 +303,8 @@ def TestClusterEpo(): master = qa_config.GetMasterNode() # Assert that OOB is unavailable for all nodes - result_output = GetCommandOutput(master["primary"], - "gnt-node list --verbose --no-header -o" + result_output = GetCommandOutput(master.primary, + "gnt-node list --verbose --no-headers -o" " powered") AssertEqual(compat.all(powered == "(unavail)" for powered in result_output.splitlines()), True) @@ -168,30 +315,31 @@ def TestClusterEpo(): AssertCommand(["gnt-cluster", "epo", "--all", "some_arg"], fail=True) # Unless --all is given master is not allowed to be in the list - AssertCommand(["gnt-cluster", "epo", "-f", master["primary"]], fail=True) + AssertCommand(["gnt-cluster", "epo", "-f", master.primary], fail=True) # This shouldn't fail AssertCommand(["gnt-cluster", "epo", "-f", "--all"]) # All instances should have been stopped now - result_output = GetCommandOutput(master["primary"], - "gnt-instance list --no-header -o status") - AssertEqual(compat.all(status == "ADMIN_down" + result_output = GetCommandOutput(master.primary, + "gnt-instance list --no-headers -o status") + # ERROR_down because the instance is stopped but not recorded as such + AssertEqual(compat.all(status == 
"ERROR_down" for status in result_output.splitlines()), True) # Now start everything again AssertCommand(["gnt-cluster", "epo", "--on", "-f", "--all"]) # All instances should have been started now - result_output = GetCommandOutput(master["primary"], - "gnt-instance list --no-header -o status") + result_output = GetCommandOutput(master.primary, + "gnt-instance list --no-headers -o status") AssertEqual(compat.all(status == "running" for status in result_output.splitlines()), True) def TestClusterVerify(): """gnt-cluster verify""" - AssertCommand(["gnt-cluster", "verify"]) + AssertCommand(_CLUSTER_VERIFY) AssertCommand(["gnt-cluster", "verify-disks"]) @@ -200,51 +348,328 @@ def TestJobqueue(): AssertCommand(["gnt-debug", "test-jobqueue"]) +def TestDelay(node): + """gnt-debug delay""" + AssertCommand(["gnt-debug", "delay", "1"]) + AssertCommand(["gnt-debug", "delay", "--no-master", "1"]) + AssertCommand(["gnt-debug", "delay", "--no-master", + "-n", node.primary, "1"]) + + def TestClusterReservedLvs(): """gnt-cluster reserved lvs""" - CVERIFY = ["gnt-cluster", "verify"] + vgname = qa_config.get("vg-name", constants.DEFAULT_VG) + lvname = _QA_LV_PREFIX + "test" + lvfullname = "/".join([vgname, lvname]) for fail, cmd in [ - (False, CVERIFY), + (False, _CLUSTER_VERIFY), (False, ["gnt-cluster", "modify", "--reserved-lvs", ""]), - (False, ["lvcreate", "-L1G", "-nqa-test", "xenvg"]), - (True, CVERIFY), + (False, ["lvcreate", "-L1G", "-n", lvname, vgname]), + (True, _CLUSTER_VERIFY), (False, ["gnt-cluster", "modify", "--reserved-lvs", - "xenvg/qa-test,.*/other-test"]), - (False, CVERIFY), - (False, ["gnt-cluster", "modify", "--reserved-lvs", ".*/qa-.*"]), - (False, CVERIFY), + "%s,.*/other-test" % lvfullname]), + (False, _CLUSTER_VERIFY), + (False, ["gnt-cluster", "modify", "--reserved-lvs", + ".*/%s.*" % _QA_LV_PREFIX]), + (False, _CLUSTER_VERIFY), (False, ["gnt-cluster", "modify", "--reserved-lvs", ""]), - (True, CVERIFY), - (False, ["lvremove", "-f", "xenvg/qa-test"]), - (False, CVERIFY), + (True, _CLUSTER_VERIFY), + (False, ["lvremove", "-f", lvfullname]), + (False, _CLUSTER_VERIFY), ]: AssertCommand(cmd, fail=fail) +def TestClusterModifyEmpty(): + """gnt-cluster modify""" + AssertCommand(["gnt-cluster", "modify"], fail=True) + + +def TestClusterModifyDisk(): + """gnt-cluster modify -D""" + for param in _FAIL_PARAMS: + AssertCommand(["gnt-cluster", "modify", "-D", param], fail=True) + + +def TestClusterModifyDiskTemplates(): + """gnt-cluster modify --enabled-disk-templates=...""" + enabled_disk_templates = qa_config.GetEnabledDiskTemplates() + default_disk_template = qa_config.GetDefaultDiskTemplate() + + _TestClusterModifyDiskTemplatesArguments(default_disk_template, + enabled_disk_templates) + + _RestoreEnabledDiskTemplates() + nodes = qa_config.AcquireManyNodes(2) + + instance_template = enabled_disk_templates[0] + instance = qa_instance.CreateInstanceByDiskTemplate(nodes, instance_template) + + _TestClusterModifyUnusedDiskTemplate(instance_template) + _TestClusterModifyUsedDiskTemplate(instance_template, + enabled_disk_templates) + + qa_instance.TestInstanceRemove(instance) + _RestoreEnabledDiskTemplates() + + +def _RestoreEnabledDiskTemplates(): + """Sets the list of enabled disk templates back to the list of enabled disk + templates from the QA configuration. This can be used to make sure that + the tests that modify the list of disk templates do not interfere with + other tests. 
+ + """ + AssertCommand( + ["gnt-cluster", "modify", + "--enabled-disk-template=%s" % + ",".join(qa_config.GetEnabledDiskTemplates())], + fail=False) + + +def _TestClusterModifyDiskTemplatesArguments(default_disk_template, + enabled_disk_templates): + """Tests argument handling of 'gnt-cluster modify' with respect to + the parameter '--enabled-disk-templates'. This test is independent + of instances. + + """ + AssertCommand( + ["gnt-cluster", "modify", + "--enabled-disk-template=%s" % + ",".join(enabled_disk_templates)], + fail=False) + + # bogus templates + AssertCommand(["gnt-cluster", "modify", + "--enabled-disk-templates=pinkbunny"], + fail=True) + + # duplicate entries do no harm + AssertCommand( + ["gnt-cluster", "modify", + "--enabled-disk-templates=%s,%s" % + (default_disk_template, default_disk_template)], + fail=False) + + +def _TestClusterModifyUsedDiskTemplate(instance_template, + enabled_disk_templates): + """Tests that disk templates that are currently in use by instances cannot + be disabled on the cluster. + + """ + # If the list of enabled disk templates contains only one template + # we need to add some other templates, because the list of enabled disk + # templates can only be set to a non-empty list. + new_disk_templates = list(set(enabled_disk_templates) + - set([instance_template])) + if not new_disk_templates: + new_disk_templates = list(set(constants.DISK_TEMPLATES) + - set([instance_template])) + AssertCommand( + ["gnt-cluster", "modify", + "--enabled-disk-templates=%s" % + ",".join(new_disk_templates)], + fail=True) + + +def _TestClusterModifyUnusedDiskTemplate(instance_template): + """Tests that unused disk templates can be disabled safely.""" + all_disk_templates = constants.DISK_TEMPLATES + AssertCommand( + ["gnt-cluster", "modify", + "--enabled-disk-templates=%s" % + ",".join(all_disk_templates)], + fail=False) + new_disk_templates = [instance_template] + AssertCommand( + ["gnt-cluster", "modify", + "--enabled-disk-templates=%s" % + ",".join(new_disk_templates)], + fail=False) + + def TestClusterModifyBe(): """gnt-cluster modify -B""" for fail, cmd in [ - # mem - (False, ["gnt-cluster", "modify", "-B", "memory=256"]), - (False, ["sh", "-c", "gnt-cluster info|grep '^ *memory: 256$'"]), - (True, ["gnt-cluster", "modify", "-B", "memory=a"]), - (False, ["gnt-cluster", "modify", "-B", "memory=128"]), - (False, ["sh", "-c", "gnt-cluster info|grep '^ *memory: 128$'"]), + # max/min mem + (False, ["gnt-cluster", "modify", "-B", "maxmem=256"]), + (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 256$'"]), + (False, ["gnt-cluster", "modify", "-B", "minmem=256"]), + (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 256$'"]), + (True, ["gnt-cluster", "modify", "-B", "maxmem=a"]), + (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 256$'"]), + (True, ["gnt-cluster", "modify", "-B", "minmem=a"]), + (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 256$'"]), + (False, ["gnt-cluster", "modify", "-B", "maxmem=128,minmem=128"]), + (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 128$'"]), + (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 128$'"]), # vcpus (False, ["gnt-cluster", "modify", "-B", "vcpus=4"]), (False, ["sh", "-c", "gnt-cluster info|grep '^ *vcpus: 4$'"]), - (True, ["gnt-cluster", "modify", "-B", "vcpus=a"]), + (True, ["gnt-cluster", "modify", "-B", "vcpus=a"]), (False, ["gnt-cluster", "modify", "-B", "vcpus=1"]), (False, ["sh", "-c", "gnt-cluster info|grep '^ *vcpus: 1$'"]), # auto_balance (False, ["gnt-cluster", "modify", 
"-B", "auto_balance=False"]), (False, ["sh", "-c", "gnt-cluster info|grep '^ *auto_balance: False$'"]), - (True, ["gnt-cluster", "modify", "-B", "auto_balance=1"]), + (True, ["gnt-cluster", "modify", "-B", "auto_balance=1"]), (False, ["gnt-cluster", "modify", "-B", "auto_balance=True"]), (False, ["sh", "-c", "gnt-cluster info|grep '^ *auto_balance: True$'"]), ]: AssertCommand(cmd, fail=fail) + # redo the original-requested BE parameters, if any + bep = qa_config.get("backend-parameters", "") + if bep: + AssertCommand(["gnt-cluster", "modify", "-B", bep]) + + +def _GetClusterIPolicy(): + """Return the run-time values of the cluster-level instance policy. + + @rtype: tuple + @return: (policy, specs), where: + - policy is a dictionary of the policy values, instance specs excluded + - specs is dict of dict, specs[par][key] is a spec value, where key is + "min", "max", or "std" + + """ + info = qa_utils.GetObjectInfo(["gnt-cluster", "info"]) + policy = info["Instance policy - limits for instances"] + (ret_policy, ret_specs) = qa_utils.ParseIPolicy(policy) + + # Sanity checks + assert len(ret_specs) > 0 + good = all("min" in d and "std" in d and "max" in d + for d in ret_specs.values()) + assert good, "Missing item in specs: %s" % ret_specs + assert len(ret_policy) > 0 + return (ret_policy, ret_specs) + + +def TestClusterModifyIPolicy(): + """gnt-cluster modify --ipolicy-*""" + basecmd = ["gnt-cluster", "modify"] + (old_policy, old_specs) = _GetClusterIPolicy() + for par in ["vcpu-ratio", "spindle-ratio"]: + curr_val = float(old_policy[par]) + test_values = [ + (True, 1.0), + (True, 1.5), + (True, 2), + (False, "a"), + # Restore the old value + (True, curr_val), + ] + for (good, val) in test_values: + cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)] + AssertCommand(cmd, fail=not good) + if good: + curr_val = val + # Check the affected parameter + (eff_policy, eff_specs) = _GetClusterIPolicy() + AssertEqual(float(eff_policy[par]), curr_val) + # Check everything else + AssertEqual(eff_specs, old_specs) + for p in eff_policy.keys(): + if p == par: + continue + AssertEqual(eff_policy[p], old_policy[p]) + + # Disk templates are treated slightly differently + par = "disk-templates" + disp_str = "enabled disk templates" + curr_val = old_policy[disp_str] + test_values = [ + (True, constants.DT_PLAIN), + (True, "%s,%s" % (constants.DT_PLAIN, constants.DT_DRBD8)), + (False, "thisisnotadisktemplate"), + (False, ""), + # Restore the old value + (True, curr_val.replace(" ", "")), + ] + for (good, val) in test_values: + cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)] + AssertCommand(cmd, fail=not good) + if good: + curr_val = val + # Check the affected parameter + (eff_policy, eff_specs) = _GetClusterIPolicy() + AssertEqual(eff_policy[disp_str].replace(" ", ""), curr_val) + # Check everything else + AssertEqual(eff_specs, old_specs) + for p in eff_policy.keys(): + if p == disp_str: + continue + AssertEqual(eff_policy[p], old_policy[p]) + + +def TestClusterSetISpecs(new_specs, fail=False, old_values=None): + """Change instance specs. + + @type new_specs: dict of dict + @param new_specs: new_specs[par][key], where key is "min", "max", "std". It + can be an empty dictionary. 
+
+  @type fail: bool
+  @param fail: if the change is expected to fail
+  @type old_values: tuple
+  @param old_values: (old_policy, old_specs), as returned by
+      L{_GetClusterIPolicy}
+  @return: same as L{_GetClusterIPolicy}
+
+  """
+  build_cmd = lambda opts: ["gnt-cluster", "modify"] + opts
+  return qa_utils.TestSetISpecs(new_specs, get_policy_fn=_GetClusterIPolicy,
+                                build_cmd_fn=build_cmd, fail=fail,
+                                old_values=old_values)
+
+
+def TestClusterModifyISpecs():
+  """gnt-cluster modify --specs-*"""
+  params = ["memory-size", "disk-size", "disk-count", "cpu-count", "nic-count"]
+  (cur_policy, cur_specs) = _GetClusterIPolicy()
+  for par in params:
+    test_values = [
+      (True, 0, 4, 12),
+      (True, 4, 4, 12),
+      (True, 4, 12, 12),
+      (True, 4, 4, 4),
+      (False, 4, 0, 12),
+      (False, 4, 16, 12),
+      (False, 4, 4, 0),
+      (False, 12, 4, 4),
+      (False, 12, 4, 0),
+      (False, "a", 4, 12),
+      (False, 0, "a", 12),
+      (False, 0, 4, "a"),
+      # This is to restore the old values
+      (True,
+       cur_specs[par]["min"], cur_specs[par]["std"], cur_specs[par]["max"])
+      ]
+    for (good, mn, st, mx) in test_values:
+      new_vals = {par: {"min": str(mn), "std": str(st), "max": str(mx)}}
+      cur_state = (cur_policy, cur_specs)
+      # We update cur_specs, as we've copied the values to restore already
+      (cur_policy, cur_specs) = TestClusterSetISpecs(new_vals, fail=not good,
+                                                     old_values=cur_state)
+
+  # Get the ipolicy command
+  mnode = qa_config.GetMasterNode()
+  initcmd = GetCommandOutput(mnode.primary, "gnt-cluster show-ispecs-cmd")
+  modcmd = ["gnt-cluster", "modify"]
+  opts = initcmd.split()
+  assert opts[0:2] == ["gnt-cluster", "init"]
+  for k in range(2, len(opts) - 1):
+    if opts[k].startswith("--ipolicy-"):
+      assert k + 2 <= len(opts)
+      modcmd.extend(opts[k:k + 2])
+  # Re-apply the ipolicy (this should be a no-op)
+  AssertCommand(modcmd)
+  new_initcmd = GetCommandOutput(mnode.primary, "gnt-cluster show-ispecs-cmd")
+  AssertEqual(initcmd, new_initcmd)
+
 
 def TestClusterInfo():
   """gnt-cluster info"""
@@ -278,15 +703,15 @@ def TestClusterRenewCrypto():
     ["--new-cluster-domain-secret", "--cluster-domain-secret=/dev/null"],
     ]
   for i in conflicting:
-    AssertCommand(cmd+i, fail=True)
+    AssertCommand(cmd + i, fail=True)
 
   # Invalid RAPI certificate
   cmd = ["gnt-cluster", "renew-crypto", "--force",
          "--rapi-certificate=/dev/null"]
   AssertCommand(cmd, fail=True)
 
-  rapi_cert_backup = qa_utils.BackupFile(master["primary"],
-                                         constants.RAPI_CERT_FILE)
+  rapi_cert_backup = qa_utils.BackupFile(master.primary,
+                                         pathutils.RAPI_CERT_FILE)
   try:
     # Custom RAPI certificate
     fh = tempfile.NamedTemporaryFile()
@@ -296,7 +721,7 @@ def TestClusterRenewCrypto():
 
      utils.GenerateSelfSignedSslCert(fh.name, validity=validity)
 
-      tmpcert = qa_utils.UploadFile(master["primary"], fh.name)
+      tmpcert = qa_utils.UploadFile(master.primary, fh.name)
      try:
        AssertCommand(["gnt-cluster", "renew-crypto", "--force",
                       "--rapi-certificate=%s" % tmpcert])
@@ -309,7 +734,7 @@ def TestClusterRenewCrypto():
      cds_fh.write("\n")
      cds_fh.flush()
 
-      tmpcds = qa_utils.UploadFile(master["primary"], cds_fh.name)
+      tmpcds = qa_utils.UploadFile(master.primary, cds_fh.name)
      try:
        AssertCommand(["gnt-cluster", "renew-crypto", "--force",
                       "--cluster-domain-secret=%s" % tmpcds])
@@ -332,19 +757,19 @@ def TestClusterBurnin():
   """Burnin"""
   master = qa_config.GetMasterNode()
 
-  options = qa_config.get('options', {})
-  disk_template = options.get('burnin-disk-template', 'drbd')
-  parallel = options.get('burnin-in-parallel', False)
-  check_inst = options.get('burnin-check-instances', False)
-  do_rename = options.get('burnin-rename', '')
-  do_reboot = options.get('burnin-reboot', True)
+  options = qa_config.get("options", {})
+  disk_template = options.get("burnin-disk-template", constants.DT_DRBD8)
+  parallel = options.get("burnin-in-parallel", False)
+  check_inst = options.get("burnin-check-instances", False)
+  do_rename = options.get("burnin-rename", "")
+  do_reboot = options.get("burnin-reboot", True)
   reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)
 
   # Get as many instances as we need
   instances = []
   try:
     try:
-      num = qa_config.get('options', {}).get('burnin-instances', 1)
+      num = qa_config.get("options", {}).get("burnin-instances", 1)
       for _ in range(0, num):
         instances.append(qa_config.AcquireInstance())
     except qa_error.OutOfInstancesError:
@@ -353,33 +778,36 @@
     if len(instances) < 1:
       raise qa_error.Error("Burnin needs at least one instance")
 
-    script = qa_utils.UploadFile(master['primary'], '../tools/burnin')
+    script = qa_utils.UploadFile(master.primary, "../tools/burnin")
     try:
+      disks = qa_config.GetDiskOptions()
       # Run burnin
       cmd = [script,
-             '--os=%s' % qa_config.get('os'),
-             '--disk-size=%s' % ",".join(qa_config.get('disk')),
-             '--disk-growth=%s' % ",".join(qa_config.get('disk-growth')),
-             '--disk-template=%s' % disk_template]
+             "--os=%s" % qa_config.get("os"),
+             "--minmem-size=%s" % qa_config.get(constants.BE_MINMEM),
+             "--maxmem-size=%s" % qa_config.get(constants.BE_MAXMEM),
+             "--disk-size=%s" % ",".join([d.get("size") for d in disks]),
+             "--disk-growth=%s" % ",".join([d.get("growth") for d in disks]),
+             "--disk-template=%s" % disk_template]
       if parallel:
-        cmd.append('--parallel')
-        cmd.append('--early-release')
+        cmd.append("--parallel")
+        cmd.append("--early-release")
       if check_inst:
-        cmd.append('--http-check')
+        cmd.append("--http-check")
       if do_rename:
-        cmd.append('--rename=%s' % do_rename)
+        cmd.append("--rename=%s" % do_rename)
       if not do_reboot:
-        cmd.append('--no-reboot')
+        cmd.append("--no-reboot")
       else:
-        cmd.append('--reboot-types=%s' % ",".join(reboot_types))
-      cmd += [inst['name'] for inst in instances]
+        cmd.append("--reboot-types=%s" % ",".join(reboot_types))
+      cmd += [inst.name for inst in instances]
 
       AssertCommand(cmd)
     finally:
       AssertCommand(["rm", "-f", script])
   finally:
     for inst in instances:
-      qa_config.ReleaseInstance(inst)
+      inst.Release()
 
 
 def TestClusterMasterFailover():
@@ -393,37 +821,51 @@
     # Back to original master node
     AssertCommand(cmd, node=master)
   finally:
-    qa_config.ReleaseNode(failovermaster)
+    failovermaster.Release()
+
+
+def _NodeQueueDrainFile(node):
+  """Returns path to queue drain file for a node.
+
+  """
+  return qa_utils.MakeNodePath(node, pathutils.JOB_QUEUE_DRAIN_FILE)
+
+
+def _AssertDrainFile(node, **kwargs):
+  """Checks for the queue drain file.
+ + """ + AssertCommand(["test", "-f", _NodeQueueDrainFile(node)], node=node, **kwargs) def TestClusterMasterFailoverWithDrainedQueue(): """gnt-cluster master-failover with drained queue""" - drain_check = ["test", "-f", constants.JOB_QUEUE_DRAIN_FILE] - master = qa_config.GetMasterNode() failovermaster = qa_config.AcquireNode(exclude=master) # Ensure queue is not drained for node in [master, failovermaster]: - AssertCommand(drain_check, node=node, fail=True) + _AssertDrainFile(node, fail=True) # Drain queue on failover master - AssertCommand(["touch", constants.JOB_QUEUE_DRAIN_FILE], node=failovermaster) + AssertCommand(["touch", _NodeQueueDrainFile(failovermaster)], + node=failovermaster) cmd = ["gnt-cluster", "master-failover"] try: - AssertCommand(drain_check, node=failovermaster) + _AssertDrainFile(failovermaster) AssertCommand(cmd, node=failovermaster) - AssertCommand(drain_check, fail=True) - AssertCommand(drain_check, node=failovermaster, fail=True) + _AssertDrainFile(master, fail=True) + _AssertDrainFile(failovermaster, fail=True) # Back to original master node AssertCommand(cmd, node=master) finally: - qa_config.ReleaseNode(failovermaster) + failovermaster.Release() - AssertCommand(drain_check, fail=True) - AssertCommand(drain_check, node=failovermaster, fail=True) + # Ensure queue is not drained + for node in [master, failovermaster]: + _AssertDrainFile(node, fail=True) def TestClusterCopyfile(): @@ -439,7 +881,7 @@ def TestClusterCopyfile(): f.seek(0) # Upload file to master node - testname = qa_utils.UploadFile(master['primary'], f.name) + testname = qa_utils.UploadFile(master.primary, f.name) try: # Copy file to all nodes AssertCommand(["gnt-cluster", "copyfile", testname]) @@ -452,8 +894,8 @@ def TestClusterCommand(): """gnt-cluster command""" uniqueid = utils.NewUUID() rfile = "/tmp/gnt%s" % utils.NewUUID() - rcmd = utils.ShellQuoteArgs(['echo', '-n', uniqueid]) - cmd = utils.ShellQuoteArgs(['gnt-cluster', 'command', + rcmd = utils.ShellQuoteArgs(["echo", "-n", uniqueid]) + cmd = utils.ShellQuoteArgs(["gnt-cluster", "command", "%s >%s" % (rcmd, rfile)]) try: @@ -466,3 +908,47 @@ def TestClusterCommand(): def TestClusterDestroy(): """gnt-cluster destroy""" AssertCommand(["gnt-cluster", "destroy", "--yes-do-it"]) + + +def TestClusterRepairDiskSizes(): + """gnt-cluster repair-disk-sizes""" + AssertCommand(["gnt-cluster", "repair-disk-sizes"]) + + +def TestSetExclStorCluster(newvalue): + """Set the exclusive_storage node parameter at the cluster level. + + @type newvalue: bool + @param newvalue: New value of exclusive_storage + @rtype: bool + @return: The old value of exclusive_storage + + """ + es_path = ["Default node parameters", "exclusive_storage"] + oldvalue = _GetClusterField(es_path) + AssertCommand(["gnt-cluster", "modify", "--node-parameters", + "exclusive_storage=%s" % newvalue]) + effvalue = _GetClusterField(es_path) + if effvalue != newvalue: + raise qa_error.Error("exclusive_storage has the wrong value: %s instead" + " of %s" % (effvalue, newvalue)) + qa_config.SetExclusiveStorage(newvalue) + return oldvalue + + +def TestExclStorSharedPv(node): + """cluster-verify reports LVs that share the same PV with exclusive_storage. 
+ + """ + vgname = qa_config.get("vg-name", constants.DEFAULT_VG) + lvname1 = _QA_LV_PREFIX + "vol1" + lvname2 = _QA_LV_PREFIX + "vol2" + node_name = node.primary + AssertCommand(["lvcreate", "-L1G", "-n", lvname1, vgname], node=node_name) + AssertClusterVerify(fail=True, errors=[constants.CV_ENODEORPHANLV]) + AssertCommand(["lvcreate", "-L1G", "-n", lvname2, vgname], node=node_name) + AssertClusterVerify(fail=True, errors=[constants.CV_ENODELVM, + constants.CV_ENODEORPHANLV]) + AssertCommand(["lvremove", "-f", "/".join([vgname, lvname1])], node=node_name) + AssertCommand(["lvremove", "-f", "/".join([vgname, lvname2])], node=node_name) + AssertClusterVerify()