RunTestIf(os_enabled, fn)
-def RunCommonInstanceTests(instance):
+def RunCommonInstanceTests(instance, inst_nodes):
"""Runs a few tests that are common to all disk types.
"""
RunTestIf("tags", qa_tags.TestInstanceTags, instance)
+ if instance.disk_template == constants.DT_DRBD8:
+ RunTestIf("cluster-verify",
+ qa_cluster.TestClusterVerifyDisksBrokenDRBD, instance, inst_nodes)
RunTestIf("cluster-verify", qa_cluster.TestClusterVerify)
RunTestIf(qa_rapi.Enabled, qa_rapi.TestInstance, instance)
RunTest(qa_instance.TestInstanceStartup, instance)
RunTestIf("instance-modify-disks",
qa_instance.TestInstanceModifyDisks, instance)
- RunCommonInstanceTests(instance)
+ RunCommonInstanceTests(instance, inodes)
if qa_config.TestEnabled("instance-modify-primary"):
othernode = qa_config.AcquireNode()
RunTest(qa_instance.TestInstanceModifyPrimaryAndBack,
use_client)
try:
if qa_config.TestEnabled("instance-plain-rapi-common-tests"):
- RunCommonInstanceTests(rapi_instance)
+ RunCommonInstanceTests(rapi_instance, [pnode])
RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance, use_client)
finally:
rapi_instance.Release()
from ganeti import pathutils
import qa_config
+import qa_daemon
import qa_utils
import qa_error
import qa_instance
AssertCommand(["gnt-cluster", "verify-disks"])
+def TestClusterVerifyDisksBrokenDRBD(instance, inst_nodes):
+  """gnt-cluster verify-disks with broken DRBD
+
+  Breaks the instance's DRBD devices on its secondary node, then checks
+  that a first "gnt-cluster verify-disks" run reactivates them and that
+  a second run has nothing left to activate.
+
+  @param instance: the DRBD-based QA instance to break (its C{name}
+    attribute is used to look up DRBD minors and to match the
+    activation message)
+  @param inst_nodes: the instance's nodes; the second entry is taken as
+    the secondary node on which the disks are broken
+
+  """
+  # Pause the watcher so it does not re-activate the disks behind our
+  # back before verify-disks gets a chance to.
+  qa_daemon.TestPauseWatcher()
+
+  try:
+    info = qa_instance.GetInstanceInfo(instance.name)
+    snode = inst_nodes[1]
+    # Break every DRBD minor on the secondary node, alternating between
+    # taking the device fully down (even indices) and detaching it from
+    # its backing storage (odd indices) so both failure modes are
+    # exercised.  Each command is issued in both drbdsetup syntaxes
+    # (old "drbdsetup <minor> <cmd>" and new "drbdsetup <cmd> <arg>");
+    # one of the two is expected to fail, hence the redirected output
+    # and the trailing "|| /bin/true".
+    for idx, minor in enumerate(info["drbd-minors"][snode.primary]):
+      if idx % 2 == 0:
+        break_drbd_cmd = \
+          "(drbdsetup %d down >/dev/null 2>&1;" \
+          " drbdsetup down resource%d >/dev/null 2>&1) || /bin/true" % \
+          (minor, minor)
+      else:
+        break_drbd_cmd = \
+          "(drbdsetup %d detach >/dev/null 2>&1;" \
+          " drbdsetup detach %d >/dev/null 2>&1) || /bin/true" % \
+          (minor, minor)
+      AssertCommand(break_drbd_cmd, node=snode)
+
+    # First run: verify-disks must notice the broken disks and report
+    # that it is activating them for this instance.
+    verify_output = GetCommandOutput(qa_config.GetMasterNode().primary,
+                                     "gnt-cluster verify-disks")
+    activation_msg = "Activating disks for instance '%s'" % instance.name
+    if activation_msg not in verify_output:
+      raise qa_error.Error("gnt-cluster verify-disks did not activate broken"
+                           " DRBD disks:\n%s" % verify_output)
+
+    # Second run: the previous run must have repaired everything, so no
+    # further activation may be attempted.
+    verify_output = GetCommandOutput(qa_config.GetMasterNode().primary,
+                                     "gnt-cluster verify-disks")
+    if activation_msg in verify_output:
+      raise qa_error.Error("gnt-cluster verify-disks wants to activate broken"
+                           " DRBD disks on second attempt:\n%s" % verify_output)
+
+    # Finally make sure the cluster as a whole verifies cleanly again.
+    AssertCommand(_CLUSTER_VERIFY)
+  finally:
+    # Always resume the watcher, even if the checks above failed.
+    qa_daemon.TestResumeWatcher()
+
+
def TestJobqueue():
  """gnt-debug test-jobqueue"""
+  # Runs the built-in job-queue self-test and asserts it exits cleanly.
  AssertCommand(["gnt-debug", "test-jobqueue"])
return "/sys/block/%s/device/state" % disk
-def _GetInstanceInfo(instance):
+def GetInstanceInfo(instance):
"""Return information about the actual state of an instance.
@type instance: string
@param instance: the instance
"""
- info = _GetInstanceInfo(instance.name)
+ info = GetInstanceInfo(instance.name)
# FIXME: destruction/removal should be part of the disk class
if info["storage-type"] == constants.ST_LVM_VG:
vols = info["volumes"]
finally:
qa_utils.RemoveFromEtcHosts(["meeeeh-not-exists", rename_target])
- info = _GetInstanceInfo(rename_source)
+ info = GetInstanceInfo(rename_source)
# Check instance volume tags correctly updated. Note that this check is lvm
# specific, so we skip it for non-lvm-based instances.
@param set_online: function to call to set the node on-line
"""
- info = _GetInstanceInfo(instance.name)
+ info = GetInstanceInfo(instance.name)
set_offline(snode)
try:
TestInstanceRemove(instance)
# syntax), we always have to perform both commands and ignore the
# output.
drbd_shutdown_cmd = \
- "(drbdsetup %d down && drbdsetup down resource%d) || /bin/true" % \
+ "(drbdsetup %d down >/dev/null 2>&1;" \
+ " drbdsetup down resource%d >/dev/null 2>&1) || /bin/true" % \
(minor, minor)
AssertCommand(drbd_shutdown_cmd, node=snode)
AssertCommand(["lvremove", "-f"] + info["volumes"], node=snode)