QA: test for --{shared-,}file-storage-dir
[ganeti-local] / qa / ganeti-qa.py
index 29eab43..9b4dc5e 100755 (executable)
 # pylint: disable=C0103
 # due to invalid name
 
-import sys
+import copy
 import datetime
 import optparse
+import sys
 
 import qa_cluster
 import qa_config
@@ -37,6 +38,7 @@ import qa_env
 import qa_error
 import qa_group
 import qa_instance
+import qa_monitoring
 import qa_network
 import qa_node
 import qa_os
@@ -48,7 +50,9 @@ import qa_utils
 from ganeti import utils
 from ganeti import rapi # pylint: disable=W0611
 from ganeti import constants
+from ganeti import pathutils
 
+from ganeti.http.auth import ParsePasswordFile
 import ganeti.rapi.client # pylint: disable=W0611
 from ganeti.rapi.client import UsesRapiClient
 
@@ -122,19 +126,48 @@ def RunEnvTests():
   RunTestIf("env", qa_env.TestGanetiCommands)
 
 
-def SetupCluster(rapi_user, rapi_secret):
+def _LookupRapiSecret(rapi_user):
+  """Find the RAPI secret for the given user.
+
+  @param rapi_user: Login user
+  @return: Login secret for the user
+
+  """
+  CTEXT = "{CLEARTEXT}"
+  master = qa_config.GetMasterNode()
+  cmd = ["cat", qa_utils.MakeNodePath(master, pathutils.RAPI_USERS_FILE)]
+  file_content = qa_utils.GetCommandOutput(master.primary,
+                                           utils.ShellQuoteArgs(cmd))
+  users = ParsePasswordFile(file_content)
+  entry = users.get(rapi_user)
+  if not entry:
+    raise qa_error.Error("User %s not found in RAPI users file" % rapi_user)
+  secret = entry.password
+  if secret.upper().startswith(CTEXT):
+    secret = secret[len(CTEXT):]
+  elif secret.startswith("{"):
+    raise qa_error.Error("Unsupported password schema for RAPI user %s:"
+                         " not a clear text password" % rapi_user)
+  return secret
+
+
+def SetupCluster(rapi_user):
   """Initializes the cluster.
 
   @param rapi_user: Login user for RAPI
-  @param rapi_secret: Login secret for RAPI
+  @return: Login secret for RAPI
 
   """
+  rapi_secret = utils.GenerateSecret()
   RunTestIf("create-cluster", qa_cluster.TestClusterInit,
             rapi_user, rapi_secret)
   if not qa_config.TestEnabled("create-cluster"):
     # If the cluster is already in place, we assume that exclusive-storage is
     # already set according to the configuration
     qa_config.SetExclusiveStorage(qa_config.get("exclusive-storage", False))
+    if qa_rapi.Enabled():
+      # To support RAPI on an existing cluster we have to find out the secret
+      rapi_secret = _LookupRapiSecret(rapi_user)
 
   # Test on empty cluster
   RunTestIf("node-list", qa_node.TestNodeList)
@@ -161,6 +194,8 @@ def SetupCluster(rapi_user, rapi_secret):
 
   RunTestIf("node-info", qa_node.TestNodeInfo)
 
+  return rapi_secret
+
 
 def RunClusterTests():
   """Runs tests related to gnt-cluster.
@@ -178,6 +213,8 @@ def RunClusterTests():
     ("cluster-modify", qa_cluster.TestClusterModifyBe),
     ("cluster-modify", qa_cluster.TestClusterModifyDisk),
     ("cluster-modify", qa_cluster.TestClusterModifyDiskTemplates),
+    ("cluster-modify", qa_cluster.TestClusterModifyFileStorageDir),
+    ("cluster-modify", qa_cluster.TestClusterModifySharedFileStorageDir),
     ("cluster-rename", qa_cluster.TestClusterRename),
     ("cluster-info", qa_cluster.TestClusterVersion),
     ("cluster-info", qa_cluster.TestClusterInfo),
@@ -238,7 +275,7 @@ def RunOsTests():
     RunTestIf(os_enabled, fn)
 
 
-def RunCommonInstanceTests(instance):
+def RunCommonInstanceTests(instance, inst_nodes):
   """Runs a few tests that are common to all disk types.
 
   """
@@ -267,6 +304,8 @@ def RunCommonInstanceTests(instance):
   RunTestIf(["instance-console", qa_rapi.Enabled],
             qa_rapi.TestRapiInstanceConsole, instance)
 
+  RunTestIf("instance-device-names", qa_instance.TestInstanceDeviceNames,
+            instance)
   DOWN_TESTS = qa_config.Either([
     "instance-reinstall",
     "instance-rename",
@@ -309,6 +348,9 @@ def RunCommonInstanceTests(instance):
 
   RunTestIf("tags", qa_tags.TestInstanceTags, instance)
 
+  if instance.disk_template == constants.DT_DRBD8:
+    RunTestIf("cluster-verify",
+              qa_cluster.TestClusterVerifyDisksBrokenDRBD, instance, inst_nodes)
   RunTestIf("cluster-verify", qa_cluster.TestClusterVerify)
 
   RunTestIf(qa_rapi.Enabled, qa_rapi.TestInstance, instance)
@@ -368,7 +410,8 @@ def RunExportImportTests(instance, inodes):
   # based storage types are untested, though. Also note that import could still
   # work, but is deeply embedded into the "export" case.
   if (qa_config.TestEnabled("instance-export") and
-      instance.disk_template != constants.DT_FILE):
+      instance.disk_template not in [constants.DT_FILE,
+                                     constants.DT_SHARED_FILE]):
     RunTest(qa_instance.TestInstanceExportNoTarget, instance)
 
     pnode = inodes[0]
@@ -466,6 +509,7 @@ def RunHardwareFailureTests(instance, inodes):
   if len(inodes) >= 2:
     RunTestIf("node-evacuate", qa_node.TestNodeEvacuate, inodes[0], inodes[1])
     RunTestIf("node-failover", qa_node.TestNodeFailover, inodes[0], inodes[1])
+    RunTestIf("node-migrate", qa_node.TestNodeMigrate, inodes[0], inodes[1])
 
 
 def RunExclusiveStorageTests():
@@ -521,33 +565,52 @@ def RunExclusiveStorageTests():
 
 
 def _BuildSpecDict(par, mn, st, mx):
-  return {par: {"min": mn, "std": st, "max": mx}}
+  return {
+    constants.ISPECS_MINMAX: [{
+      constants.ISPECS_MIN: {par: mn},
+      constants.ISPECS_MAX: {par: mx},
+      }],
+    constants.ISPECS_STD: {par: st},
+    }
+
+
+def _BuildDoubleSpecDict(index, par, mn, st, mx):
+  new_spec = {
+    constants.ISPECS_MINMAX: [{}, {}],
+    }
+  if st is not None:
+    new_spec[constants.ISPECS_STD] = {par: st}
+  new_spec[constants.ISPECS_MINMAX][index] = {
+    constants.ISPECS_MIN: {par: mn},
+    constants.ISPECS_MAX: {par: mx},
+    }
+  return new_spec
 
 
 def TestIPolicyPlainInstance():
   """Test instance policy interaction with instances"""
-  params = ["mem-size", "cpu-count", "disk-count", "disk-size", "nic-count"]
+  params = ["memory-size", "cpu-count", "disk-count", "disk-size", "nic-count"]
   if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
     print "Template %s not supported" % constants.DT_PLAIN
     return
 
   # This test assumes that the group policy is empty
-  (_, old_specs) = qa_cluster.TestClusterSetISpecs({})
+  (_, old_specs) = qa_cluster.TestClusterSetISpecs()
+  # We also assume to have only one min/max bound
+  assert len(old_specs[constants.ISPECS_MINMAX]) == 1
   node = qa_config.AcquireNode()
   try:
-    # Log of policy changes, list of tuples: (change, policy_violated)
+    # Log of policy changes, list of tuples:
+    # (full_change, incremental_change, policy_violated)
     history = []
     instance = qa_instance.TestInstanceAddWithPlainDisk([node])
     try:
       policyerror = [constants.CV_EINSTANCEPOLICY]
       for par in params:
-        qa_cluster.AssertClusterVerify()
         (iminval, imaxval) = qa_instance.GetInstanceSpec(instance.name, par)
         # Some specs must be multiple of 4
         new_spec = _BuildSpecDict(par, imaxval + 4, imaxval + 4, imaxval + 4)
-        history.append((new_spec, True))
-        qa_cluster.TestClusterSetISpecs(new_spec)
-        qa_cluster.AssertClusterVerify(warnings=policyerror)
+        history.append((None, new_spec, True))
         if iminval > 0:
           # Some specs must be multiple of 4
           if iminval >= 4:
@@ -555,19 +618,57 @@ def TestIPolicyPlainInstance():
           else:
             upper = iminval - 1
           new_spec = _BuildSpecDict(par, 0, upper, upper)
-          history.append((new_spec, True))
-          qa_cluster.TestClusterSetISpecs(new_spec)
+          history.append((None, new_spec, True))
+        history.append((old_specs, None, False))
+
+      # Test with two instance specs
+      double_specs = copy.deepcopy(old_specs)
+      double_specs[constants.ISPECS_MINMAX] = \
+          double_specs[constants.ISPECS_MINMAX] * 2
+      (par1, par2) = params[0:2]
+      (_, imaxval1) = qa_instance.GetInstanceSpec(instance.name, par1)
+      (_, imaxval2) = qa_instance.GetInstanceSpec(instance.name, par2)
+      old_minmax = old_specs[constants.ISPECS_MINMAX][0]
+      history.extend([
+        (double_specs, None, False),
+        # The first min/max limit is being violated
+        (None,
+         _BuildDoubleSpecDict(0, par1, imaxval1 + 4, imaxval1 + 4,
+                              imaxval1 + 4),
+         False),
+        # Both min/max limits are being violated
+        (None,
+         _BuildDoubleSpecDict(1, par2, imaxval2 + 4, None, imaxval2 + 4),
+         True),
+        # The second min/max limit is being violated
+        (None,
+         _BuildDoubleSpecDict(0, par1,
+                              old_minmax[constants.ISPECS_MIN][par1],
+                              old_specs[constants.ISPECS_STD][par1],
+                              old_minmax[constants.ISPECS_MAX][par1]),
+         False),
+        (old_specs, None, False),
+        ])
+
+      # Apply the changes, and check policy violations after each change
+      qa_cluster.AssertClusterVerify()
+      for (new_specs, diff_specs, failed) in history:
+        qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
+                                        diff_specs=diff_specs)
+        if failed:
           qa_cluster.AssertClusterVerify(warnings=policyerror)
-        qa_cluster.TestClusterSetISpecs(old_specs)
-        history.append((old_specs, False))
+        else:
+          qa_cluster.AssertClusterVerify()
+
       qa_instance.TestInstanceRemove(instance)
     finally:
       instance.Release()
 
     # Now we replay the same policy changes, and we expect that the instance
     # cannot be created for the cases where we had a policy violation above
-    for (change, failed) in history:
-      qa_cluster.TestClusterSetISpecs(change)
+    for (new_specs, diff_specs, failed) in history:
+      qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
+                                      diff_specs=diff_specs)
       if failed:
         qa_instance.TestInstanceAddWithPlainDisk([node], fail=True)
       # Instance creation with no policy violation has been tested already
@@ -575,6 +676,30 @@ def TestIPolicyPlainInstance():
     node.Release()
 
 
+def IsExclusiveStorageInstanceTestEnabled():
+  test_name = "exclusive-storage-instance-tests"
+  if qa_config.TestEnabled(test_name):
+    vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
+    vgscmd = utils.ShellQuoteArgs([
+      "vgs", "--noheadings", "-o", "pv_count", vgname,
+      ])
+    nodes = qa_config.GetConfig()["nodes"]
+    for node in nodes:
+      try:
+        pvnum = int(qa_utils.GetCommandOutput(node.primary, vgscmd))
+      except Exception, e:
+        msg = ("Cannot get the number of PVs on %s, needed by '%s': %s" %
+               (node.primary, test_name, e))
+        raise qa_error.Error(msg)
+      if pvnum < 2:
+        raise qa_error.Error("Node %s has not enough PVs (%s) to run '%s'" %
+                             (node.primary, pvnum, test_name))
+    res = True
+  else:
+    res = False
+  return res
+
+
 def RunInstanceTests():
   """Create and exercise instances."""
   instance_tests = [
@@ -586,7 +711,9 @@ def RunInstanceTests():
      qa_instance.TestInstanceAddDiskless, 1),
     ("instance-add-file", constants.DT_FILE,
      qa_instance.TestInstanceAddFile, 1),
-  ]
+    ("instance-add-shared-file", constants.DT_SHARED_FILE,
+     qa_instance.TestInstanceAddSharedFile, 1),
+    ]
 
   for (test_name, templ, create_fun, num_nodes) in instance_tests:
     if (qa_config.TestEnabled(test_name) and
@@ -608,7 +735,9 @@ def RunInstanceTests():
             RunTest(qa_instance.TestInstanceConvertDiskToPlain,
                     instance, inodes)
             RunTest(qa_instance.TestInstanceStartup, instance)
-          RunCommonInstanceTests(instance)
+          RunTestIf("instance-modify-disks",
+                    qa_instance.TestInstanceModifyDisks, instance)
+          RunCommonInstanceTests(instance, inodes)
           if qa_config.TestEnabled("instance-modify-primary"):
             othernode = qa_config.AcquireNode()
             RunTest(qa_instance.TestInstanceModifyPrimaryAndBack,
@@ -627,18 +756,23 @@ def RunInstanceTests():
       qa_cluster.AssertClusterVerify()
 
 
+def RunMonitoringTests():
+  if qa_config.TestEnabled("mon-collector"):
+    RunTest(qa_monitoring.TestInstStatusCollector)
+
+
 def RunQa():
   """Main QA body.
 
   """
   rapi_user = "ganeti-qa"
-  rapi_secret = utils.GenerateSecret()
 
   RunEnvTests()
-  SetupCluster(rapi_user, rapi_secret)
+  rapi_secret = SetupCluster(rapi_user)
 
-  # Load RAPI certificate
-  qa_rapi.Setup(rapi_user, rapi_secret)
+  if qa_rapi.Enabled():
+    # Load RAPI certificate
+    qa_rapi.Setup(rapi_user, rapi_secret)
 
   RunClusterTests()
   RunOsTests()
@@ -676,7 +810,7 @@ def RunQa():
                                   use_client)
           try:
             if qa_config.TestEnabled("instance-plain-rapi-common-tests"):
-              RunCommonInstanceTests(rapi_instance)
+              RunCommonInstanceTests(rapi_instance, [pnode])
             RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance, use_client)
           finally:
             rapi_instance.Release()
@@ -687,7 +821,7 @@ def RunQa():
 
   config_list = [
     ("default-instance-tests", lambda: None, lambda _: None),
-    ("exclusive-storage-instance-tests",
+    (IsExclusiveStorageInstanceTestEnabled,
      lambda: qa_cluster.TestSetExclStorCluster(True),
      qa_cluster.TestSetExclStorCluster),
   ]
@@ -725,6 +859,10 @@ def RunQa():
   RunTestIf(["cluster-instance-policy", "instance-add-plain-disk"],
             TestIPolicyPlainInstance)
 
+  RunTestIf(
+    "instance-add-restricted-by-disktemplates",
+    qa_instance.TestInstanceCreationRestrictedByDiskTemplates)
+
   # Test removing instance with offline drbd secondary
   if qa_config.TestEnabled(["instance-remove-drbd-offline",
                             "instance-add-drbd-disk"]):
@@ -744,6 +882,8 @@ def RunQa():
       snode.Release()
     qa_cluster.AssertClusterVerify()
 
+  RunMonitoringTests()
+
   RunTestIf("create-cluster", qa_node.TestNodeRemoveAll)
 
   RunTestIf("cluster-destroy", qa_cluster.TestClusterDestroy)