Hs2Py constants: add remaining '_autoconf.*' constants
diff --git a/qa/qa_instance.py b/qa/qa_instance.py
index 2578f0c..e7d8bb4 100644
--- a/qa/qa_instance.py
+++ b/qa/qa_instance.py
@@ -23,7 +23,7 @@
 
 """
 
-import operator
+import os
 import re
 
 from ganeti import utils
@@ -35,69 +35,34 @@ import qa_config
 import qa_utils
 import qa_error
 
-from qa_utils import AssertIn, AssertCommand, AssertEqual
+from qa_utils import AssertCommand, AssertEqual
 from qa_utils import InstanceCheck, INST_DOWN, INST_UP, FIRST_ARG, RETURN_VALUE
+from qa_instance_utils import CheckSsconfInstanceList, \
+                              CreateInstanceDrbd8, \
+                              CreateInstanceByDiskTemplate, \
+                              CreateInstanceByDiskTemplateOneNode, \
+                              GetGenericAddParameters
 
 
 def _GetDiskStatePath(disk):
   return "/sys/block/%s/device/state" % disk
 
 
-def _GetGenericAddParameters(inst, force_mac=None):
-  params = ["-B"]
-  params.append("%s=%s,%s=%s" % (constants.BE_MINMEM,
-                                 qa_config.get(constants.BE_MINMEM),
-                                 constants.BE_MAXMEM,
-                                 qa_config.get(constants.BE_MAXMEM)))
-  for idx, size in enumerate(qa_config.get("disk")):
-    params.extend(["--disk", "%s:size=%s" % (idx, size)])
-
-  # Set static MAC address if configured
-  if force_mac:
-    nic0_mac = force_mac
-  else:
-    nic0_mac = qa_config.GetInstanceNicMac(inst)
-  if nic0_mac:
-    params.extend(["--net", "0:mac=%s" % nic0_mac])
-
-  return params
-
-
-def _DiskTest(node, disk_template):
-  instance = qa_config.AcquireInstance()
-  try:
-    cmd = (["gnt-instance", "add",
-            "--os-type=%s" % qa_config.get("os"),
-            "--disk-template=%s" % disk_template,
-            "--node=%s" % node] +
-           _GetGenericAddParameters(instance))
-    cmd.append(instance["name"])
-
-    AssertCommand(cmd)
-
-    _CheckSsconfInstanceList(instance["name"])
-    qa_config.SetInstanceTemplate(instance, disk_template)
-
-    return instance
-  except:
-    qa_config.ReleaseInstance(instance)
-    raise
-
-
-def _GetInstanceInfo(instance):
+def GetInstanceInfo(instance):
   """Return information about the actual state of an instance.
 
   @type instance: string
   @param instance: the instance name
-  @return: a dictionary with two keys:
+  @return: a dictionary with the following keys:
       - "nodes": instance nodes, a list of strings
       - "volumes": instance volume IDs, a list of strings
+      - "drbd-minors": DRBD minors used by the instance, a dictionary where
+        keys are nodes, and values are lists of integers (or an empty
+        dictionary for non-DRBD instances)
+      - "disk-template": instance disk template
+      - "storage-type": storage type associated with the instance disk template
 
   """
-  master = qa_config.GetMasterNode()
-  infocmd = utils.ShellQuoteArgs(["gnt-instance", "info", instance])
-  info_out = qa_utils.GetCommandOutput(master["primary"], infocmd)
-  re_node = re.compile(r"^\s+-\s+(?:primary|secondaries):\s+(\S.+)$")
   node_elem = r"([^,()]+)(?:\s+\([^)]+\))?"
   # re_nodelist matches a list of nodes returned by gnt-instance info, e.g.:
   #  node1.fqdn
@@ -105,28 +70,58 @@ def _GetInstanceInfo(instance):
   #  node4.fqdn (group mygroup, group UUID 01234567-abcd-0123-4567-0123456789ab)
   # FIXME This works with no more than 2 secondaries
   re_nodelist = re.compile(node_elem + "(?:," + node_elem + ")?$")
-  re_vol = re.compile(r"^\s+logical_id:\s+(\S+)$")
+
+  info = qa_utils.GetObjectInfo(["gnt-instance", "info", instance])[0]
   nodes = []
+  for nodeinfo in info["Nodes"]:
+    if "primary" in nodeinfo:
+      nodes.append(nodeinfo["primary"])
+    elif "secondaries" in nodeinfo:
+      nodestr = nodeinfo["secondaries"]
+      if nodestr:
+        m = re_nodelist.match(nodestr)
+        if m:
+          nodes.extend(filter(None, m.groups()))
+        else:
+          nodes.append(nodestr)
+
+  disk_template = info["Disk template"]
+  if not disk_template:
+    raise qa_error.Error("Can't get instance disk template")
+  storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
+
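+  # re_drbdnode matches the "nodeA"/"nodeB" lines shown by gnt-instance info
+  # for DRBD disks, e.g. "node1.example.com, minor=0" (hypothetical name)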
+  re_drbdnode = re.compile(r"^([^\s,]+),\s+minor=([0-9]+)$")
   vols = []
-  for line in info_out.splitlines():
-    m = re_node.match(line)
-    if m:
-      nodestr = m.group(1)
-      m2 = re_nodelist.match(nodestr)
-      if m2:
-        nodes.extend(filter(None, m2.groups()))
-      else:
-        nodes.append(nodestr)
-    m = re_vol.match(line)
-    if m:
-      vols.append(m.group(1))
-  assert vols
+  drbd_min = {}
+  for (count, diskinfo) in enumerate(info["Disks"]):
+    (dtype, _) = diskinfo["disk/%s" % count].split(",", 1)
+    if dtype == constants.DT_DRBD8:
+      for child in diskinfo["child devices"]:
+        vols.append(child["logical_id"])
+      for key in ["nodeA", "nodeB"]:
+        m = re_drbdnode.match(diskinfo[key])
+        if not m:
+          raise qa_error.Error("Cannot parse DRBD info: %s" % diskinfo[key])
+        node = m.group(1)
+        minor = int(m.group(2))
+        minorlist = drbd_min.setdefault(node, [])
+        minorlist.append(minor)
+    elif dtype == constants.DT_PLAIN:
+      vols.append(diskinfo["logical_id"])
+
   assert nodes
-  return {"nodes": nodes, "volumes": vols}
+  assert len(nodes) < 2 or vols
+  return {
+    "nodes": nodes,
+    "volumes": vols,
+    "drbd-minors": drbd_min,
+    "disk-template": disk_template,
+    "storage-type": storage_type,
+    }
 
 
-def _DestroyInstanceVolumes(instance):
-  """Remove all the LVM volumes of an instance.
+def _DestroyInstanceDisks(instance):
+  """Remove all the backend disks of an instance.
 
   This is used to simulate HW errors (dead nodes, broken disks...); the
   configuration of the instance is not affected.
@@ -134,25 +129,50 @@ def _DestroyInstanceVolumes(instance):
   @param instance: the instance
 
   """
-  info = _GetInstanceInfo(instance["name"])
-  vols = info["volumes"]
-  for node in info["nodes"]:
-    AssertCommand(["lvremove", "-f"] + vols, node=node)
+  info = GetInstanceInfo(instance.name)
+  # FIXME: destruction/removal should be part of the disk class
+  if info["storage-type"] == constants.ST_LVM_VG:
+    vols = info["volumes"]
+    for node in info["nodes"]:
+      AssertCommand(["lvremove", "-f"] + vols, node=node)
+  elif info["storage-type"] == constants.ST_FILE:
+    # Note that this works for both file and sharedfile, and this is intended.
+    storage_dir = qa_config.get("file-storage-dir",
+                                pathutils.DEFAULT_FILE_STORAGE_DIR)
+    idir = os.path.join(storage_dir, instance.name)
+    for node in info["nodes"]:
+      AssertCommand(["rm", "-rf", idir], node=node)
+  elif info["storage-type"] == constants.ST_DISKLESS:
+    pass
 
 
-def _GetBoolInstanceField(instance, field):
-  """Get the Boolean value of a field of an instance.
+def _GetInstanceField(instance, field):
+  """Get the value of a field of an instance.
 
   @type instance: string
   @param instance: Instance name
   @type field: string
   @param field: Name of the field
+  @rtype: string
 
   """
   master = qa_config.GetMasterNode()
   infocmd = utils.ShellQuoteArgs(["gnt-instance", "list", "--no-headers",
-                                  "-o", field, instance])
-  info_out = qa_utils.GetCommandOutput(master["primary"], infocmd).strip()
+                                  "--units", "m", "-o", field, instance])
+  return qa_utils.GetCommandOutput(master.primary, infocmd).strip()
+
+
+def _GetBoolInstanceField(instance, field):
+  """Get the Boolean value of a field of an instance.
+
+  @type instance: string
+  @param instance: Instance name
+  @type field: string
+  @param field: Name of the field
+  @rtype: bool
+
+  """
+  info_out = _GetInstanceField(instance, field)
   if info_out == "Y":
     return True
   elif info_out == "N":
@@ -162,54 +182,132 @@ def _GetBoolInstanceField(instance, field):
                          " %s" % (field, instance, info_out))
 
 
+def _GetNumInstanceField(instance, field):
+  """Get a numeric value of a field of an instance.
+
+  @type instance: string
+  @param instance: Instance name
+  @type field: string
+  @param field: Name of the field
+  @rtype: int or float
+
+  """
+  info_out = _GetInstanceField(instance, field)
+  try:
+    ret = int(info_out)
+  except ValueError:
+    try:
+      ret = float(info_out)
+    except ValueError:
+      raise qa_error.Error("Field %s of instance %s has a non-numeric value:"
+                           " %s" % (field, instance, info_out))
+  return ret
+
+
+def GetInstanceSpec(instance, spec):
+  """Return the current spec for the given parameter.
+
+  @type instance: string
+  @param instance: Instance name
+  @type spec: string
+  @param spec: one of the supported parameters: "memory-size", "cpu-count",
+      "disk-count", "disk-size", "nic-count"
+  @rtype: tuple
+  @return: (minspec, maxspec); minspec and maxspec can be different only for
+      memory and disk size
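+
+  Example (hypothetical values): for an instance with minmem=128M and
+  maxmem=256M, C{GetInstanceSpec("instance1", "memory-size")} could return
+  C{(128, 256)}, since the underlying fields are queried in mebibytes.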
+
+  """
+  specmap = {
+    "memory-size": ["be/minmem", "be/maxmem"],
+    "cpu-count": ["vcpus"],
+    "disk-count": ["disk.count"],
+    "disk-size": ["disk.size/ "],
+    "nic-count": ["nic.count"],
+    }
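+  # The "disk.size/ " entry is just a placeholder: the actual per-disk field
+  # names are built in the "disk-size" branch below.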
+  # For disks, first we need the number of disks
+  if spec == "disk-size":
+    (numdisk, _) = GetInstanceSpec(instance, "disk-count")
+    fields = ["disk.size/%s" % k for k in range(0, numdisk)]
+  else:
+    assert spec in specmap, "%s not in %s" % (spec, specmap)
+    fields = specmap[spec]
+  values = [_GetNumInstanceField(instance, f) for f in fields]
+  return (min(values), max(values))
+
+
 def IsFailoverSupported(instance):
-  templ = qa_config.GetInstanceTemplate(instance)
-  return templ in constants.DTS_MIRRORED
+  return instance.disk_template in constants.DTS_MIRRORED
 
 
 def IsMigrationSupported(instance):
-  templ = qa_config.GetInstanceTemplate(instance)
-  return templ in constants.DTS_MIRRORED
+  return instance.disk_template in constants.DTS_MIRRORED
 
 
 def IsDiskReplacingSupported(instance):
-  templ = qa_config.GetInstanceTemplate(instance)
-  return templ == constants.DT_DRBD8
+  return instance.disk_template == constants.DT_DRBD8
 
 
-@InstanceCheck(None, INST_UP, RETURN_VALUE)
-def TestInstanceAddWithPlainDisk(nodes):
+def IsDiskSupported(instance):
+  return instance.disk_template != constants.DT_DISKLESS
+
+
+def TestInstanceAddWithPlainDisk(nodes, fail=False):
   """gnt-instance add -t plain"""
-  assert len(nodes) == 1
-  return _DiskTest(nodes[0]["primary"], "plain")
+  if constants.DT_PLAIN in qa_config.GetEnabledDiskTemplates():
+    instance = CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_PLAIN,
+                                                    fail=fail)
+    if not fail:
+      qa_utils.RunInstanceCheck(instance, True)
+    return instance
 
 
 @InstanceCheck(None, INST_UP, RETURN_VALUE)
 def TestInstanceAddWithDrbdDisk(nodes):
   """gnt-instance add -t drbd"""
-  assert len(nodes) == 2
-  return _DiskTest(":".join(map(operator.itemgetter("primary"), nodes)),
-                   "drbd")
+  if constants.DT_DRBD8 in qa_config.GetEnabledDiskTemplates():
+    return CreateInstanceDrbd8(nodes)
+
+
+@InstanceCheck(None, INST_UP, RETURN_VALUE)
+def TestInstanceAddFile(nodes):
+  """gnt-instance add -t file"""
+  assert len(nodes) == 1
+  if constants.DT_FILE in qa_config.GetEnabledDiskTemplates():
+    return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_FILE)
+
+
+@InstanceCheck(None, INST_UP, RETURN_VALUE)
+def TestInstanceAddSharedFile(nodes):
+  """gnt-instance add -t sharedfile"""
+  assert len(nodes) == 1
+  if constants.DT_SHARED_FILE in qa_config.GetEnabledDiskTemplates():
+    return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_SHARED_FILE)
+
+
+@InstanceCheck(None, INST_UP, RETURN_VALUE)
+def TestInstanceAddDiskless(nodes):
+  """gnt-instance add -t diskless"""
+  assert len(nodes) == 1
+  if constants.DT_DISKLESS in qa_config.GetEnabledDiskTemplates():
+    return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_DISKLESS)
 
 
 @InstanceCheck(None, INST_DOWN, FIRST_ARG)
 def TestInstanceRemove(instance):
   """gnt-instance remove"""
-  AssertCommand(["gnt-instance", "remove", "-f", instance["name"]])
-
-  qa_config.ReleaseInstance(instance)
+  AssertCommand(["gnt-instance", "remove", "-f", instance.name])
 
 
 @InstanceCheck(INST_DOWN, INST_UP, FIRST_ARG)
 def TestInstanceStartup(instance):
   """gnt-instance startup"""
-  AssertCommand(["gnt-instance", "startup", instance["name"]])
+  AssertCommand(["gnt-instance", "startup", instance.name])
 
 
 @InstanceCheck(INST_UP, INST_DOWN, FIRST_ARG)
 def TestInstanceShutdown(instance):
   """gnt-instance shutdown"""
-  AssertCommand(["gnt-instance", "shutdown", instance["name"]])
+  AssertCommand(["gnt-instance", "shutdown", instance.name])
 
 
 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
@@ -217,7 +315,7 @@ def TestInstanceReboot(instance):
   """gnt-instance reboot"""
   options = qa_config.get("options", {})
   reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)
-  name = instance["name"]
+  name = instance.name
   for rtype in reboot_types:
     AssertCommand(["gnt-instance", "reboot", "--type=%s" % rtype, name])
 
@@ -227,7 +325,7 @@ def TestInstanceReboot(instance):
 
   master = qa_config.GetMasterNode()
   cmd = ["gnt-instance", "list", "--no-headers", "-o", "status", name]
-  result_output = qa_utils.GetCommandOutput(master["primary"],
+  result_output = qa_utils.GetCommandOutput(master.primary,
                                             utils.ShellQuoteArgs(cmd))
   AssertEqual(result_output.strip(), constants.INSTST_RUNNING)
 
@@ -235,39 +333,19 @@ def TestInstanceReboot(instance):
 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
 def TestInstanceReinstall(instance):
   """gnt-instance reinstall"""
-  AssertCommand(["gnt-instance", "reinstall", "-f", instance["name"]])
+  if instance.disk_template == constants.DT_DISKLESS:
+    print qa_utils.FormatInfo("Test not supported for diskless instances")
+    return
+
+  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
 
   # Test with non-existant OS definition
   AssertCommand(["gnt-instance", "reinstall", "-f",
                  "--os-type=NonExistantOsForQa",
-                 instance["name"]],
+                 instance.name],
                 fail=True)
 
 
-def _ReadSsconfInstanceList():
-  """Reads ssconf_instance_list from the master node.
-
-  """
-  master = qa_config.GetMasterNode()
-
-  cmd = ["cat", utils.PathJoin(pathutils.DATA_DIR,
-                               "ssconf_%s" % constants.SS_INSTANCE_LIST)]
-
-  return qa_utils.GetCommandOutput(master["primary"],
-                                   utils.ShellQuoteArgs(cmd)).splitlines()
-
-
-def _CheckSsconfInstanceList(instance):
-  """Checks if a certain instance is in the ssconf instance list.
-
-  @type instance: string
-  @param instance: Instance name
-
-  """
-  AssertIn(qa_utils.ResolveInstanceName(instance),
-           _ReadSsconfInstanceList())
-
-
 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
 def TestInstanceRenameAndBack(rename_source, rename_target):
   """gnt-instance rename
@@ -276,42 +354,55 @@ def TestInstanceRenameAndBack(rename_source, rename_target):
   name.
 
   """
-  _CheckSsconfInstanceList(rename_source)
+  CheckSsconfInstanceList(rename_source)
 
   # first do a rename to a different actual name, expecting it to fail
   qa_utils.AddToEtcHosts(["meeeeh-not-exists", rename_target])
   try:
     AssertCommand(["gnt-instance", "rename", rename_source, rename_target],
                   fail=True)
-    _CheckSsconfInstanceList(rename_source)
+    CheckSsconfInstanceList(rename_source)
   finally:
     qa_utils.RemoveFromEtcHosts(["meeeeh-not-exists", rename_target])
 
-  # Check instance volume tags correctly updated
-  # FIXME: this is LVM specific!
-  info = _GetInstanceInfo(rename_source)
-  tags_cmd = ("lvs -o tags --noheadings %s | grep " %
-              (" ".join(info["volumes"]), ))
+  info = GetInstanceInfo(rename_source)
+
+  # Check that instance volume tags are correctly updated. Note that this
+  # check is lvm specific, so we skip it for non-lvm-based instances.
+  # FIXME: This will need updating when instances are able to have different
+  # disks living on storage pools with heterogeneous storage types.
+  # FIXME: This check should be put inside the disk/storage classes
+  # themselves, rather than explicitly called here.
+  if info["storage-type"] == constants.ST_LVM_VG:
+    # In the lvm world we can check for tags on the logical volume
+    tags_cmd = ("lvs -o tags --noheadings %s | grep " %
+                (" ".join(info["volumes"]), ))
+  else:
+    # Other storage types don't have tags, so we use an always failing command,
+    # to make sure it never gets executed
+    tags_cmd = "false"
 
   # and now rename instance to rename_target...
   AssertCommand(["gnt-instance", "rename", rename_source, rename_target])
-  _CheckSsconfInstanceList(rename_target)
+  CheckSsconfInstanceList(rename_target)
   qa_utils.RunInstanceCheck(rename_source, False)
   qa_utils.RunInstanceCheck(rename_target, False)
 
   # NOTE: tags might not be the exactly as the instance name, due to
   # charset restrictions; hence the test might be flaky
-  if rename_source != rename_target:
+  if (rename_source != rename_target and
+      info["storage-type"] == constants.ST_LVM_VG):
     for node in info["nodes"]:
       AssertCommand(tags_cmd + rename_source, node=node, fail=True)
       AssertCommand(tags_cmd + rename_target, node=node, fail=False)
 
   # and back
   AssertCommand(["gnt-instance", "rename", rename_target, rename_source])
-  _CheckSsconfInstanceList(rename_source)
+  CheckSsconfInstanceList(rename_source)
   qa_utils.RunInstanceCheck(rename_target, False)
 
-  if rename_source != rename_target:
+  if (rename_source != rename_target and
+      info["storage-type"] == constants.ST_LVM_VG):
     for node in info["nodes"]:
       AssertCommand(tags_cmd + rename_source, node=node, fail=False)
       AssertCommand(tags_cmd + rename_target, node=node, fail=True)
@@ -325,7 +416,7 @@ def TestInstanceFailover(instance):
                               " test")
     return
 
-  cmd = ["gnt-instance", "failover", "--force", instance["name"]]
+  cmd = ["gnt-instance", "failover", "--force", instance.name]
 
   # failover ...
   AssertCommand(cmd)
@@ -343,10 +434,10 @@ def TestInstanceMigrate(instance, toggle_always_failover=True):
                               " test")
     return
 
-  cmd = ["gnt-instance", "migrate", "--force", instance["name"]]
+  cmd = ["gnt-instance", "migrate", "--force", instance.name]
   af_par = constants.BE_ALWAYS_FAILOVER
   af_field = "be/" + constants.BE_ALWAYS_FAILOVER
-  af_init_val = _GetBoolInstanceField(instance["name"], af_field)
+  af_init_val = _GetBoolInstanceField(instance.name, af_field)
 
   # migrate ...
   AssertCommand(cmd)
@@ -357,21 +448,21 @@ def TestInstanceMigrate(instance, toggle_always_failover=True):
   if toggle_always_failover:
     AssertCommand(["gnt-instance", "modify", "-B",
                    ("%s=%s" % (af_par, not af_init_val)),
-                   instance["name"]])
+                   instance.name])
   AssertCommand(cmd)
   # TODO: Verify the choice between failover and migration
   qa_utils.RunInstanceCheck(instance, True)
   if toggle_always_failover:
     AssertCommand(["gnt-instance", "modify", "-B",
-                   ("%s=%s" % (af_par, af_init_val)), instance["name"]])
+                   ("%s=%s" % (af_par, af_init_val)), instance.name])
 
   # TODO: Split into multiple tests
-  AssertCommand(["gnt-instance", "shutdown", instance["name"]])
+  AssertCommand(["gnt-instance", "shutdown", instance.name])
   qa_utils.RunInstanceCheck(instance, False)
   AssertCommand(cmd, fail=True)
   AssertCommand(["gnt-instance", "migrate", "--force", "--allow-failover",
-                 instance["name"]])
-  AssertCommand(["gnt-instance", "start", instance["name"]])
+                 instance.name])
+  AssertCommand(["gnt-instance", "start", instance.name])
   AssertCommand(cmd)
   # @InstanceCheck enforces the check that the instance is running
   qa_utils.RunInstanceCheck(instance, True)
@@ -379,7 +470,7 @@ def TestInstanceMigrate(instance, toggle_always_failover=True):
   AssertCommand(["gnt-instance", "modify", "-B",
                  ("%s=%s" %
                   (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)),
-                 instance["name"]])
+                 instance.name])
 
   AssertCommand(cmd)
   qa_utils.RunInstanceCheck(instance, True)
@@ -389,7 +480,7 @@ def TestInstanceMigrate(instance, toggle_always_failover=True):
   AssertCommand(["gnt-instance", "modify", "-B",
                  ("%s=%s" %
                   (constants.BE_ALWAYS_FAILOVER, constants.VALUE_FALSE)),
-                 instance["name"]])
+                 instance.name])
 
   AssertCommand(cmd)
   qa_utils.RunInstanceCheck(instance, True)
@@ -397,7 +488,7 @@ def TestInstanceMigrate(instance, toggle_always_failover=True):
 
 def TestInstanceInfo(instance):
   """gnt-instance info"""
-  AssertCommand(["gnt-instance", "info", instance["name"]])
+  AssertCommand(["gnt-instance", "info", instance.name])
 
 
 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
@@ -445,23 +536,62 @@ def TestInstanceModify(instance):
       ])
 
   for alist in args:
-    AssertCommand(["gnt-instance", "modify"] + alist + [instance["name"]])
+    AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])
 
   # check no-modify
-  AssertCommand(["gnt-instance", "modify", instance["name"]], fail=True)
+  AssertCommand(["gnt-instance", "modify", instance.name], fail=True)
 
   # Marking offline while instance is running must fail...
-  AssertCommand(["gnt-instance", "modify", "--offline", instance["name"]],
+  AssertCommand(["gnt-instance", "modify", "--offline", instance.name],
                  fail=True)
 
   # ...while making it online is ok, and should work
-  AssertCommand(["gnt-instance", "modify", "--online", instance["name"]])
+  AssertCommand(["gnt-instance", "modify", "--online", instance.name])
+
+
+@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
+def TestInstanceModifyPrimaryAndBack(instance, currentnode, othernode):
+  """gnt-instance modify --new-primary
+
+  This will leave the instance on its original primary node, not the other node.
+
+  """
+  if instance.disk_template != constants.DT_FILE:
+    print qa_utils.FormatInfo("Test only supported for the file disk template")
+    return
+
+  cluster_name = qa_config.get("name")
+
+  name = instance.name
+  current = currentnode.primary
+  other = othernode.primary
+
+  filestorage = qa_config.get("file-storage-dir",
+                              pathutils.DEFAULT_FILE_STORAGE_DIR)
+  disk = os.path.join(filestorage, name)
+
+  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name],
+                fail=True)
+  AssertCommand(["gnt-instance", "shutdown", name])
+  AssertCommand(["scp", "-oGlobalKnownHostsFile=%s" %
+                 pathutils.SSH_KNOWN_HOSTS_FILE,
+                 "-oCheckHostIp=no", "-oStrictHostKeyChecking=yes",
+                 "-oHashKnownHosts=no", "-oHostKeyAlias=%s" % cluster_name,
+                 "-r", disk, "%s:%s" % (other, filestorage)], node=current)
+  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name])
+  AssertCommand(["gnt-instance", "startup", name])
+
+  # and back
+  AssertCommand(["gnt-instance", "shutdown", name])
+  AssertCommand(["rm", "-rf", disk], node=other)
+  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % current, name])
+  AssertCommand(["gnt-instance", "startup", name])
 
 
 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
 def TestInstanceStoppedModify(instance):
   """gnt-instance modify (stopped instance)"""
-  name = instance["name"]
+  name = instance.name
 
   # Instance was not marked offline; try marking it online once more
   AssertCommand(["gnt-instance", "modify", "--online", name])
@@ -486,31 +616,63 @@ def TestInstanceStoppedModify(instance):
 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
 def TestInstanceConvertDiskToPlain(instance, inodes):
   """gnt-instance modify -t"""
-  name = instance["name"]
-  template = qa_config.GetInstanceTemplate(instance)
-  if template != "drbd":
+  name = instance.name
+
+  template = instance.disk_template
+  if template != constants.DT_DRBD8:
     print qa_utils.FormatInfo("Unsupported template %s, skipping conversion"
                               " test" % template)
     return
+
   assert len(inodes) == 2
-  AssertCommand(["gnt-instance", "modify", "-t", "plain", name])
-  AssertCommand(["gnt-instance", "modify", "-t", "drbd",
-                 "-n", inodes[1]["primary"], name])
+  AssertCommand(["gnt-instance", "modify", "-t", constants.DT_PLAIN, name])
+  AssertCommand(["gnt-instance", "modify", "-t", constants.DT_DRBD8,
+                 "-n", inodes[1].primary, name])
+
+
+@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
+def TestInstanceModifyDisks(instance):
+  """gnt-instance modify --disk"""
+  if not IsDiskSupported(instance):
+    print qa_utils.FormatInfo("Instance doesn't support disks, skipping test")
+    return
+
+  disk_conf = qa_config.GetDiskOptions()[-1]
+  size = disk_conf.get("size")
+  name = instance.name
+  build_cmd = lambda arg: ["gnt-instance", "modify", "--disk", arg, name]
+  if qa_config.AreSpindlesSupported():
+    spindles = disk_conf.get("spindles")
+    spindles_supported = True
+  else:
+    # Any number is good for spindles in this case
+    spindles = 1
+    spindles_supported = False
+  AssertCommand(build_cmd("add:size=%s,spindles=%s" % (size, spindles)),
+                fail=not spindles_supported)
+  AssertCommand(build_cmd("add:size=%s" % size),
+                fail=spindles_supported)
+  # Exactly one of the above commands has succeeded, so we need one remove
+  AssertCommand(build_cmd("remove"))
 
 
 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
 def TestInstanceGrowDisk(instance):
   """gnt-instance grow-disk"""
-  if qa_config.GetExclusiveStorage():
-    print qa_utils.FormatInfo("Test not supported with exclusive_storage")
+  if instance.disk_template == constants.DT_DISKLESS:
+    print qa_utils.FormatInfo("Test not supported for diskless instances")
     return
-  name = instance["name"]
-  all_size = qa_config.get("disk")
-  all_grow = qa_config.get("disk-growth")
+
+  name = instance.name
+  disks = qa_config.GetDiskOptions()
+  all_size = [d.get("size") for d in disks]
+  all_grow = [d.get("growth") for d in disks]
+
   if not all_grow:
     # missing disk sizes but instance grow disk has been enabled,
     # let's set fixed/nomimal growth
     all_grow = ["128M" for _ in all_size]
+
   for idx, (size, grow) in enumerate(zip(all_size, all_grow)):
     # succeed in grow by amount
     AssertCommand(["gnt-instance", "grow-disk", name, str(idx), grow])
@@ -524,6 +686,57 @@ def TestInstanceGrowDisk(instance):
                    str(int_size + 2 * int_grow)])
 
 
+@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
+def TestInstanceDeviceNames(instance):
+  if instance.disk_template == constants.DT_DISKLESS:
+    print qa_utils.FormatInfo("Test not supported for diskless instances")
+    return
+
+  name = instance.name
+  for dev_type in ["disk", "net"]:
+    if dev_type == "disk":
+      options = ",size=512M"
+      if qa_config.AreSpindlesSupported():
+        options += ",spindles=1"
+    else:
+      options = ""
+    # succeed in adding a device named 'test_device'
+    AssertCommand(["gnt-instance", "modify",
+                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
+                   name])
+    # succeed in removing the 'test_device'
+    AssertCommand(["gnt-instance", "modify",
+                   "--%s=test_device:remove" % dev_type,
+                   name])
+    # fail to add two devices with the same name
+    AssertCommand(["gnt-instance", "modify",
+                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
+                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
+                   name], fail=True)
+    # fail to add a device with invalid name
+    AssertCommand(["gnt-instance", "modify",
+                   "--%s=-1:add,name=2%s" % (dev_type, options),
+                   name], fail=True)
+  # Rename disks
+  disks = qa_config.GetDiskOptions()
+  disk_names = [d.get("name") for d in disks]
+  for idx, disk_name in enumerate(disk_names):
+    # Refer to disk by idx
+    AssertCommand(["gnt-instance", "modify",
+                   "--disk=%s:modify,name=renamed" % idx,
+                   name])
+    # Refer to by name and rename to original name
+    AssertCommand(["gnt-instance", "modify",
+                   "--disk=renamed:modify,name=%s" % disk_name,
+                   name])
+  if len(disks) >= 2:
+    # fail to rename two disks to the same name
+    AssertCommand(["gnt-instance", "modify",
+                   "--disk=0:modify,name=same_name",
+                   "--disk=1:modify,name=same_name",
+                   name], fail=True)
+
+
 def TestInstanceList():
   """gnt-instance list"""
   qa_utils.GenericQueryTest("gnt-instance", query.INSTANCE_FIELDS.keys())
@@ -537,7 +750,7 @@ def TestInstanceListFields():
 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
 def TestInstanceConsole(instance):
   """gnt-instance console"""
-  AssertCommand(["gnt-instance", "console", "--show-cmd", instance["name"]])
+  AssertCommand(["gnt-instance", "console", "--show-cmd", instance.name])
 
 
 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
@@ -546,7 +759,7 @@ def TestReplaceDisks(instance, curr_nodes, other_nodes):
   def buildcmd(args):
     cmd = ["gnt-instance", "replace-disks"]
     cmd.extend(args)
-    cmd.append(instance["name"])
+    cmd.append(instance.name)
     return cmd
 
   if not IsDiskReplacingSupported(instance):
@@ -568,23 +781,23 @@ def TestReplaceDisks(instance, curr_nodes, other_nodes):
     # A placeholder; the actual command choice depends on use_ialloc
     None,
     # Restore the original secondary
-    ["--new-secondary=%s" % snode["primary"]],
+    ["--new-secondary=%s" % snode.primary],
     ]:
     if data is None:
       if use_ialloc:
         data = ["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT]
       else:
-        data = ["--new-secondary=%s" % othernode["primary"]]
+        data = ["--new-secondary=%s" % othernode.primary]
     AssertCommand(buildcmd(data))
 
   AssertCommand(buildcmd(["-a"]))
-  AssertCommand(["gnt-instance", "stop", instance["name"]])
+  AssertCommand(["gnt-instance", "stop", instance.name])
   AssertCommand(buildcmd(["-a"]), fail=True)
-  AssertCommand(["gnt-instance", "activate-disks", instance["name"]])
+  AssertCommand(["gnt-instance", "activate-disks", instance.name])
   AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
-                 instance["name"]])
+                 instance.name])
   AssertCommand(buildcmd(["-a"]))
-  AssertCommand(["gnt-instance", "start", instance["name"]])
+  AssertCommand(["gnt-instance", "start", instance.name])
 
 
 def _AssertRecreateDisks(cmdargs, instance, fail=False, check=True,
@@ -599,15 +812,42 @@ def _AssertRecreateDisks(cmdargs, instance, fail=False, check=True,
 
   """
   if destroy:
-    _DestroyInstanceVolumes(instance)
+    _DestroyInstanceDisks(instance)
   AssertCommand((["gnt-instance", "recreate-disks"] + cmdargs +
-                 [instance["name"]]), fail)
+                 [instance.name]), fail)
   if not fail and check:
     # Quick check that the disks are there
-    AssertCommand(["gnt-instance", "activate-disks", instance["name"]])
+    AssertCommand(["gnt-instance", "activate-disks", instance.name])
     AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
-                   instance["name"]])
-    AssertCommand(["gnt-instance", "deactivate-disks", instance["name"]])
+                   instance.name])
+    AssertCommand(["gnt-instance", "deactivate-disks", instance.name])
+
+
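+# Usage sketch for _BuildRecreateDisksOpts (hypothetical disk config): for
+# en_disks=[(0, {"size": "1G", "growth": "512M"})], with_spindles=False and
+# with_growth=True, the expected result is ["--disk=0:size=1536"], as
+# utils.ParseUnit normalizes sizes to mebibytes.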
+def _BuildRecreateDisksOpts(en_disks, with_spindles, with_growth,
+                            spindles_supported):
+  if with_spindles:
+    if spindles_supported:
+      if with_growth:
+        build_spindles_opt = (lambda disk:
+                              ",spindles=%s" %
+                              (disk["spindles"] + disk["spindles-growth"]))
+      else:
+        build_spindles_opt = (lambda disk:
+                              ",spindles=%s" % disk["spindles"])
+    else:
+      build_spindles_opt = (lambda _: ",spindles=1")
+  else:
+    build_spindles_opt = (lambda _: "")
+  if with_growth:
+    build_size_opt = (lambda disk:
+                      "size=%s" % (utils.ParseUnit(disk["size"]) +
+                                   utils.ParseUnit(disk["growth"])))
+  else:
+    build_size_opt = (lambda disk: "size=%s" % disk["size"])
+  build_disk_opt = (lambda (idx, disk):
+                    "--disk=%s:%s%s" % (idx, build_size_opt(disk),
+                                        build_spindles_opt(disk)))
+  return map(build_disk_opt, en_disks)
 
 
 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
@@ -621,17 +861,21 @@ def TestRecreateDisks(instance, inodes, othernodes):
   """
   options = qa_config.get("options", {})
   use_ialloc = options.get("use-iallocators", True)
-  other_seq = ":".join([n["primary"] for n in othernodes])
-  orig_seq = ":".join([n["primary"] for n in inodes])
+  other_seq = ":".join([n.primary for n in othernodes])
+  orig_seq = ":".join([n.primary for n in inodes])
   # These fail because the instance is running
   _AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
   if use_ialloc:
     _AssertRecreateDisks(["-I", "hail"], instance, fail=True, destroy=False)
   else:
     _AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
-  AssertCommand(["gnt-instance", "stop", instance["name"]])
+  AssertCommand(["gnt-instance", "stop", instance.name])
   # Disks exist: this should fail
   _AssertRecreateDisks([], instance, fail=True, destroy=False)
+  # Unsupported spindles parameters: fail
+  if not qa_config.AreSpindlesSupported():
+    _AssertRecreateDisks(["--disk=0:spindles=2"], instance,
+                         fail=True, destroy=False)
   # Recreate disks in place
   _AssertRecreateDisks([], instance)
   # Move disks away
@@ -643,53 +887,84 @@ def TestRecreateDisks(instance, inodes, othernodes):
   else:
     _AssertRecreateDisks(["-n", other_seq], instance)
   # Move disks back
-  _AssertRecreateDisks(["-n", orig_seq], instance, check=False)
+  _AssertRecreateDisks(["-n", orig_seq], instance)
+  # Recreate resized disks
+  # One of the two commands fails because either spindles are given when they
+  # should not or vice versa
+  alldisks = qa_config.GetDiskOptions()
+  spindles_supported = qa_config.AreSpindlesSupported()
+  disk_opts = _BuildRecreateDisksOpts(enumerate(alldisks), True, True,
+                                      spindles_supported)
+  _AssertRecreateDisks(disk_opts, instance, destroy=True,
+                       fail=not spindles_supported)
+  disk_opts = _BuildRecreateDisksOpts(enumerate(alldisks), False, True,
+                                      spindles_supported)
+  _AssertRecreateDisks(disk_opts, instance, destroy=False,
+                       fail=spindles_supported)
+  # Recreate the disks one by one (with the original size)
+  for (idx, disk) in enumerate(alldisks):
+    # Only the first call should destroy all the disks
+    destroy = (idx == 0)
+    # Again, one of the two commands is expected to fail
+    disk_opts = _BuildRecreateDisksOpts([(idx, disk)], True, False,
+                                        spindles_supported)
+    _AssertRecreateDisks(disk_opts, instance, destroy=destroy, check=False,
+                         fail=not spindles_supported)
+    disk_opts = _BuildRecreateDisksOpts([(idx, disk)], False, False,
+                                        spindles_supported)
+    _AssertRecreateDisks(disk_opts, instance, destroy=False, check=False,
+                         fail=spindles_supported)
   # This and InstanceCheck decoration check that the disks are working
-  AssertCommand(["gnt-instance", "reinstall", "-f", instance["name"]])
-  AssertCommand(["gnt-instance", "start", instance["name"]])
+  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
+  AssertCommand(["gnt-instance", "start", instance.name])
 
 
 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
 def TestInstanceExport(instance, node):
   """gnt-backup export -n ..."""
-  name = instance["name"]
-  AssertCommand(["gnt-backup", "export", "-n", node["primary"], name])
+  name = instance.name
+  # Export does not work for file-based templates, thus we skip the test
+  if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
+    return
+  AssertCommand(["gnt-backup", "export", "-n", node.primary, name])
   return qa_utils.ResolveInstanceName(name)
 
 
 @InstanceCheck(None, INST_DOWN, FIRST_ARG)
 def TestInstanceExportWithRemove(instance, node):
   """gnt-backup export --remove-instance"""
-  AssertCommand(["gnt-backup", "export", "-n", node["primary"],
-                 "--remove-instance", instance["name"]])
-  qa_config.ReleaseInstance(instance)
+  AssertCommand(["gnt-backup", "export", "-n", node.primary,
+                 "--remove-instance", instance.name])
 
 
 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
 def TestInstanceExportNoTarget(instance):
   """gnt-backup export (without target node, should fail)"""
-  AssertCommand(["gnt-backup", "export", instance["name"]], fail=True)
+  AssertCommand(["gnt-backup", "export", instance.name], fail=True)
 
 
 @InstanceCheck(None, INST_DOWN, FIRST_ARG)
 def TestInstanceImport(newinst, node, expnode, name):
   """gnt-backup import"""
   templ = constants.DT_PLAIN
+  if not qa_config.IsTemplateSupported(templ):
+    return
   cmd = (["gnt-backup", "import",
           "--disk-template=%s" % templ,
           "--no-ip-check",
-          "--src-node=%s" % expnode["primary"],
+          "--src-node=%s" % expnode.primary,
           "--src-dir=%s/%s" % (pathutils.EXPORT_DIR, name),
-          "--node=%s" % node["primary"]] +
-         _GetGenericAddParameters(newinst, force_mac=constants.VALUE_GENERATE))
-  cmd.append(newinst["name"])
+          "--node=%s" % node.primary] +
+         GetGenericAddParameters(newinst, templ,
+                                  force_mac=constants.VALUE_GENERATE))
+  cmd.append(newinst.name)
   AssertCommand(cmd)
-  qa_config.SetInstanceTemplate(newinst, templ)
+  newinst.SetDiskTemplate(templ)
 
 
 def TestBackupList(expnode):
   """gnt-backup list"""
-  AssertCommand(["gnt-backup", "list", "--node=%s" % expnode["primary"]])
+  AssertCommand(["gnt-backup", "list", "--node=%s" % expnode.primary])
 
   qa_utils.GenericQueryTest("gnt-backup", query.EXPORT_FIELDS.keys(),
                             namefield=None, test_unknown=False)
@@ -698,3 +973,114 @@ def TestBackupList(expnode):
 def TestBackupListFields():
   """gnt-backup list-fields"""
   qa_utils.GenericQueryFieldsTest("gnt-backup", query.EXPORT_FIELDS.keys())
+
+
+def TestRemoveInstanceOfflineNode(instance, snode, set_offline, set_online):
+  """gnt-instance remove with an off-line node
+
+  @param instance: instance
+  @param snode: secondary node, to be set offline
+  @param set_offline: function to call to set the node off-line
+  @param set_online: function to call to set the node on-line
+
+  """
+  info = GetInstanceInfo(instance.name)
+  set_offline(snode)
+  try:
+    TestInstanceRemove(instance)
+  finally:
+    set_online(snode)
+
+  # Clean up the disks on the offline node, if necessary
+  if instance.disk_template not in constants.DTS_EXT_MIRROR:
+    # FIXME: abstract the cleanup inside the disks
+    if info["storage-type"] == constants.ST_LVM_VG:
+      for minor in info["drbd-minors"][snode.primary]:
+        # DRBD 8.3 syntax comes first, then DRBD 8.4 syntax. The 8.4 syntax
+        # relies on the fact that we always create a resource for each minor,
+        # and that this resource is always named resource{minor}.
+        # As 'drbdsetup 0 down' does return success (even though that's invalid
+        # syntax), we always have to perform both commands and ignore the
+        # output.
+        drbd_shutdown_cmd = \
+          "(drbdsetup %d down >/dev/null 2>&1;" \
+          " drbdsetup down resource%d >/dev/null 2>&1) || /bin/true" % \
+            (minor, minor)
+        AssertCommand(drbd_shutdown_cmd, node=snode)
+      AssertCommand(["lvremove", "-f"] + info["volumes"], node=snode)
+    elif info["storage-type"] == constants.ST_FILE:
+      filestorage = qa_config.get("file-storage-dir",
+                                  pathutils.DEFAULT_FILE_STORAGE_DIR)
+      disk = os.path.join(filestorage, instance.name)
+      AssertCommand(["rm", "-rf", disk], node=snode)
+
+
+def TestInstanceCreationRestrictedByDiskTemplates():
+  """Test adding instances for disabled disk templates."""
+  if qa_config.TestEnabled("cluster-exclusive-storage"):
+    # These tests are valid only for non-exclusive storage
+    return
+
+  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
+  nodes = qa_config.AcquireManyNodes(2)
+
+  # Setup the cluster with the enabled_disk_templates
+  AssertCommand(
+    ["gnt-cluster", "modify",
+     "--enabled-disk-templates=%s" % ",".join(enabled_disk_templates),
+     "--ipolicy-disk-templates=%s" % ",".join(enabled_disk_templates)],
+    fail=False)
+
+  # Test instance creation for enabled disk templates
+  for disk_template in enabled_disk_templates:
+    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=False)
+    TestInstanceRemove(instance)
+    instance.Release()
+
+  # Test that instance creation fails for disabled disk templates
+  disabled_disk_templates = list(constants.DISK_TEMPLATES
+                                 - set(enabled_disk_templates))
+  for disk_template in disabled_disk_templates:
+    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
+
+  # Test instance creation after disabling the enabled disk templates
+  if (len(enabled_disk_templates) > 1):
+    # Partition the disk templates, enable them separately and check that the
+    # disabled ones cannot be used by instances.
+    middle = len(enabled_disk_templates) / 2
+    templates1 = enabled_disk_templates[:middle]
+    templates2 = enabled_disk_templates[middle:]
+
+    for (enabled, disabled) in [(templates1, templates2),
+                                (templates2, templates1)]:
+      AssertCommand(["gnt-cluster", "modify",
+                     "--enabled-disk-templates=%s" % ",".join(enabled),
+                     "--ipolicy-disk-templates=%s" % ",".join(enabled)],
+                    fail=False)
+      for disk_template in disabled:
+        CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
+  elif (len(enabled_disk_templates) == 1):
+    # If only one disk template is enabled in the QA config, we have to enable
+    # some other templates in order to test whether disabling the only enabled
+    # disk template prohibits creating instances of that template.
+    other_disk_templates = list(
+                             set([constants.DT_DISKLESS, constants.DT_BLOCK]) -
+                             set(enabled_disk_templates))
+    AssertCommand(["gnt-cluster", "modify",
+                   "--enabled-disk-templates=%s" %
+                     ",".join(other_disk_templates),
+                   "--ipolicy-disk-templates=%s" %
+                     ",".join(other_disk_templates)],
+                  fail=False)
+    CreateInstanceByDiskTemplate(nodes, enabled_disk_templates[0], fail=True)
+  else:
+    raise qa_error.Error("Please enable at least one disk template"
+                         " in your QA setup.")
+
+  # Restore initially enabled disk templates
+  AssertCommand(["gnt-cluster", "modify",
+                 "--enabled-disk-templates=%s" %
+                   ",".join(enabled_disk_templates),
+                 "--ipolicy-disk-templates=%s" %
+                   ",".join(enabled_disk_templates)],
+                 fail=False)