Hs2Py constants: add remaining '_autoconf.*' constants
[ganeti-local] / qa / qa_instance.py
index d3b2f25..e7d8bb4 100644
@@ -23,7 +23,6 @@
 
 """
 
-import operator
 import os
 import re
 
@@ -36,65 +35,20 @@ import qa_config
 import qa_utils
 import qa_error
 
-from qa_utils import AssertIn, AssertCommand, AssertEqual
+from qa_utils import AssertCommand, AssertEqual
 from qa_utils import InstanceCheck, INST_DOWN, INST_UP, FIRST_ARG, RETURN_VALUE
+from qa_instance_utils import CheckSsconfInstanceList, \
+                              CreateInstanceDrbd8, \
+                              CreateInstanceByDiskTemplate, \
+                              CreateInstanceByDiskTemplateOneNode, \
+                              GetGenericAddParameters
 
 
 def _GetDiskStatePath(disk):
   return "/sys/block/%s/device/state" % disk
 
 
-def _GetGenericAddParameters(inst, disk_template, force_mac=None):
-  params = ["-B"]
-  params.append("%s=%s,%s=%s" % (constants.BE_MINMEM,
-                                 qa_config.get(constants.BE_MINMEM),
-                                 constants.BE_MAXMEM,
-                                 qa_config.get(constants.BE_MAXMEM)))
-
-  if disk_template != constants.DT_DISKLESS:
-    for idx, size in enumerate(qa_config.get("disk")):
-      params.extend(["--disk", "%s:size=%s" % (idx, size)])
-
-  # Set static MAC address if configured
-  if force_mac:
-    nic0_mac = force_mac
-  else:
-    nic0_mac = inst.GetNicMacAddr(0, None)
-
-  if nic0_mac:
-    params.extend(["--net", "0:mac=%s" % nic0_mac])
-
-  return params
-
-
-def _DiskTest(node, disk_template, fail=False):
-  instance = qa_config.AcquireInstance()
-  try:
-    cmd = (["gnt-instance", "add",
-            "--os-type=%s" % qa_config.get("os"),
-            "--disk-template=%s" % disk_template,
-            "--node=%s" % node] +
-           _GetGenericAddParameters(instance, disk_template))
-    cmd.append(instance.name)
-
-    AssertCommand(cmd, fail=fail)
-
-    if not fail:
-      _CheckSsconfInstanceList(instance.name)
-      instance.SetDiskTemplate(disk_template)
-
-      return instance
-  except:
-    instance.Release()
-    raise
-
-  # Handle the case where creation is expected to fail
-  assert fail
-  instance.Release()
-  return None
-
-
-def _GetInstanceInfo(instance):
+def GetInstanceInfo(instance):
   """Return information about the actual state of an instance.
 
   @type instance: string
@@ -105,6 +59,8 @@ def _GetInstanceInfo(instance):
       - "drbd-minors": DRBD minors used by the instance, a dictionary where
         keys are nodes, and values are lists of integers (or an empty
         dictionary for non-DRBD instances)
+      - "disk-template": instance disk template
+      - "storage-type": storage type associated with the instance disk template
 
   """
   node_elem = r"([^,()]+)(?:\s+\([^)]+\))?"
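
As a usage note (not part of the patch): a minimal sketch of a caller
consuming the dictionary documented above. The instance name is hypothetical;
"constants" is the module already imported at the top of this file.

    info = GetInstanceInfo("instance1.example.com")  # hypothetical name
    # "nodes" always contains at least the primary node
    primary = info["nodes"][0]
    if info["disk-template"] == constants.DT_DRBD8:
      # "drbd-minors" maps node names to the list of DRBD minors in use
      for (node, minors) in info["drbd-minors"].items():
        print "%s uses DRBD minors %s" % (node, minors)
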
@@ -129,12 +85,17 @@ def _GetInstanceInfo(instance):
         else:
           nodes.append(nodestr)
 
+  disk_template = info["Disk template"]
+  if not disk_template:
+    raise qa_error.Error("Can't get instance disk template")
+  storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
+
   re_drbdnode = re.compile(r"^([^\s,]+),\s+minor=([0-9]+)$")
   vols = []
   drbd_min = {}
   for (count, diskinfo) in enumerate(info["Disks"]):
     (dtype, _) = diskinfo["disk/%s" % count].split(",", 1)
-    if dtype == constants.LD_DRBD8:
+    if dtype == constants.DT_DRBD8:
       for child in diskinfo["child devices"]:
         vols.append(child["logical_id"])
       for key in ["nodeA", "nodeB"]:
@@ -145,7 +106,7 @@ def _GetInstanceInfo(instance):
         minor = int(m.group(2))
         minorlist = drbd_min.setdefault(node, [])
         minorlist.append(minor)
-    elif dtype == constants.LD_LV:
+    elif dtype == constants.DT_PLAIN:
       vols.append(diskinfo["logical_id"])
 
   assert nodes
@@ -154,11 +115,13 @@ def _GetInstanceInfo(instance):
     "nodes": nodes,
     "volumes": vols,
     "drbd-minors": drbd_min,
+    "disk-template": disk_template,
+    "storage-type": storage_type,
     }
 
 
-def _DestroyInstanceVolumes(instance):
-  """Remove all the LVM volumes of an instance.
+def _DestroyInstanceDisks(instance):
+  """Remove all the backend disks of an instance.
 
   This is used to simulate HW errors (dead nodes, broken disks...); the
   configuration of the instance is not affected.
@@ -166,10 +129,21 @@ def _DestroyInstanceVolumes(instance):
   @param instance: the instance
 
   """
-  info = _GetInstanceInfo(instance.name)
-  vols = info["volumes"]
-  for node in info["nodes"]:
-    AssertCommand(["lvremove", "-f"] + vols, node=node)
+  info = GetInstanceInfo(instance.name)
+  # FIXME: destruction/removal should be part of the disk class
+  if info["storage-type"] == constants.ST_LVM_VG:
+    vols = info["volumes"]
+    for node in info["nodes"]:
+      AssertCommand(["lvremove", "-f"] + vols, node=node)
+  elif info["storage-type"] == constants.ST_FILE:
+    # Note that this works for both file and sharedfile, and this is intended.
+    storage_dir = qa_config.get("file-storage-dir",
+                                pathutils.DEFAULT_FILE_STORAGE_DIR)
+    idir = os.path.join(storage_dir, instance.name)
+    for node in info["nodes"]:
+      AssertCommand(["rm", "-rf", idir], node=node)
+  elif info["storage-type"] == constants.ST_DISKLESS:
+    pass
 
 
 def _GetInstanceField(instance, field):
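
The FIXME above suggests moving destruction into the disk classes; one
possible shape for that refactoring is a dispatch table keyed on the same
storage-type constants used in this hunk. The helper names are invented, and
this is only a sketch, not part of the patch:

    def _RemoveLvmDisks(info, instance_name):
      # LVM: remove the logical volumes on every node of the instance
      for node in info["nodes"]:
        AssertCommand(["lvremove", "-f"] + info["volumes"], node=node)

    def _RemoveFileDisks(info, instance_name):
      # File/sharedfile: remove the per-instance directory on every node
      storage_dir = qa_config.get("file-storage-dir",
                                  pathutils.DEFAULT_FILE_STORAGE_DIR)
      idir = os.path.join(storage_dir, instance_name)
      for node in info["nodes"]:
        AssertCommand(["rm", "-rf", idir], node=node)

    # The if/elif chain then collapses into a lookup; diskless needs no cleanup
    _DISK_REMOVAL = {
      constants.ST_LVM_VG: _RemoveLvmDisks,
      constants.ST_FILE: _RemoveFileDisks,
      constants.ST_DISKLESS: lambda info, name: None,
      }
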
@@ -236,7 +210,7 @@ def GetInstanceSpec(instance, spec):
   @type instance: string
   @param instance: Instance name
   @type spec: string
-  @param spec: one of the supported parameters: "mem-size", "cpu-count",
+  @param spec: one of the supported parameters: "memory-size", "cpu-count",
       "disk-count", "disk-size", "nic-count"
   @rtype: tuple
   @return: (minspec, maxspec); minspec and maxspec can be different only for
@@ -244,7 +218,7 @@ def GetInstanceSpec(instance, spec):
 
   """
   specmap = {
-    "mem-size": ["be/minmem", "be/maxmem"],
+    "memory-size": ["be/minmem", "be/maxmem"],
     "cpu-count": ["vcpus"],
     "disk-count": ["disk.count"],
     "disk-size": ["disk.size/ "],
@@ -273,35 +247,49 @@ def IsDiskReplacingSupported(instance):
   return instance.disk_template == constants.DT_DRBD8
 
 
+def IsDiskSupported(instance):
+  return instance.disk_template != constants.DT_DISKLESS
+
+
 def TestInstanceAddWithPlainDisk(nodes, fail=False):
   """gnt-instance add -t plain"""
-  assert len(nodes) == 1
-  instance = _DiskTest(nodes[0].primary, constants.DT_PLAIN, fail=fail)
-  if not fail:
-    qa_utils.RunInstanceCheck(instance, True)
-  return instance
+  if constants.DT_PLAIN in qa_config.GetEnabledDiskTemplates():
+    instance = CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_PLAIN,
+                                                   fail=fail)
+    if not fail:
+      qa_utils.RunInstanceCheck(instance, True)
+    return instance
 
 
 @InstanceCheck(None, INST_UP, RETURN_VALUE)
 def TestInstanceAddWithDrbdDisk(nodes):
   """gnt-instance add -t drbd"""
-  assert len(nodes) == 2
-  return _DiskTest(":".join(map(operator.attrgetter("primary"), nodes)),
-                   constants.DT_DRBD8)
+  if constants.DT_DRBD8 in qa_config.GetEnabledDiskTemplates():
+    return CreateInstanceDrbd8(nodes)
 
 
 @InstanceCheck(None, INST_UP, RETURN_VALUE)
 def TestInstanceAddFile(nodes):
   """gnt-instance add -t file"""
   assert len(nodes) == 1
-  return _DiskTest(nodes[0].primary, constants.DT_FILE)
+  if constants.DT_FILE in qa_config.GetEnabledDiskTemplates():
+    return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_FILE)
+
+
+@InstanceCheck(None, INST_UP, RETURN_VALUE)
+def TestInstanceAddSharedFile(nodes):
+  """gnt-instance add -t sharedfile"""
+  assert len(nodes) == 1
+  if constants.DT_SHARED_FILE in qa_config.GetEnabledDiskTemplates():
+    return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_SHARED_FILE)
 
 
 @InstanceCheck(None, INST_UP, RETURN_VALUE)
 def TestInstanceAddDiskless(nodes):
   """gnt-instance add -t diskless"""
   assert len(nodes) == 1
-  return _DiskTest(nodes[0].primary, constants.DT_DISKLESS)
+  if constants.DT_DISKLESS in qa_config.GetEnabledDiskTemplates():
+    return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_DISKLESS)
 
 
 @InstanceCheck(None, INST_DOWN, FIRST_ARG)
@@ -358,32 +346,6 @@ def TestInstanceReinstall(instance):
                 fail=True)
 
 
-def _ReadSsconfInstanceList():
-  """Reads ssconf_instance_list from the master node.
-
-  """
-  master = qa_config.GetMasterNode()
-
-  ssconf_path = utils.PathJoin(pathutils.DATA_DIR,
-                               "ssconf_%s" % constants.SS_INSTANCE_LIST)
-
-  cmd = ["cat", qa_utils.MakeNodePath(master, ssconf_path)]
-
-  return qa_utils.GetCommandOutput(master.primary,
-                                   utils.ShellQuoteArgs(cmd)).splitlines()
-
-
-def _CheckSsconfInstanceList(instance):
-  """Checks if a certain instance is in the ssconf instance list.
-
-  @type instance: string
-  @param instance: Instance name
-
-  """
-  AssertIn(qa_utils.ResolveInstanceName(instance),
-           _ReadSsconfInstanceList())
-
-
 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
 def TestInstanceRenameAndBack(rename_source, rename_target):
   """gnt-instance rename
@@ -392,42 +354,55 @@ def TestInstanceRenameAndBack(rename_source, rename_target):
   name.
 
   """
-  _CheckSsconfInstanceList(rename_source)
+  CheckSsconfInstanceList(rename_source)
 
   # first do a rename to a different actual name, expecting it to fail
   qa_utils.AddToEtcHosts(["meeeeh-not-exists", rename_target])
   try:
     AssertCommand(["gnt-instance", "rename", rename_source, rename_target],
                   fail=True)
-    _CheckSsconfInstanceList(rename_source)
+    CheckSsconfInstanceList(rename_source)
   finally:
     qa_utils.RemoveFromEtcHosts(["meeeeh-not-exists", rename_target])
 
-  # Check instance volume tags correctly updated
-  # FIXME: this is LVM specific!
-  info = _GetInstanceInfo(rename_source)
-  tags_cmd = ("lvs -o tags --noheadings %s | grep " %
-              (" ".join(info["volumes"]), ))
+  info = GetInstanceInfo(rename_source)
+
+  # Check that instance volume tags are correctly updated. Note that this
+  # check is lvm specific, so we skip it for non-lvm-based instances.
+  # FIXME: This will need updating when instances are able to have
+  # different disks living on storage pools with heterogeneous storage types.
+  # FIXME: This check should be put inside the disk/storage classes
+  # themselves, rather than explicitly called here.
+  if info["storage-type"] == constants.ST_LVM_VG:
+    # In the lvm world we can check for tags on the logical volume
+    tags_cmd = ("lvs -o tags --noheadings %s | grep " %
+                (" ".join(info["volumes"]), ))
+  else:
+    # Other storage types don't have tags, so we use an always failing command,
+    # to make sure it never gets executed
+    tags_cmd = "false"
 
   # and now rename instance to rename_target...
   AssertCommand(["gnt-instance", "rename", rename_source, rename_target])
-  _CheckSsconfInstanceList(rename_target)
+  CheckSsconfInstanceList(rename_target)
   qa_utils.RunInstanceCheck(rename_source, False)
   qa_utils.RunInstanceCheck(rename_target, False)
 
  # NOTE: tags might not be exactly the same as the instance name, due to
  # charset restrictions; hence the test might be flaky
-  if rename_source != rename_target:
+  if (rename_source != rename_target and
+      info["storage-type"] == constants.ST_LVM_VG):
     for node in info["nodes"]:
       AssertCommand(tags_cmd + rename_source, node=node, fail=True)
       AssertCommand(tags_cmd + rename_target, node=node, fail=False)
 
   # and back
   AssertCommand(["gnt-instance", "rename", rename_target, rename_source])
-  _CheckSsconfInstanceList(rename_source)
+  CheckSsconfInstanceList(rename_source)
   qa_utils.RunInstanceCheck(rename_target, False)
 
-  if rename_source != rename_target:
+  if (rename_source != rename_target and
+      info["storage-type"] == constants.ST_LVM_VG):
     for node in info["nodes"]:
       AssertCommand(tags_cmd + rename_source, node=node, fail=False)
       AssertCommand(tags_cmd + rename_target, node=node, fail=True)
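
To make the LVM branch concrete: with hypothetical logical volumes, tags_cmd
expands to an lvs pipeline whose grep target is appended at the call sites
above.

    vols = ["xenvg/disk0", "xenvg/disk1"]  # hypothetical logical volumes
    tags_cmd = ("lvs -o tags --noheadings %s | grep " % " ".join(vols))
    # Appending the instance name yields the command actually executed:
    #   lvs -o tags --noheadings xenvg/disk0 xenvg/disk1 | grep instance1
    print tags_cmd + "instance1"
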
@@ -585,17 +560,24 @@ def TestInstanceModifyPrimaryAndBack(instance, currentnode, othernode):
     print qa_utils.FormatInfo("Test only supported for the file disk template")
     return
 
+  cluster_name = qa_config.get("name")
+
   name = instance.name
   current = currentnode.primary
   other = othernode.primary
 
-  filestorage = qa_config.get("file-storage-dir")
+  filestorage = qa_config.get("file-storage-dir",
+                              pathutils.DEFAULT_FILE_STORAGE_DIR)
   disk = os.path.join(filestorage, name)
 
   AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name],
                 fail=True)
   AssertCommand(["gnt-instance", "shutdown", name])
-  AssertCommand(["scp", "-r", disk, "%s:%s" % (other, filestorage)])
+  AssertCommand(["scp", "-oGlobalKnownHostsFile=%s" %
+                 pathutils.SSH_KNOWN_HOSTS_FILE,
+                 "-oCheckHostIp=no", "-oStrictHostKeyChecking=yes",
+                 "-oHashKnownHosts=no", "-oHostKeyAlias=%s" % cluster_name,
+                 "-r", disk, "%s:%s" % (other, filestorage)], node=current)
   AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name])
   AssertCommand(["gnt-instance", "startup", name])
 
@@ -648,20 +630,43 @@ def TestInstanceConvertDiskToPlain(instance, inodes):
                  "-n", inodes[1].primary, name])
 
 
+@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
+def TestInstanceModifyDisks(instance):
+  """gnt-instance modify --disk"""
+  if not IsDiskSupported(instance):
+    print qa_utils.FormatInfo("Instance doesn't support disks, skipping test")
+    return
+
+  disk_conf = qa_config.GetDiskOptions()[-1]
+  size = disk_conf.get("size")
+  name = instance.name
+  build_cmd = lambda arg: ["gnt-instance", "modify", "--disk", arg, name]
+  if qa_config.AreSpindlesSupported():
+    spindles = disk_conf.get("spindles")
+    spindles_supported = True
+  else:
+    # Any number is good for spindles in this case
+    spindles = 1
+    spindles_supported = False
+  AssertCommand(build_cmd("add:size=%s,spindles=%s" % (size, spindles)),
+                fail=not spindles_supported)
+  AssertCommand(build_cmd("add:size=%s" % size),
+                fail=spindles_supported)
+  # Exactly one of the above commands has succeeded, so we need one remove
+  AssertCommand(build_cmd("remove"))
+
+
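
Spelled out for a hypothetical 1G disk and instance name, the two modify
invocations above look as follows; exactly one of them succeeds, depending on
whether spindles are supported:

    build_cmd = lambda arg: ["gnt-instance", "modify", "--disk", arg, "inst1"]
    # Succeeds only when spindles are supported:
    print build_cmd("add:size=1G,spindles=1")
    # Succeeds only when spindles are NOT supported:
    print build_cmd("add:size=1G")
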
 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
 def TestInstanceGrowDisk(instance):
   """gnt-instance grow-disk"""
-  if qa_config.GetExclusiveStorage():
-    print qa_utils.FormatInfo("Test not supported with exclusive_storage")
-    return
-
   if instance.disk_template == constants.DT_DISKLESS:
     print qa_utils.FormatInfo("Test not supported for diskless instances")
     return
 
   name = instance.name
-  all_size = qa_config.get("disk")
-  all_grow = qa_config.get("disk-growth")
+  disks = qa_config.GetDiskOptions()
+  all_size = [d.get("size") for d in disks]
+  all_grow = [d.get("growth") for d in disks]
 
   if not all_grow:
     # missing disk sizes but instance grow disk has been enabled,
@@ -681,6 +686,57 @@ def TestInstanceGrowDisk(instance):
                    str(int_size + 2 * int_grow)])
 
 
+@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
+def TestInstanceDeviceNames(instance):
+  if instance.disk_template == constants.DT_DISKLESS:
+    print qa_utils.FormatInfo("Test not supported for diskless instances")
+    return
+
+  name = instance.name
+  for dev_type in ["disk", "net"]:
+    if dev_type == "disk":
+      options = ",size=512M"
+      if qa_config.AreSpindlesSupported():
+        options += ",spindles=1"
+    else:
+      options = ""
+    # succeed in adding a device named 'test_device'
+    AssertCommand(["gnt-instance", "modify",
+                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
+                   name])
+    # succeed in removing the 'test_device'
+    AssertCommand(["gnt-instance", "modify",
+                   "--%s=test_device:remove" % dev_type,
+                   name])
+    # fail to add two devices with the same name
+    AssertCommand(["gnt-instance", "modify",
+                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
+                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
+                   name], fail=True)
+    # fail to add a device with an invalid name
+    AssertCommand(["gnt-instance", "modify",
+                   "--%s=-1:add,name=2%s" % (dev_type, options),
+                   name], fail=True)
+  # Rename disks
+  disks = qa_config.GetDiskOptions()
+  disk_names = [d.get("name") for d in disks]
+  for idx, disk_name in enumerate(disk_names):
+    # Refer to disk by idx
+    AssertCommand(["gnt-instance", "modify",
+                   "--disk=%s:modify,name=renamed" % idx,
+                   name])
+    # Refer to it by name and rename back to the original name
+    AssertCommand(["gnt-instance", "modify",
+                   "--disk=renamed:modify,name=%s" % disk_name,
+                   name])
+  if len(disks) >= 2:
+    # fail when renaming two disks to the same name
+    AssertCommand(["gnt-instance", "modify",
+                   "--disk=0:modify,name=same_name",
+                   "--disk=1:modify,name=same_name",
+                   name], fail=True)
+
+
 def TestInstanceList():
   """gnt-instance list"""
   qa_utils.GenericQueryTest("gnt-instance", query.INSTANCE_FIELDS.keys())
@@ -756,7 +812,7 @@ def _AssertRecreateDisks(cmdargs, instance, fail=False, check=True,
 
   """
   if destroy:
-    _DestroyInstanceVolumes(instance)
+    _DestroyInstanceDisks(instance)
   AssertCommand((["gnt-instance", "recreate-disks"] + cmdargs +
                  [instance.name]), fail)
   if not fail and check:
@@ -767,6 +823,33 @@ def _AssertRecreateDisks(cmdargs, instance, fail=False, check=True,
     AssertCommand(["gnt-instance", "deactivate-disks", instance.name])
 
 
+def _BuildRecreateDisksOpts(en_disks, with_spindles, with_growth,
+                            spindles_supported):
+  if with_spindles:
+    if spindles_supported:
+      if with_growth:
+        build_spindles_opt = (lambda disk:
+                              ",spindles=%s" %
+                              (disk["spindles"] + disk["spindles-growth"]))
+      else:
+        build_spindles_opt = (lambda disk:
+                              ",spindles=%s" % disk["spindles"])
+    else:
+      build_spindles_opt = (lambda _: ",spindles=1")
+  else:
+    build_spindles_opt = (lambda _: "")
+  if with_growth:
+    build_size_opt = (lambda disk:
+                      "size=%s" % (utils.ParseUnit(disk["size"]) +
+                                   utils.ParseUnit(disk["growth"])))
+  else:
+    build_size_opt = (lambda disk: "size=%s" % disk["size"])
+  build_disk_opt = (lambda (idx, disk):
+                    "--disk=%s:%s%s" % (idx, build_size_opt(disk),
+                                        build_spindles_opt(disk)))
+  return map(build_disk_opt, en_disks)
+
+
 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
 def TestRecreateDisks(instance, inodes, othernodes):
   """gnt-instance recreate-disks
@@ -789,6 +872,10 @@ def TestRecreateDisks(instance, inodes, othernodes):
   AssertCommand(["gnt-instance", "stop", instance.name])
   # Disks exist: this should fail
   _AssertRecreateDisks([], instance, fail=True, destroy=False)
+  # Unsupported spindles parameters: fail
+  if not qa_config.AreSpindlesSupported():
+    _AssertRecreateDisks(["--disk=0:spindles=2"], instance,
+                         fail=True, destroy=False)
   # Recreate disks in place
   _AssertRecreateDisks([], instance)
   # Move disks away
@@ -800,7 +887,33 @@ def TestRecreateDisks(instance, inodes, othernodes):
   else:
     _AssertRecreateDisks(["-n", other_seq], instance)
   # Move disks back
-  _AssertRecreateDisks(["-n", orig_seq], instance, check=False)
+  _AssertRecreateDisks(["-n", orig_seq], instance)
+  # Recreate resized disks
+  # One of the two commands fails because either spindles are given when they
+  # should not be, or vice versa
+  alldisks = qa_config.GetDiskOptions()
+  spindles_supported = qa_config.AreSpindlesSupported()
+  disk_opts = _BuildRecreateDisksOpts(enumerate(alldisks), True, True,
+                                      spindles_supported)
+  _AssertRecreateDisks(disk_opts, instance, destroy=True,
+                       fail=not spindles_supported)
+  disk_opts = _BuildRecreateDisksOpts(enumerate(alldisks), False, True,
+                                      spindles_supported)
+  _AssertRecreateDisks(disk_opts, instance, destroy=False,
+                       fail=spindles_supported)
+  # Recreate the disks one by one (with the original size)
+  for (idx, disk) in enumerate(alldisks):
+    # Only the first call should destroy all the disks
+    destroy = (idx == 0)
+    # Again, one of the two commands is expected to fail
+    disk_opts = _BuildRecreateDisksOpts([(idx, disk)], True, False,
+                                        spindles_supported)
+    _AssertRecreateDisks(disk_opts, instance, destroy=destroy, check=False,
+                         fail=not spindles_supported)
+    disk_opts = _BuildRecreateDisksOpts([(idx, disk)], False, False,
+                                        spindles_supported)
+    _AssertRecreateDisks(disk_opts, instance, destroy=False, check=False,
+                         fail=spindles_supported)
   # This and InstanceCheck decoration check that the disks are working
   AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
   AssertCommand(["gnt-instance", "start", instance.name])
@@ -810,6 +923,9 @@ def TestRecreateDisks(instance, inodes, othernodes):
 def TestInstanceExport(instance, node):
   """gnt-backup export -n ..."""
   name = instance.name
+  # Export does not work for file-based templates, thus we skip the test
+  if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
+    return
   AssertCommand(["gnt-backup", "export", "-n", node.primary, name])
   return qa_utils.ResolveInstanceName(name)
 
@@ -831,13 +947,15 @@ def TestInstanceExportNoTarget(instance):
 def TestInstanceImport(newinst, node, expnode, name):
   """gnt-backup import"""
   templ = constants.DT_PLAIN
+  if not qa_config.IsTemplateSupported(templ):
+    return
   cmd = (["gnt-backup", "import",
           "--disk-template=%s" % templ,
           "--no-ip-check",
           "--src-node=%s" % expnode.primary,
           "--src-dir=%s/%s" % (pathutils.EXPORT_DIR, name),
           "--node=%s" % node.primary] +
-         _GetGenericAddParameters(newinst, templ,
-                                  force_mac=constants.VALUE_GENERATE))
+         GetGenericAddParameters(newinst, templ,
+                                 force_mac=constants.VALUE_GENERATE))
   cmd.append(newinst.name)
   AssertCommand(cmd)
@@ -858,7 +976,7 @@ def TestBackupListFields():
 
 
 def TestRemoveInstanceOfflineNode(instance, snode, set_offline, set_online):
-  """gtn-instance remove with an off-line node
+  """gnt-instance remove with an off-line node
 
   @param instance: instance
   @param snode: secondary node, to be set offline
@@ -866,13 +984,103 @@ def TestRemoveInstanceOfflineNode(instance, snode, set_offline, set_online):
   @param set_online: function to call to set the node on-line
 
   """
-  info = _GetInstanceInfo(instance.name)
+  info = GetInstanceInfo(instance.name)
   set_offline(snode)
   try:
     TestInstanceRemove(instance)
   finally:
     set_online(snode)
-  # Clean up the disks on the offline node
-  for minor in info["drbd-minors"][snode.primary]:
-    AssertCommand(["drbdsetup", str(minor), "down"], node=snode)
-  AssertCommand(["lvremove", "-f"] + info["volumes"], node=snode)
+
+  # Clean up the disks on the offline node, if necessary
+  if instance.disk_template not in constants.DTS_EXT_MIRROR:
+    # FIXME: abstract the cleanup inside the disks
+    if info["storage-type"] == constants.ST_LVM_VG:
+      for minor in info["drbd-minors"][snode.primary]:
+        # DRBD 8.3 syntax comes first, then DRBD 8.4 syntax. The 8.4 syntax
+        # relies on the fact that we always create a resource for each minor,
+        # and that this resource is always named resource{minor}.
+        # As 'drbdsetup 0 down' returns success (even though that is invalid
+        # syntax), we always have to run both commands and ignore the
+        # output.
+        drbd_shutdown_cmd = \
+          "(drbdsetup %d down >/dev/null 2>&1;" \
+          " drbdsetup down resource%d >/dev/null 2>&1) || /bin/true" % \
+            (minor, minor)
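+        # For example (minor value hypothetical), minor 3 expands to:
+        #   (drbdsetup 3 down >/dev/null 2>&1;
+        #    drbdsetup down resource3 >/dev/null 2>&1) || /bin/true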
+        AssertCommand(drbd_shutdown_cmd, node=snode)
+      AssertCommand(["lvremove", "-f"] + info["volumes"], node=snode)
+    elif info["storage-type"] == constants.ST_FILE:
+      filestorage = qa_config.get("file-storage-dir",
+                                  pathutils.DEFAULT_FILE_STORAGE_DIR)
+      disk = os.path.join(filestorage, instance.name)
+      AssertCommand(["rm", "-rf", disk], node=snode)
+
+
+def TestInstanceCreationRestrictedByDiskTemplates():
+  """Test adding instances for disabled disk templates."""
+  if qa_config.TestEnabled("cluster-exclusive-storage"):
+    # These tests are valid only for non-exclusive storage
+    return
+
+  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
+  nodes = qa_config.AcquireManyNodes(2)
+
+  # Setup the cluster with the enabled_disk_templates
+  AssertCommand(
+    ["gnt-cluster", "modify",
+     "--enabled-disk-templates=%s" % ",".join(enabled_disk_templates),
+     "--ipolicy-disk-templates=%s" % ",".join(enabled_disk_templates)],
+    fail=False)
+
+  # Test instance creation for enabled disk templates
+  for disk_template in enabled_disk_templates:
+    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=False)
+    TestInstanceRemove(instance)
+    instance.Release()
+
+  # Test that instance creation fails for disabled disk templates
+  disabled_disk_templates = list(constants.DISK_TEMPLATES
+                                 - set(enabled_disk_templates))
+  for disk_template in disabled_disk_templates:
+    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
+
+  # Test instance creation after disabling previously enabled disk templates
+  if (len(enabled_disk_templates) > 1):
+    # Partition the disk templates, enable them separately and check that the
+    # disabled ones cannot be used by instances.
+    middle = len(enabled_disk_templates) / 2
+    templates1 = enabled_disk_templates[:middle]
+    templates2 = enabled_disk_templates[middle:]
+
+    for (enabled, disabled) in [(templates1, templates2),
+                                (templates2, templates1)]:
+      AssertCommand(["gnt-cluster", "modify",
+                     "--enabled-disk-templates=%s" % ",".join(enabled),
+                     "--ipolicy-disk-templates=%s" % ",".join(enabled)],
+                    fail=False)
+      for disk_template in disabled:
+        CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
+  elif (len(enabled_disk_templates) == 1):
+    # If only one disk template is enabled in the QA config, we have to enable
+    # some other templates in order to test whether disabling the only enabled
+    # disk template prohibits creating instances of that template.
+    other_disk_templates = list(
+                             set([constants.DT_DISKLESS, constants.DT_BLOCK]) -
+                             set(enabled_disk_templates))
+    AssertCommand(["gnt-cluster", "modify",
+                   "--enabled-disk-templates=%s" %
+                     ",".join(other_disk_templates),
+                   "--ipolicy-disk-templates=%s" %
+                     ",".join(other_disk_templates)],
+                  fail=False)
+    CreateInstanceByDiskTemplate(nodes, enabled_disk_templates[0], fail=True)
+  else:
+    raise qa_error.Error("Please enable at least one disk template"
+                         " in your QA setup.")
+
+  # Restore initially enabled disk templates
+  AssertCommand(["gnt-cluster", "modify",
+                 "--enabled-disk-templates=%s" %
+                   ",".join(enabled_disk_templates),
+                 "--ipolicy-disk-templates=%s" %
+                   ",".join(enabled_disk_templates)],
+                fail=False)
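
To illustrate the partitioning step above with a hypothetical QA configuration
that enables four disk templates:

    enabled_disk_templates = ["plain", "drbd", "file", "sharedfile"]
    middle = len(enabled_disk_templates) / 2  # Python 2 integer division: 2
    templates1 = enabled_disk_templates[:middle]  # ['plain', 'drbd']
    templates2 = enabled_disk_templates[middle:]  # ['file', 'sharedfile']
    # The test enables each half in turn and asserts that creating an instance
    # with any template from the other half fails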