Cleanup in cmdlib for standalone function calls
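
The pattern throughout: module-level helpers that used to be passed a
ConfigWriter (and, where needed, a processor or SimpleStore) now take the
calling LogicalUnit and reach configuration and logging through it, while the
old sstore lookups become ConfigWriter calls. A minimal sketch of the
before/after convention, using _WaitForSync (signatures as in the hunks below,
bodies abbreviated to one representative call each):

    # before: each helper received the pieces it needed explicitly
    def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
        cfgw.SetDiskID(instance.disks[0], instance.primary_node)
        proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

    # after: helpers receive the LU and use lu.cfg / lu.proc, so call sites
    # simply pass "self"; likewise sstore.GetMasterNode() turns into
    # self.cfg.GetMasterNode()
    def _WaitForSync(lu, instance, oneshot=False, unlock=False):
        lu.cfg.SetDiskID(instance.disks[0], instance.primary_node)
        lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
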
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 633d8e5..f947f5e 100644
@@ -30,6 +30,7 @@ import time
 import tempfile
 import re
 import platform
+import logging
 
 from ganeti import rpc
 from ganeti import ssh
@@ -55,7 +56,6 @@ class LogicalUnit(object):
     - redefine HPATH and HTYPE
     - optionally redefine their run requirements:
         REQ_MASTER: the LU needs to run on the master node
-        REQ_WSSTORE: the LU needs a writable SimpleStore
         REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
 
   Note that all commands require root permissions.
@@ -65,10 +65,9 @@ class LogicalUnit(object):
   HTYPE = None
   _OP_REQP = []
   REQ_MASTER = True
-  REQ_WSSTORE = False
   REQ_BGL = True
 
-  def __init__(self, processor, op, context, sstore):
+  def __init__(self, processor, op, context):
     """Constructor for LogicalUnit.
 
     This needs to be overridden in derived classes in order to check op
@@ -78,7 +77,6 @@ class LogicalUnit(object):
     self.proc = processor
     self.op = op
     self.cfg = context.cfg
-    self.sstore = sstore
     self.context = context
     # Dicts used to declare locking needs to mcpu
     self.needed_locks = None
@@ -100,7 +98,7 @@ class LogicalUnit(object):
       raise errors.OpPrereqError("Cluster not initialized yet,"
                                  " use 'gnt-cluster init' first.")
     if self.REQ_MASTER:
-      master = sstore.GetMasterNode()
+      master = self.cfg.GetMasterNode()
       if master != utils.HostInfo().name:
         raise errors.OpPrereqError("Commands must be run on the master"
                                    " node %s" % master)
@@ -110,7 +108,7 @@ class LogicalUnit(object):
 
     """
     if not self.__ssh:
-      self.__ssh = ssh.SshRunner(self.sstore)
+      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
     return self.__ssh
 
   ssh = property(fget=__GetSSH)
@@ -444,7 +442,7 @@ def _BuildInstanceHookEnvByObject(instance, override=None):
   return _BuildInstanceHookEnv(**args)
 
 
-def _CheckInstanceBridgesExist(instance):
+def _CheckInstanceBridgesExist(lu, instance):
   """Check that the brigdes needed by an instance exist.
 
   """
@@ -470,7 +468,7 @@ class LUDestroyCluster(NoHooksLU):
     Any errors are signalled by raising errors.OpPrereqError.
 
     """
-    master = self.sstore.GetMasterNode()
+    master = self.cfg.GetMasterNode()
 
     nodelist = self.cfg.GetNodeList()
     if len(nodelist) != 1 or nodelist[0] != master:
@@ -485,7 +483,7 @@ class LUDestroyCluster(NoHooksLU):
     """Destroys the cluster.
 
     """
-    master = self.sstore.GetMasterNode()
+    master = self.cfg.GetMasterNode()
     if not rpc.call_node_stop_master(master, False):
       raise errors.OpExecError("Could not disable the master role")
     priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
@@ -588,8 +586,11 @@ class LUVerifyCluster(LogicalUnit):
                           (node, node_result['node-net-test'][node]))
 
     hyp_result = node_result.get('hypervisor', None)
-    if hyp_result is not None:
-      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
+    if isinstance(hyp_result, dict):
+      for hv_name, hv_result in hyp_result.iteritems():
+        if hv_result is not None:
+          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
+                      (hv_name, hv_result))
     return bad
 
   def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
@@ -723,6 +724,7 @@ class LUVerifyCluster(LogicalUnit):
       feedback_fn("  - ERROR: %s" % msg)
 
     vg_name = self.cfg.GetVGName()
+    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
     nodelist = utils.NiceSort(self.cfg.GetNodeList())
     nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
     instancelist = utils.NiceSort(self.cfg.GetInstanceList())
@@ -734,25 +736,27 @@ class LUVerifyCluster(LogicalUnit):
 
     # FIXME: verify OS list
     # do local checksums
-    file_names = list(self.sstore.GetFileList())
+    file_names = []
     file_names.append(constants.SSL_CERT_FILE)
     file_names.append(constants.CLUSTER_CONF_FILE)
     local_checksums = utils.FingerprintFiles(file_names)
 
     feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
     all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
-    all_instanceinfo = rpc.call_instance_list(nodelist)
+    all_instanceinfo = rpc.call_instance_list(nodelist, hypervisors)
     all_vglist = rpc.call_vg_list(nodelist)
     node_verify_param = {
       'filelist': file_names,
       'nodelist': nodelist,
-      'hypervisor': None,
+      'hypervisor': hypervisors,
       'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                         for node in nodeinfo]
       }
-    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
+    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param,
+                                      self.cfg.GetClusterName())
     all_rversion = rpc.call_version(nodelist)
-    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
+    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
+                                   self.cfg.GetHypervisorType())
 
     for node in nodelist:
       feedback_fn("* Verifying node %s" % node)
@@ -1005,17 +1009,16 @@ class LURenameCluster(LogicalUnit):
   HPATH = "cluster-rename"
   HTYPE = constants.HTYPE_CLUSTER
   _OP_REQP = ["name"]
-  REQ_WSSTORE = True
 
   def BuildHooksEnv(self):
     """Build hooks env.
 
     """
     env = {
-      "OP_TARGET": self.sstore.GetClusterName(),
+      "OP_TARGET": self.cfg.GetClusterName(),
       "NEW_NAME": self.op.name,
       }
-    mn = self.sstore.GetMasterNode()
+    mn = self.cfg.GetMasterNode()
     return env, [mn], [mn]
 
   def CheckPrereq(self):
@@ -1026,8 +1029,8 @@ class LURenameCluster(LogicalUnit):
 
     new_name = hostname.name
     self.ip = new_ip = hostname.ip
-    old_name = self.sstore.GetClusterName()
-    old_ip = self.sstore.GetMasterIP()
+    old_name = self.cfg.GetClusterName()
+    old_ip = self.cfg.GetMasterIP()
     if new_name == old_name and new_ip == old_ip:
       raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                  " cluster has changed")
@@ -1045,15 +1048,15 @@ class LURenameCluster(LogicalUnit):
     """
     clustername = self.op.name
     ip = self.ip
-    ss = self.sstore
 
     # shutdown the master IP
-    master = ss.GetMasterNode()
+    master = self.cfg.GetMasterNode()
     if not rpc.call_node_stop_master(master, False):
       raise errors.OpExecError("Could not disable the master role")
 
     try:
       # modify the sstore
+      # TODO: sstore
       ss.SetKey(ss.SS_MASTER_IP, ip)
       ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
 
@@ -1101,16 +1104,25 @@ class LUSetClusterParams(LogicalUnit):
   HPATH = "cluster-modify"
   HTYPE = constants.HTYPE_CLUSTER
   _OP_REQP = []
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    # FIXME: in the future, changing other cluster parameters may not require
+    # checking all nodes.
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+    }
+    self.share_locks[locking.LEVEL_NODE] = 1
 
   def BuildHooksEnv(self):
     """Build hooks env.
 
     """
     env = {
-      "OP_TARGET": self.sstore.GetClusterName(),
+      "OP_TARGET": self.cfg.GetClusterName(),
       "NEW_VG_NAME": self.op.vg_name,
       }
-    mn = self.sstore.GetMasterNode()
+    mn = self.cfg.GetMasterNode()
     return env, [mn], [mn]
 
   def CheckPrereq(self):
@@ -1120,9 +1132,10 @@ class LUSetClusterParams(LogicalUnit):
     if the given volume group is valid.
 
     """
+    # FIXME: This only works because there is only one parameter that can be
+    # changed or removed.
     if not self.op.vg_name:
-      instances = [self.cfg.GetInstanceInfo(name)
-                   for name in self.cfg.GetInstanceList()]
+      instances = self.cfg.GetAllInstancesInfo().values()
       for inst in instances:
         for disk in inst.disks:
           if _RecursiveCheckIfLVMBased(disk):
@@ -1131,7 +1144,7 @@ class LUSetClusterParams(LogicalUnit):
 
     # if vg_name not None, checks given volume group on all nodes
     if self.op.vg_name:
-      node_list = self.cfg.GetNodeList()
+      node_list = self.acquired_locks[locking.LEVEL_NODE]
       vglist = rpc.call_vg_list(node_list)
       for node in node_list:
         vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
@@ -1151,7 +1164,7 @@ class LUSetClusterParams(LogicalUnit):
                   " state, not changing")
 
 
-def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
+def _WaitForSync(lu, instance, oneshot=False, unlock=False):
   """Sleep and poll for an instance's disk to sync.
 
   """
@@ -1159,12 +1172,12 @@ def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
     return True
 
   if not oneshot:
-    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
+    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
 
   node = instance.primary_node
 
   for dev in instance.disks:
-    cfgw.SetDiskID(dev, node)
+    lu.cfg.SetDiskID(dev, node)
 
   retries = 0
   while True:
@@ -1173,7 +1186,7 @@ def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
     cumul_degraded = False
     rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
     if not rstats:
-      proc.LogWarning("Can't get any data from node %s" % node)
+      lu.proc.LogWarning("Can't get any data from node %s" % node)
       retries += 1
       if retries >= 10:
         raise errors.RemoteError("Can't contact node %s for mirror data,"
@@ -1184,8 +1197,8 @@ def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
     for i in range(len(rstats)):
       mstat = rstats[i]
       if mstat is None:
-        proc.LogWarning("Can't compute data for node %s/%s" %
-                        (node, instance.disks[i].iv_name))
+        lu.proc.LogWarning("Can't compute data for node %s/%s" %
+                           (node, instance.disks[i].iv_name))
         continue
       # we ignore the ldisk parameter
       perc_done, est_time, is_degraded, _ = mstat
@@ -1197,19 +1210,19 @@ def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
           max_time = est_time
         else:
           rem_time = "no time estimate"
-        proc.LogInfo("- device %s: %5.2f%% done, %s" %
-                     (instance.disks[i].iv_name, perc_done, rem_time))
+        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
+                        (instance.disks[i].iv_name, perc_done, rem_time))
     if done or oneshot:
       break
 
     time.sleep(min(60, max_time))
 
   if done:
-    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
+    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
   return not cumul_degraded
 
 
-def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
+def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
   """Check that mirrors are not degraded.
 
   The ldisk parameter, if True, will change the test from the
@@ -1217,7 +1230,7 @@ def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
   the device(s)) to the ldisk (representing the local storage status).
 
   """
-  cfgw.SetDiskID(dev, node)
+  lu.cfg.SetDiskID(dev, node)
   if ldisk:
     idx = 6
   else:
@@ -1233,7 +1246,7 @@ def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
       result = result and (not rstats[idx])
   if dev.children:
     for child in dev.children:
-      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
+      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
 
   return result
 
@@ -1364,7 +1377,7 @@ class LURemoveNode(LogicalUnit):
 
     instance_list = self.cfg.GetInstanceList()
 
-    masternode = self.sstore.GetMasterNode()
+    masternode = self.cfg.GetMasterNode()
     if node.name == masternode:
       raise errors.OpPrereqError("Node is the master node,"
                                  " you need to failover first.")
@@ -1412,6 +1425,7 @@ class LUQueryNodes(NoHooksLU):
       "name", "pinst_cnt", "sinst_cnt",
       "pinst_list", "sinst_list",
       "pip", "sip", "tags",
+      "serial_no",
       ])
 
     _CheckOutputFields(static=self.static_fields,
@@ -1447,6 +1461,12 @@ class LUQueryNodes(NoHooksLU):
     all_info = self.cfg.GetAllNodesInfo()
     if self.do_locking:
       nodenames = self.acquired_locks[locking.LEVEL_NODE]
+    elif self.wanted != locking.ALL_SET:
+      nodenames = self.wanted
+      missing = set(nodenames).difference(all_info.keys())
+      if missing:
+        raise errors.OpExecError(
+          "Some nodes were removed before retrieving their data: %s" % missing)
     else:
       nodenames = all_info.keys()
     nodelist = [all_info[name] for name in nodenames]
@@ -1455,7 +1475,8 @@ class LUQueryNodes(NoHooksLU):
 
     if self.dynamic_fields.intersection(self.op.output_fields):
       live_data = {}
-      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                     self.cfg.GetHypervisorType())
       for name in nodenames:
         nodeinfo = node_data.get(name, None)
         if nodeinfo:
@@ -1511,6 +1532,8 @@ class LUQueryNodes(NoHooksLU):
           val = node.secondary_ip
         elif field == "tags":
           val = list(node.GetTags())
+        elif field == "serial_no":
+          val = node.serial_no
         elif field in self.dynamic_fields:
           val = live_data[node.name].get(field, None)
         else:
@@ -1675,7 +1698,7 @@ class LUAddNode(LogicalUnit):
 
     # check that the type of the node (single versus dual homed) is the
     # same as for the master
-    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
+    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
     master_singlehomed = myself.secondary_ip == myself.primary_ip
     newbie_singlehomed = secondary_ip == primary_ip
     if master_singlehomed != newbie_singlehomed:
@@ -1755,13 +1778,14 @@ class LUAddNode(LogicalUnit):
                                  " you gave (%s). Please fix and re-run this"
                                  " command." % new_node.secondary_ip)
 
-    node_verify_list = [self.sstore.GetMasterNode()]
+    node_verify_list = [self.cfg.GetMasterNode()]
     node_verify_param = {
       'nodelist': [node],
       # TODO: do a node-net-test as well?
     }
 
-    result = rpc.call_node_verify(node_verify_list, node_verify_param)
+    result = rpc.call_node_verify(node_verify_list, node_verify_param,
+                                  self.cfg.GetClusterName())
     for verifier in node_verify_list:
       if not result[verifier]:
         raise errors.OpExecError("Cannot communicate with %s's node daemon"
@@ -1774,7 +1798,7 @@ class LUAddNode(LogicalUnit):
 
     # Distribute updated /etc/hosts and known_hosts to all nodes,
     # including the node just added
-    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
+    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
     dist_nodes = self.cfg.GetNodeList()
     if not self.op.readd:
       dist_nodes.append(node)
@@ -1789,8 +1813,8 @@ class LUAddNode(LogicalUnit):
           logger.Error("copy of file %s to node %s failed" %
                        (fname, to_node))
 
-    to_copy = self.sstore.GetFileList()
-    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
+    to_copy = []
+    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
       to_copy.append(constants.VNC_PASSWORD_FILE)
     for fname in to_copy:
       result = rpc.call_upload_file([node], fname)
@@ -1825,22 +1849,23 @@ class LUQueryClusterInfo(NoHooksLU):
 
     """
     result = {
-      "name": self.sstore.GetClusterName(),
+      "name": self.cfg.GetClusterName(),
       "software_version": constants.RELEASE_VERSION,
       "protocol_version": constants.PROTOCOL_VERSION,
       "config_version": constants.CONFIG_VERSION,
       "os_api_version": constants.OS_API_VERSION,
       "export_version": constants.EXPORT_VERSION,
-      "master": self.sstore.GetMasterNode(),
+      "master": self.cfg.GetMasterNode(),
       "architecture": (platform.architecture()[0], platform.machine()),
-      "hypervisor_type": self.sstore.GetHypervisorType(),
+      "hypervisor_type": self.cfg.GetHypervisorType(),
+      "enabled_hypervisors": self.cfg.GetClusterInfo().enabled_hypervisors,
       }
 
     return result
 
 
-class LUDumpClusterConfig(NoHooksLU):
-  """Return a text-representation of the cluster-config.
+class LUQueryConfigValues(NoHooksLU):
+  """Return configuration values.
 
   """
   _OP_REQP = []
@@ -1849,6 +1874,11 @@ class LUDumpClusterConfig(NoHooksLU):
   def ExpandNames(self):
     self.needed_locks = {}
 
+    static_fields = ["cluster_name", "master_node"]
+    _CheckOutputFields(static=static_fields,
+                       dynamic=[],
+                       selected=self.op.output_fields)
+
   def CheckPrereq(self):
     """No prerequisites.
 
@@ -1859,7 +1889,15 @@ class LUDumpClusterConfig(NoHooksLU):
     """Dump a representation of the cluster config to the standard output.
 
     """
-    return self.cfg.DumpConfig()
+    values = []
+    for field in self.op.output_fields:
+      if field == "cluster_name":
+        values.append(self.cfg.GetClusterName())
+      elif field == "master_node":
+        values.append(self.cfg.GetMasterNode())
+      else:
+        raise errors.ParameterError(field)
+    return values
 
 
 class LUActivateInstanceDisks(NoHooksLU):
@@ -1892,14 +1930,14 @@ class LUActivateInstanceDisks(NoHooksLU):
     """Activate the disks.
 
     """
-    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
+    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
     if not disks_ok:
       raise errors.OpExecError("Cannot activate block devices")
 
     return disks_info
 
 
-def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
+def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
   """Prepare the block devices for an instance.
 
   This sets up the block devices on all nodes.
@@ -1929,7 +1967,7 @@ def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
   # 1st pass, assemble on all nodes in secondary mode
   for inst_disk in instance.disks:
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
-      cfg.SetDiskID(node_disk, node)
+      lu.cfg.SetDiskID(node_disk, node)
       result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
       if not result:
         logger.Error("could not prepare block device %s on node %s"
@@ -1944,7 +1982,7 @@ def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       if node != instance.primary_node:
         continue
-      cfg.SetDiskID(node_disk, node)
+      lu.cfg.SetDiskID(node_disk, node)
       result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
       if not result:
         logger.Error("could not prepare block device %s on node %s"
@@ -1956,19 +1994,19 @@ def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
   # this is a workaround that would be fixed better by
   # improving the logical/physical id handling
   for disk in instance.disks:
-    cfg.SetDiskID(disk, instance.primary_node)
+    lu.cfg.SetDiskID(disk, instance.primary_node)
 
   return disks_ok, device_info
 
 
-def _StartInstanceDisks(cfg, instance, force):
+def _StartInstanceDisks(lu, instance, force):
   """Start the disks of an instance.
 
   """
-  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
+  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                            ignore_secondaries=force)
   if not disks_ok:
-    _ShutdownInstanceDisks(instance, cfg)
+    _ShutdownInstanceDisks(lu, instance)
     if force is not None and not force:
       logger.Error("If the message above refers to a secondary node,"
                    " you can retry the operation using '--force'.")
@@ -2006,17 +2044,18 @@ class LUDeactivateInstanceDisks(NoHooksLU):
 
     """
     instance = self.instance
-    _SafeShutdownInstanceDisks(instance, self.cfg)
+    _SafeShutdownInstanceDisks(self, instance)
 
 
-def _SafeShutdownInstanceDisks(instance, cfg):
+def _SafeShutdownInstanceDisks(lu, instance):
   """Shutdown block devices of an instance.
 
   This function checks if an instance is running, before calling
   _ShutdownInstanceDisks.
 
   """
-  ins_l = rpc.call_instance_list([instance.primary_node])
+  ins_l = rpc.call_instance_list([instance.primary_node],
+                                 [instance.hypervisor])
   ins_l = ins_l[instance.primary_node]
   if not type(ins_l) is list:
     raise errors.OpExecError("Can't contact node '%s'" %
@@ -2026,10 +2065,10 @@ def _SafeShutdownInstanceDisks(instance, cfg):
     raise errors.OpExecError("Instance is running, can't shutdown"
                              " block devices.")
 
-  _ShutdownInstanceDisks(instance, cfg)
+  _ShutdownInstanceDisks(lu, instance)
 
 
-def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
+def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
   """Shutdown block devices of an instance.
 
   This does the shutdown on all nodes of the instance.
@@ -2041,7 +2080,7 @@ def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
   result = True
   for disk in instance.disks:
     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
-      cfg.SetDiskID(top_disk, node)
+      lu.cfg.SetDiskID(top_disk, node)
       if not rpc.call_blockdev_shutdown(node, top_disk):
         logger.Error("could not shutdown block device %s on node %s" %
                      (disk.iv_name, node))
@@ -2050,7 +2089,7 @@ def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
   return result
 
 
-def _CheckNodeFreeMemory(cfg, node, reason, requested):
+def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
   """Checks if a node has enough free memory.
 
   This function check if a given node has the needed amount of free
@@ -2058,14 +2097,21 @@ def _CheckNodeFreeMemory(cfg, node, reason, requested):
   information from the node, this function raise an OpPrereqError
   exception.
 
-  Args:
-    - cfg: a ConfigWriter instance
-    - node: the node name
-    - reason: string to use in the error message
-    - requested: the amount of memory in MiB
+  @type lu: C{LogicalUnit}
+  @param lu: a logical unit from which we get configuration data
+  @type node: C{str}
+  @param node: the node to check
+  @type reason: C{str}
+  @param reason: string to use in the error message
+  @type requested: C{int}
+  @param requested: the amount of memory in MiB to check for
+  @type hypervisor: C{str}
+  @param hypervisor: the hypervisor to ask for memory stats
+  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
+      we cannot check the node
 
   """
-  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
+  nodeinfo = rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
   if not nodeinfo or not isinstance(nodeinfo, dict):
     raise errors.OpPrereqError("Could not contact node %s for resource"
                              " information" % (node,))
@@ -2108,7 +2154,7 @@ class LUStartupInstance(LogicalUnit):
       "FORCE": self.op.force,
       }
     env.update(_BuildInstanceHookEnvByObject(self.instance))
-    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
           list(self.instance.secondary_nodes))
     return env, nl, nl
 
@@ -2123,11 +2169,11 @@ class LUStartupInstance(LogicalUnit):
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
     # check bridges existence
-    _CheckInstanceBridgesExist(instance)
+    _CheckInstanceBridgesExist(self, instance)
 
-    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
+    _CheckNodeFreeMemory(self, instance.primary_node,
                          "starting instance %s" % instance.name,
-                         instance.memory)
+                         instance.memory, instance.hypervisor)
 
   def Exec(self, feedback_fn):
     """Start the instance.
@@ -2141,10 +2187,10 @@ class LUStartupInstance(LogicalUnit):
 
     node_current = instance.primary_node
 
-    _StartInstanceDisks(self.cfg, instance, force)
+    _StartInstanceDisks(self, instance, force)
 
     if not rpc.call_instance_start(node_current, instance, extra_args):
-      _ShutdownInstanceDisks(instance, self.cfg)
+      _ShutdownInstanceDisks(self, instance)
       raise errors.OpExecError("Could not start instance")
 
 
@@ -2184,7 +2230,7 @@ class LURebootInstance(LogicalUnit):
       "IGNORE_SECONDARIES": self.op.ignore_secondaries,
       }
     env.update(_BuildInstanceHookEnvByObject(self.instance))
-    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
           list(self.instance.secondary_nodes))
     return env, nl, nl
 
@@ -2199,7 +2245,7 @@ class LURebootInstance(LogicalUnit):
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
     # check bridges existence
-    _CheckInstanceBridgesExist(instance)
+    _CheckInstanceBridgesExist(self, instance)
 
   def Exec(self, feedback_fn):
     """Reboot the instance.
@@ -2220,10 +2266,10 @@ class LURebootInstance(LogicalUnit):
     else:
       if not rpc.call_instance_shutdown(node_current, instance):
         raise errors.OpExecError("could not shutdown instance for full reboot")
-      _ShutdownInstanceDisks(instance, self.cfg)
-      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
+      _ShutdownInstanceDisks(self, instance)
+      _StartInstanceDisks(self, instance, ignore_secondaries)
       if not rpc.call_instance_start(node_current, instance, extra_args):
-        _ShutdownInstanceDisks(instance, self.cfg)
+        _ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance for full reboot")
 
     self.cfg.MarkInstanceUp(instance.name)
@@ -2254,7 +2300,7 @@ class LUShutdownInstance(LogicalUnit):
 
     """
     env = _BuildInstanceHookEnvByObject(self.instance)
-    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
           list(self.instance.secondary_nodes))
     return env, nl, nl
 
@@ -2278,7 +2324,7 @@ class LUShutdownInstance(LogicalUnit):
     if not rpc.call_instance_shutdown(node_current, instance):
       logger.Error("could not shutdown instance")
 
-    _ShutdownInstanceDisks(instance, self.cfg)
+    _ShutdownInstanceDisks(self, instance)
 
 
 class LUReinstallInstance(LogicalUnit):
@@ -2306,7 +2352,7 @@ class LUReinstallInstance(LogicalUnit):
 
     """
     env = _BuildInstanceHookEnvByObject(self.instance)
-    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
           list(self.instance.secondary_nodes))
     return env, nl, nl
 
@@ -2326,7 +2372,8 @@ class LUReinstallInstance(LogicalUnit):
     if instance.status != "down":
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
-    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name,
+                                         instance.hypervisor)
     if remote_info:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
@@ -2356,9 +2403,9 @@ class LUReinstallInstance(LogicalUnit):
     if self.op.os_type is not None:
       feedback_fn("Changing OS to '%s'..." % self.op.os_type)
       inst.os = self.op.os_type
-      self.cfg.AddInstance(inst)
+      self.cfg.Update(inst)
 
-    _StartInstanceDisks(self.cfg, inst, None)
+    _StartInstanceDisks(self, inst, None)
     try:
       feedback_fn("Running the instance OS create scripts...")
       if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
@@ -2366,7 +2413,7 @@ class LUReinstallInstance(LogicalUnit):
                                  " on node %s" %
                                  (inst.name, inst.primary_node))
     finally:
-      _ShutdownInstanceDisks(inst, self.cfg)
+      _ShutdownInstanceDisks(self, inst)
 
 
 class LURenameInstance(LogicalUnit):
@@ -2385,7 +2432,7 @@ class LURenameInstance(LogicalUnit):
     """
     env = _BuildInstanceHookEnvByObject(self.instance)
     env["INSTANCE_NEW_NAME"] = self.op.new_name
-    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
           list(self.instance.secondary_nodes))
     return env, nl, nl
 
@@ -2403,7 +2450,8 @@ class LURenameInstance(LogicalUnit):
     if instance.status != "down":
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
-    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name,
+                                         instance.hypervisor)
     if remote_info:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
@@ -2462,7 +2510,7 @@ class LURenameInstance(LogicalUnit):
                                  " Ganeti)" % (old_file_storage_dir,
                                                new_file_storage_dir))
 
-    _StartInstanceDisks(self.cfg, inst, None)
+    _StartInstanceDisks(self, inst, None)
     try:
       if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                           "sda", "sdb"):
@@ -2471,7 +2519,7 @@ class LURenameInstance(LogicalUnit):
                (inst.name, inst.primary_node))
         logger.Error(msg)
     finally:
-      _ShutdownInstanceDisks(inst, self.cfg)
+      _ShutdownInstanceDisks(self, inst)
 
 
 class LURemoveInstance(LogicalUnit):
@@ -2499,7 +2547,7 @@ class LURemoveInstance(LogicalUnit):
 
     """
     env = _BuildInstanceHookEnvByObject(self.instance)
-    nl = [self.sstore.GetMasterNode()]
+    nl = [self.cfg.GetMasterNode()]
     return env, nl, nl
 
   def CheckPrereq(self):
@@ -2529,7 +2577,7 @@ class LURemoveInstance(LogicalUnit):
 
     logger.Info("removing block devices for instance %s" % instance.name)
 
-    if not _RemoveDisks(instance, self.cfg):
+    if not _RemoveDisks(self, instance):
       if self.op.ignore_failures:
         feedback_fn("Warning: can't remove instance's disks")
       else:
@@ -2555,11 +2603,11 @@ class LUQueryInstances(NoHooksLU):
       "admin_state", "admin_ram",
       "disk_template", "ip", "mac", "bridge",
       "sda_size", "sdb_size", "vcpus", "tags",
-      "auto_balance",
       "network_port", "kernel_path", "initrd_path",
       "hvm_boot_order", "hvm_acpi", "hvm_pae",
       "hvm_cdrom_image_path", "hvm_nic_type",
       "hvm_disk_type", "vnc_bind_address",
+      "serial_no", "hypervisor",
       ])
     _CheckOutputFields(static=self.static_fields,
                        dynamic=self.dynamic_fields,
@@ -2597,6 +2645,13 @@ class LUQueryInstances(NoHooksLU):
     all_info = self.cfg.GetAllInstancesInfo()
     if self.do_locking:
       instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+    elif self.wanted != locking.ALL_SET:
+      instance_names = self.wanted
+      missing = set(instance_names).difference(all_info.keys())
+      if missing:
+        raise errors.OpExecError(
+          "Some instances were removed before retrieving their data: %s"
+          % missing)
     else:
       instance_names = all_info.keys()
     instance_list = [all_info[iname] for iname in instance_names]
@@ -2604,11 +2659,12 @@ class LUQueryInstances(NoHooksLU):
     # begin data gathering
 
     nodes = frozenset([inst.primary_node for inst in instance_list])
+    hv_list = list(set([inst.hypervisor for inst in instance_list]))
 
     bad_nodes = []
     if self.dynamic_fields.intersection(self.op.output_fields):
       live_data = {}
-      node_data = rpc.call_all_instances_info(nodes)
+      node_data = rpc.call_all_instances_info(nodes, hv_list)
       for name in nodes:
         result = node_data[name]
         if result:
@@ -2682,6 +2738,8 @@ class LUQueryInstances(NoHooksLU):
           val = instance.vcpus
         elif field == "tags":
           val = list(instance.GetTags())
+        elif field == "serial_no":
+          val = instance.serial_no
         elif field in ("network_port", "kernel_path", "initrd_path",
                        "hvm_boot_order", "hvm_acpi", "hvm_pae",
                        "hvm_cdrom_image_path", "hvm_nic_type",
@@ -2694,6 +2752,8 @@ class LUQueryInstances(NoHooksLU):
             val = "default"
           else:
             val = "-"
+        elif field == "hypervisor":
+          val = instance.hypervisor
         else:
           raise errors.ParameterError(field)
         iout.append(val)
@@ -2730,7 +2790,7 @@ class LUFailoverInstance(LogicalUnit):
       "IGNORE_CONSISTENCY": self.op.ignore_consistency,
       }
     env.update(_BuildInstanceHookEnvByObject(self.instance))
-    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
+    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
     return env, nl, nl
 
   def CheckPrereq(self):
@@ -2754,8 +2814,9 @@ class LUFailoverInstance(LogicalUnit):
 
     target_node = secondary_nodes[0]
     # check memory requirements on the secondary node
-    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
-                         instance.name, instance.memory)
+    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
+                         instance.name, instance.memory,
+                         instance.hypervisor)
 
     # check bridge existence
     brlist = [nic.bridge for nic in instance.nics]
@@ -2779,7 +2840,7 @@ class LUFailoverInstance(LogicalUnit):
     feedback_fn("* checking disk consistency between source and target")
     for dev in instance.disks:
       # for drbd, these are drbd over lvm
-      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
+      if not _CheckDiskConsistency(self, dev, target_node, False):
         if instance.status == "up" and not self.op.ignore_consistency:
           raise errors.OpExecError("Disk %s is degraded on target node,"
                                    " aborting failover." % dev.iv_name)
@@ -2798,7 +2859,7 @@ class LUFailoverInstance(LogicalUnit):
                                  (instance.name, source_node))
 
     feedback_fn("* deactivating the instance's disks on source node")
-    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
+    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
       raise errors.OpExecError("Can't shut down the instance's disks.")
 
     instance.primary_node = target_node
@@ -2811,20 +2872,20 @@ class LUFailoverInstance(LogicalUnit):
       logger.Info("Starting instance %s on node %s" %
                   (instance.name, target_node))
 
-      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
+      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                                ignore_secondaries=True)
       if not disks_ok:
-        _ShutdownInstanceDisks(instance, self.cfg)
+        _ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Can't activate the instance's disks")
 
       feedback_fn("* starting the instance on the target node")
       if not rpc.call_instance_start(target_node, instance, None):
-        _ShutdownInstanceDisks(instance, self.cfg)
+        _ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance %s on node %s." %
                                  (instance.name, target_node))
 
 
-def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
+def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
   """Create a tree of block devices on the primary node.
 
   This always creates all devices.
@@ -2832,10 +2893,10 @@ def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
   """
   if device.children:
     for child in device.children:
-      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
+      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
         return False
 
-  cfg.SetDiskID(device, node)
+  lu.cfg.SetDiskID(device, node)
   new_id = rpc.call_blockdev_create(node, device, device.size,
                                     instance.name, True, info)
   if not new_id:
@@ -2845,7 +2906,7 @@ def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
   return True
 
 
-def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
+def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
   """Create a tree of block devices on a secondary node.
 
   If this device type has to be created on secondaries, create it and
@@ -2858,13 +2919,13 @@ def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
     force = True
   if device.children:
     for child in device.children:
-      if not _CreateBlockDevOnSecondary(cfg, node, instance,
+      if not _CreateBlockDevOnSecondary(lu, node, instance,
                                         child, force, info):
         return False
 
   if not force:
     return True
-  cfg.SetDiskID(device, node)
+  lu.cfg.SetDiskID(device, node)
   new_id = rpc.call_blockdev_create(node, device, device.size,
                                     instance.name, False, info)
   if not new_id:
@@ -2874,7 +2935,7 @@ def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
   return True
 
 
-def _GenerateUniqueNames(cfg, exts):
+def _GenerateUniqueNames(lu, exts):
   """Generate a suitable LV name.
 
   This will generate a logical volume name for the given instance.
@@ -2882,29 +2943,33 @@ def _GenerateUniqueNames(cfg, exts):
   """
   results = []
   for val in exts:
-    new_id = cfg.GenerateUniqueID()
+    new_id = lu.cfg.GenerateUniqueID()
     results.append("%s%s" % (new_id, val))
   return results
 
 
-def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
+def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
+                         p_minor, s_minor):
   """Generate a drbd8 device complete with its children.
 
   """
-  port = cfg.AllocatePort()
-  vgname = cfg.GetVGName()
+  port = lu.cfg.AllocatePort()
+  vgname = lu.cfg.GetVGName()
+  shared_secret = lu.cfg.GenerateDRBDSecret()
   dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                           logical_id=(vgname, names[0]))
   dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                           logical_id=(vgname, names[1]))
   drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
-                          logical_id = (primary, secondary, port),
-                          children = [dev_data, dev_meta],
+                          logical_id=(primary, secondary, port,
+                                      p_minor, s_minor,
+                                      shared_secret),
+                          children=[dev_data, dev_meta],
                           iv_name=iv_name)
   return drbd_dev
 
 
-def _GenerateDiskTemplate(cfg, template_name,
+def _GenerateDiskTemplate(lu, template_name,
                           instance_name, primary_node,
                           secondary_nodes, disk_sz, swap_sz,
                           file_storage_dir, file_driver):
@@ -2913,14 +2978,14 @@ def _GenerateDiskTemplate(cfg, template_name,
   """
   #TODO: compute space requirements
 
-  vgname = cfg.GetVGName()
+  vgname = lu.cfg.GetVGName()
   if template_name == constants.DT_DISKLESS:
     disks = []
   elif template_name == constants.DT_PLAIN:
     if len(secondary_nodes) != 0:
       raise errors.ProgrammerError("Wrong template configuration")
 
-    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
+    names = _GenerateUniqueNames(lu, [".sda", ".sdb"])
     sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                            logical_id=(vgname, names[0]),
                            iv_name = "sda")
@@ -2932,12 +2997,18 @@ def _GenerateDiskTemplate(cfg, template_name,
     if len(secondary_nodes) != 1:
       raise errors.ProgrammerError("Wrong template configuration")
     remote_node = secondary_nodes[0]
-    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
-                                       ".sdb_data", ".sdb_meta"])
-    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
-                                         disk_sz, names[0:2], "sda")
-    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
-                                         swap_sz, names[2:4], "sdb")
+    (minor_pa, minor_pb,
+     minor_sa, minor_sb) = lu.cfg.AllocateDRBDMinor(
+      [primary_node, primary_node, remote_node, remote_node], instance_name)
+
+    names = _GenerateUniqueNames(lu, [".sda_data", ".sda_meta",
+                                      ".sdb_data", ".sdb_meta"])
+    drbd_sda_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
+                                        disk_sz, names[0:2], "sda",
+                                        minor_pa, minor_sa)
+    drbd_sdb_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
+                                        swap_sz, names[2:4], "sdb",
+                                        minor_pb, minor_sb)
     disks = [drbd_sda_dev, drbd_sdb_dev]
   elif template_name == constants.DT_FILE:
     if len(secondary_nodes) != 0:
@@ -2962,7 +3033,7 @@ def _GetInstanceInfoText(instance):
   return "originstname+%s" % instance.name
 
 
-def _CreateDisks(cfg, instance):
+def _CreateDisks(lu, instance):
   """Create all disks for an instance.
 
   This abstracts away some work from AddInstance.
@@ -2994,13 +3065,13 @@ def _CreateDisks(cfg, instance):
                 (device.iv_name, instance.name))
     #HARDCODE
     for secondary_node in instance.secondary_nodes:
-      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
+      if not _CreateBlockDevOnSecondary(lu, secondary_node, instance,
                                         device, False, info):
         logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                      (device.iv_name, device, secondary_node))
         return False
     #HARDCODE
-    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
+    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
                                     instance, device, info):
       logger.Error("failed to create volume %s on primary!" %
                    device.iv_name)
@@ -3009,7 +3080,7 @@ def _CreateDisks(cfg, instance):
   return True
 
 
-def _RemoveDisks(instance, cfg):
+def _RemoveDisks(lu, instance):
   """Remove all disks for an instance.
 
   This abstracts away some work from `AddInstance()` and
@@ -3029,7 +3100,7 @@ def _RemoveDisks(instance, cfg):
   result = True
   for device in instance.disks:
     for node, disk in device.ComputeNodeTree(instance.primary_node):
-      cfg.SetDiskID(disk, node)
+      lu.cfg.SetDiskID(disk, node)
       if not rpc.call_blockdev_remove(node, disk):
         logger.Error("could not remove block device %s on node %s,"
                      " continuing anyway" %
@@ -3100,19 +3171,31 @@ class LUCreateInstance(LogicalUnit):
     for attr in ["kernel_path", "initrd_path", "pnode", "snode",
                  "iallocator", "hvm_boot_order", "hvm_acpi", "hvm_pae",
                  "hvm_cdrom_image_path", "hvm_nic_type", "hvm_disk_type",
-                 "vnc_bind_address"]:
+                 "vnc_bind_address", "hypervisor"]:
       if not hasattr(self.op, attr):
         setattr(self.op, attr, None)
 
+    # cheap checks, mostly verifying that valid constants were given
+
     # verify creation mode
     if self.op.mode not in (constants.INSTANCE_CREATE,
                             constants.INSTANCE_IMPORT):
       raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                  self.op.mode)
+
     # disk template and mirror node verification
     if self.op.disk_template not in constants.DISK_TEMPLATES:
       raise errors.OpPrereqError("Invalid disk template name")
 
+    if self.op.hypervisor is None:
+      self.op.hypervisor = self.cfg.GetHypervisorType()
+
+    enabled_hvs = self.cfg.GetClusterInfo().enabled_hypervisors
+    if self.op.hypervisor not in enabled_hvs:
+      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
+                                 " cluster (%s)" % (self.op.hypervisor,
+                                  ",".join(enabled_hvs)))
+
     #### instance parameters check
 
     # instance name verification
@@ -3205,7 +3288,7 @@ class LUCreateInstance(LogicalUnit):
              {"size": self.op.swap_size, "mode": "w"}]
     nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
              "bridge": self.op.bridge}]
-    ial = IAllocator(self.cfg, self.sstore,
+    ial = IAllocator(self.cfg,
                      mode=constants.IALLOCATOR_MODE_ALLOC,
                      name=self.op.instance_name,
                      disk_template=self.op.disk_template,
@@ -3226,7 +3309,8 @@ class LUCreateInstance(LogicalUnit):
     if len(ial.nodes) != ial.required_nodes:
       raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                  " of nodes (%s), required %s" %
-                                 (len(ial.nodes), ial.required_nodes))
+                                 (self.op.iallocator, len(ial.nodes),
+                                  ial.required_nodes))
     self.op.pnode = ial.nodes[0]
     logger.ToStdout("Selected nodes for the instance: %s" %
                     (", ".join(ial.nodes),))
@@ -3262,7 +3346,7 @@ class LUCreateInstance(LogicalUnit):
       nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
     ))
 
-    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
+    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
           self.secondaries)
     return env, nl, nl
 
@@ -3276,6 +3360,7 @@ class LUCreateInstance(LogicalUnit):
       raise errors.OpPrereqError("Cluster does not support lvm-based"
                                  " instances")
 
+
     if self.op.mode == constants.INSTANCE_IMPORT:
       src_node = self.op.src_node
       src_path = self.op.src_path
@@ -3312,7 +3397,7 @@ class LUCreateInstance(LogicalUnit):
     if self.op.ip_check:
       if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
         raise errors.OpPrereqError("IP %s of instance %s already in use" %
-                                   (self.check_ip, instance_name))
+                                   (self.check_ip, self.op.instance_name))
 
     # bridge verification
     bridge = getattr(self.op, "bridge", None)
@@ -3350,7 +3435,8 @@ class LUCreateInstance(LogicalUnit):
     # Check lv size requirements
     if req_size is not None:
       nodenames = [pnode.name] + self.secondaries
-      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                    self.op.hypervisor)
       for node in nodenames:
         info = nodeinfo.get(node, None)
         if not info:
@@ -3382,9 +3468,9 @@ class LUCreateInstance(LogicalUnit):
 
     # memory check on primary node
     if self.op.start:
-      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
+      _CheckNodeFreeMemory(self, self.pnode.name,
                            "creating instance %s" % self.op.instance_name,
-                           self.op.mem_size)
+                           self.op.mem_size, self.op.hypervisor)
 
     # hvm_cdrom_image_path verification
     if self.op.hvm_cdrom_image_path is not None:
@@ -3407,7 +3493,7 @@ class LUCreateInstance(LogicalUnit):
                                    self.op.vnc_bind_address)
 
     # Xen HVM device type checks
-    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if self.op.hypervisor == constants.HT_XEN_HVM:
       if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
         raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
                                    " hypervisor" % self.op.hvm_nic_type)
@@ -3436,7 +3522,7 @@ class LUCreateInstance(LogicalUnit):
     if self.inst_ip is not None:
       nic.ip = self.inst_ip
 
-    ht_kind = self.sstore.GetHypervisorType()
+    ht_kind = self.op.hypervisor
     if ht_kind in constants.HTS_REQ_PORT:
       network_port = self.cfg.AllocatePort()
     else:
@@ -3453,11 +3539,11 @@ class LUCreateInstance(LogicalUnit):
 
     # build the full file storage dir path
     file_storage_dir = os.path.normpath(os.path.join(
-                                        self.sstore.GetFileStorageDir(),
+                                        self.cfg.GetFileStorageDir(),
                                         string_file_storage_dir, instance))
 
 
-    disks = _GenerateDiskTemplate(self.cfg,
+    disks = _GenerateDiskTemplate(self,
                                   self.op.disk_template,
                                   instance, pnode_name,
                                   self.secondaries, self.op.disk_size,
@@ -3482,11 +3568,13 @@ class LUCreateInstance(LogicalUnit):
                             vnc_bind_address=self.op.vnc_bind_address,
                             hvm_nic_type=self.op.hvm_nic_type,
                             hvm_disk_type=self.op.hvm_disk_type,
+                            hypervisor=self.op.hypervisor,
                             )
 
     feedback_fn("* creating instance disks...")
-    if not _CreateDisks(self.cfg, iobj):
-      _RemoveDisks(iobj, self.cfg)
+    if not _CreateDisks(self, iobj):
+      _RemoveDisks(self, iobj)
+      self.cfg.ReleaseDRBDMinors(instance)
       raise errors.OpExecError("Device creation failed, reverting...")
 
     feedback_fn("adding instance %s to cluster config" % instance)
@@ -3495,19 +3583,21 @@ class LUCreateInstance(LogicalUnit):
     # Declare that we don't want to remove the instance lock anymore, as we've
     # added the instance to the config
     del self.remove_locks[locking.LEVEL_INSTANCE]
+    # Remove the temp. assignments for the instance's drbds
+    self.cfg.ReleaseDRBDMinors(instance)
 
     if self.op.wait_for_sync:
-      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
+      disk_abort = not _WaitForSync(self, iobj)
     elif iobj.disk_template in constants.DTS_NET_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
       time.sleep(15)
       feedback_fn("* checking mirrors status")
-      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
+      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
     else:
       disk_abort = False
 
     if disk_abort:
-      _RemoveDisks(iobj, self.cfg)
+      _RemoveDisks(self, iobj)
       self.cfg.RemoveInstance(iobj.name)
       # Make sure the instance lock gets removed
       self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
@@ -3529,8 +3619,9 @@ class LUCreateInstance(LogicalUnit):
         feedback_fn("* running the instance OS import scripts...")
         src_node = self.op.src_node
         src_image = self.src_image
+        cluster_name = self.cfg.GetClusterName()
         if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
-                                                src_node, src_image):
+                                           src_node, src_image, cluster_name):
           raise errors.OpExecError("Could not import os for instance"
                                    " %s on node %s" %
                                    (instance, pnode_name))
@@ -3577,7 +3668,8 @@ class LUConnectConsole(NoHooksLU):
     instance = self.instance
     node = instance.primary_node
 
-    node_insts = rpc.call_instance_list([node])[node]
+    node_insts = rpc.call_instance_list([node],
+                                        [instance.hypervisor])[node]
     if node_insts is False:
       raise errors.OpExecError("Can't connect to node %s." % node)
 
@@ -3586,7 +3678,7 @@ class LUConnectConsole(NoHooksLU):
 
     logger.Debug("connecting to console of %s on %s" % (instance.name, node))
 
-    hyper = hypervisor.GetHypervisor()
+    hyper = hypervisor.GetHypervisor(instance.hypervisor)
     console_cmd = hyper.GetShellCommandForConsole(instance)
 
     # build ssh cmdline
@@ -3637,7 +3729,7 @@ class LUReplaceDisks(LogicalUnit):
     """Compute a new secondary node using an IAllocator.
 
     """
-    ial = IAllocator(self.cfg, self.sstore,
+    ial = IAllocator(self.cfg,
                      mode=constants.IALLOCATOR_MODE_RELOC,
                      name=self.op.instance_name,
                      relocate_from=[self.sec_node])
@@ -3669,7 +3761,7 @@ class LUReplaceDisks(LogicalUnit):
       }
     env.update(_BuildInstanceHookEnvByObject(self.instance))
     nl = [
-      self.sstore.GetMasterNode(),
+      self.cfg.GetMasterNode(),
       self.instance.primary_node,
       ]
     if self.op.remote_node is not None:
@@ -3803,7 +3895,7 @@ class LUReplaceDisks(LogicalUnit):
       if not dev.iv_name in self.op.disks:
         continue
       info("checking %s consistency on %s" % (dev.iv_name, oth_node))
-      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
+      if not _CheckDiskConsistency(self, dev, oth_node,
                                    oth_node==instance.primary_node):
         raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                  " to replace disks on this node (%s)" %
@@ -3817,7 +3909,7 @@ class LUReplaceDisks(LogicalUnit):
       size = dev.size
       cfg.SetDiskID(dev, tgt_node)
       lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
-      names = _GenerateUniqueNames(cfg, lv_names)
+      names = _GenerateUniqueNames(self, lv_names)
       lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                              logical_id=(vgname, names[0]))
       lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
@@ -3831,7 +3923,7 @@ class LUReplaceDisks(LogicalUnit):
       # _Create...OnPrimary (which forces the creation), even if we
       # are talking about the secondary node
       for new_lv in new_lvs:
-        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
+        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
                                         _GetInstanceInfoText(instance)):
           raise errors.OpExecError("Failed to create new LV named '%s' on"
                                    " node '%s'" %
@@ -3899,7 +3991,7 @@ class LUReplaceDisks(LogicalUnit):
     # does a combined result over all disks, so we don't check its
     # return value
     self.proc.LogStep(5, steps_total, "sync devices")
-    _WaitForSync(cfg, instance, self.proc, unlock=True)
+    _WaitForSync(self, instance, unlock=True)
 
     # so check manually all the devices
     for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
@@ -3975,7 +4067,7 @@ class LUReplaceDisks(LogicalUnit):
       if not dev.iv_name in self.op.disks:
         continue
       info("checking %s consistency on %s" % (dev.iv_name, pri_node))
-      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
+      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
         raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                  " unsafe to replace the secondary" %
                                  pri_node)
@@ -3989,26 +4081,42 @@ class LUReplaceDisks(LogicalUnit):
       # _Create...OnPrimary (which forces the creation), even if we
       # are talking about the secondary node
       for new_lv in dev.children:
-        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
+        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
                                         _GetInstanceInfoText(instance)):
           raise errors.OpExecError("Failed to create new LV named '%s' on"
                                    " node '%s'" %
                                    (new_lv.logical_id[1], new_node))
 
-      iv_names[dev.iv_name] = (dev, dev.children)
 
+    # Step 4: allocate the new DRBD minors and change the DRBD setup
+    # after this point, we must manually release the DRBD minors on
+    # both the error and the success paths
+    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
+                                   instance.name)
+    logging.debug("Allocated minors %s" % (minors,))
     self.proc.LogStep(4, steps_total, "changing drbd configuration")
-    for dev in instance.disks:
+    for dev, new_minor in zip(instance.disks, minors):
       size = dev.size
       info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
       # create new devices on new_node
+      if pri_node == dev.logical_id[0]:
+        new_logical_id = (pri_node, new_node,
+                          dev.logical_id[2], dev.logical_id[3], new_minor,
+                          dev.logical_id[5])
+      else:
+        new_logical_id = (new_node, pri_node,
+                          dev.logical_id[2], new_minor, dev.logical_id[4],
+                          dev.logical_id[5])
+      iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
+      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
+                    new_logical_id)
       new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
-                              logical_id=(pri_node, new_node,
-                                          dev.logical_id[2]),
+                              logical_id=new_logical_id,
                               children=dev.children)
-      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
+      if not _CreateBlockDevOnSecondary(self, new_node, instance,
                                         new_drbd, False,
-                                      _GetInstanceInfoText(instance)):
+                                        _GetInstanceInfoText(instance)):
+        self.cfg.ReleaseDRBDMinors(instance.name)
         raise errors.OpExecError("Failed to create new DRBD on"
                                  " node '%s'" % new_node)
 
@@ -4024,9 +4132,9 @@ class LUReplaceDisks(LogicalUnit):
     done = 0
     for dev in instance.disks:
       cfg.SetDiskID(dev, pri_node)
-      # set the physical (unique in bdev terms) id to None, meaning
-      # detach from network
-      dev.physical_id = (None,) * len(dev.physical_id)
+      # set the network part of the physical (unique in bdev terms) id
+      # to None, meaning detach from network
+      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
       # and 'find' the device, which will 'fix' it to match the
       # standalone state
       if rpc.call_blockdev_find(pri_node, dev):
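The detach trick can also be shown standalone; the assumption (suggested by the slice) is that the first four physical_id entries carry the network endpoints while the remainder is node-local data, with made-up example values:

    physical_id = ("198.51.100.1", 11000, "198.51.100.2", 11000, 0, 1, "s3cr3t")
    # blank only the network part, keeping the local (minor/secret) part
    physical_id = (None, None, None, None) + physical_id[4:]
    assert physical_id == (None, None, None, None, 0, 1, "s3cr3t")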
@@ -4037,15 +4145,19 @@ class LUReplaceDisks(LogicalUnit):
 
     if not done:
       # no detaches succeeded (very unlikely)
+      self.cfg.ReleaseDRBDMinors(instance.name)
       raise errors.OpExecError("Can't detach at least one DRBD from old node")
 
     # if we managed to detach at least one, we update all the disks of
     # the instance to point to the new secondary
     info("updating instance configuration")
-    for dev in instance.disks:
-      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
+    for dev, _, new_logical_id in iv_names.itervalues():
+      dev.logical_id = new_logical_id
       cfg.SetDiskID(dev, pri_node)
     cfg.Update(instance)
+    # we can now release the temporary minors, since the new values are
+    # written to the config file (and therefore stable)
+    self.cfg.ReleaseDRBDMinors(instance.name)
 
     # and now perform the drbd attach
     info("attaching primary drbds to new secondary (standalone => connected)")
@@ -4056,6 +4168,7 @@ class LUReplaceDisks(LogicalUnit):
       # it will automatically activate the network, if the physical_id
       # is correct
       cfg.SetDiskID(dev, pri_node)
+      logging.debug("Disk to attach: %s", dev)
       if not rpc.call_blockdev_find(pri_node, dev):
         warning("can't attach drbd %s to new secondary!" % dev.iv_name,
                 "please do a gnt-instance info to see the status of disks")
@@ -4064,17 +4177,17 @@ class LUReplaceDisks(LogicalUnit):
     # does a combined result over all disks, so we don't check its
     # return value
     self.proc.LogStep(5, steps_total, "sync devices")
-    _WaitForSync(cfg, instance, self.proc, unlock=True)
+    _WaitForSync(self, instance, unlock=True)
 
     # so check manually all the devices
-    for name, (dev, old_lvs) in iv_names.iteritems():
+    for name, (dev, old_lvs, _) in iv_names.iteritems():
       cfg.SetDiskID(dev, pri_node)
       is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
       if is_degr:
         raise errors.OpExecError("DRBD device %s is degraded!" % name)
 
     self.proc.LogStep(6, steps_total, "removing old storage")
-    for name, (dev, old_lvs) in iv_names.iteritems():
+    for name, (dev, old_lvs, _) in iv_names.iteritems():
       info("remove logical volumes for %s" % name)
       for lv in old_lvs:
         cfg.SetDiskID(lv, old_node)
@@ -4092,7 +4205,7 @@ class LUReplaceDisks(LogicalUnit):
 
     # Activate the instance disks if we're replacing them on a down instance
     if instance.status == "down":
-      _StartInstanceDisks(self.cfg, instance, True)
+      _StartInstanceDisks(self, instance, True)
 
     if instance.disk_template == constants.DT_DRBD8:
       if self.op.remote_node is None:
@@ -4106,7 +4219,7 @@ class LUReplaceDisks(LogicalUnit):
 
     # Deactivate the instance disks if we're replacing them on a down instance
     if instance.status == "down":
-      _SafeShutdownInstanceDisks(instance, self.cfg)
+      _SafeShutdownInstanceDisks(self, instance)
 
     return ret
 
@@ -4141,7 +4254,7 @@ class LUGrowDisk(LogicalUnit):
       }
     env.update(_BuildInstanceHookEnvByObject(self.instance))
     nl = [
-      self.sstore.GetMasterNode(),
+      self.cfg.GetMasterNode(),
       self.instance.primary_node,
       ]
     return env, nl, nl
@@ -4167,7 +4280,8 @@ class LUGrowDisk(LogicalUnit):
                                  (self.op.disk, instance.name))
 
     nodenames = [instance.primary_node] + list(instance.secondary_nodes)
-    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                  instance.hypervisor)
     for node in nodenames:
       info = nodeinfo.get(node, None)
       if not info:
@@ -4207,6 +4321,7 @@ class LUQueryInstanceData(NoHooksLU):
   """
   _OP_REQP = ["instances"]
   REQ_BGL = False
+
   def ExpandNames(self):
     self.needed_locks = {}
     self.share_locks = dict(((i, 1) for i in locking.LEVELS))
@@ -4289,7 +4404,8 @@ class LUQueryInstanceData(NoHooksLU):
     result = {}
     for instance in self.wanted_instances:
       remote_info = rpc.call_instance_info(instance.primary_node,
-                                                instance.name)
+                                           instance.name,
+                                           instance.hypervisor)
       if remote_info and "state" in remote_info:
         remote_state = "up"
       else:
@@ -4313,14 +4429,15 @@ class LUQueryInstanceData(NoHooksLU):
         "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
         "disks": disks,
         "vcpus": instance.vcpus,
+        "hypervisor": instance.hypervisor,
         }
 
-      htkind = self.sstore.GetHypervisorType()
-      if htkind == constants.HT_XEN_PVM30:
+      htkind = instance.hypervisor
+      if htkind == constants.HT_XEN_PVM:
         idict["kernel_path"] = instance.kernel_path
         idict["initrd_path"] = instance.initrd_path
 
-      if htkind == constants.HT_XEN_HVM31:
+      if htkind == constants.HT_XEN_HVM:
         idict["hvm_boot_order"] = instance.hvm_boot_order
         idict["hvm_acpi"] = instance.hvm_acpi
         idict["hvm_pae"] = instance.hvm_pae
@@ -4392,7 +4509,7 @@ class LUSetInstanceParams(LogicalUnit):
         mac = self.instance.nics[0].mac
       args['nics'] = [(ip, bridge, mac)]
     env = _BuildInstanceHookEnvByObject(self.instance, override=args)
-    nl = [self.sstore.GetMasterNode(),
+    nl = [self.cfg.GetMasterNode(),
           self.instance.primary_node] + list(self.instance.secondary_nodes)
     return env, nl, nl
 
@@ -4512,8 +4629,10 @@ class LUSetInstanceParams(LogicalUnit):
       pnode = self.instance.primary_node
       nodelist = [pnode]
       nodelist.extend(instance.secondary_nodes)
-      instance_info = rpc.call_instance_info(pnode, instance.name)
-      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
+      instance_info = rpc.call_instance_info(pnode, instance.name,
+                                             instance.hypervisor)
+      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
+                                    instance.hypervisor)
 
       if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
         # Assume the primary node is unreachable and go ahead
@@ -4540,7 +4659,7 @@ class LUSetInstanceParams(LogicalUnit):
                            " node %s" % node)
 
     # Xen HVM device type checks
-    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if instance.hypervisor == constants.HT_XEN_HVM:
       if self.op.hvm_nic_type is not None:
         if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
           raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
@@ -4688,7 +4807,7 @@ class LUExportInstance(LogicalUnit):
       "EXPORT_DO_SHUTDOWN": self.op.shutdown,
       }
     env.update(_BuildInstanceHookEnvByObject(self.instance))
-    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
+    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
           self.op.target_node]
     return env, nl, nl
 
@@ -4751,13 +4870,15 @@ class LUExportInstance(LogicalUnit):
     finally:
       if self.op.shutdown and instance.status == "up":
         if not rpc.call_instance_start(src_node, instance, None):
-          _ShutdownInstanceDisks(instance, self.cfg)
+          _ShutdownInstanceDisks(self, instance)
           raise errors.OpExecError("Could not start instance")
 
     # TODO: check for size
 
+    cluster_name = self.cfg.GetClusterName()
     for dev in snap_disks:
-      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
+      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
+                                      instance, cluster_name):
         logger.Error("could not export block device %s from node %s to node %s"
                      % (dev.logical_id[1], src_node, dst_node.name))
       if not rpc.call_blockdev_remove(src_node, dev):
@@ -4835,26 +4956,34 @@ class TagsLU(NoHooksLU):
   This is an abstract class which is the parent of all the other tags LUs.
 
   """
-  def CheckPrereq(self):
-    """Check prerequisites.
 
-    """
-    if self.op.kind == constants.TAG_CLUSTER:
-      self.target = self.cfg.GetClusterInfo()
-    elif self.op.kind == constants.TAG_NODE:
+  def ExpandNames(self):
+    self.needed_locks = {}
+    if self.op.kind == constants.TAG_NODE:
       name = self.cfg.ExpandNodeName(self.op.name)
       if name is None:
         raise errors.OpPrereqError("Invalid node name (%s)" %
                                    (self.op.name,))
       self.op.name = name
-      self.target = self.cfg.GetNodeInfo(name)
+      self.needed_locks[locking.LEVEL_NODE] = name
     elif self.op.kind == constants.TAG_INSTANCE:
       name = self.cfg.ExpandInstanceName(self.op.name)
       if name is None:
         raise errors.OpPrereqError("Invalid instance name (%s)" %
                                    (self.op.name,))
       self.op.name = name
-      self.target = self.cfg.GetInstanceInfo(name)
+      self.needed_locks[locking.LEVEL_INSTANCE] = name
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    """
+    if self.op.kind == constants.TAG_CLUSTER:
+      self.target = self.cfg.GetClusterInfo()
+    elif self.op.kind == constants.TAG_NODE:
+      self.target = self.cfg.GetNodeInfo(self.op.name)
+    elif self.op.kind == constants.TAG_INSTANCE:
+      self.target = self.cfg.GetInstanceInfo(self.op.name)
     else:
       raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                  str(self.op.kind))
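The effect of this split is that name expansion and lock declaration now happen without the big lock, while the target object is only resolved once the per-object lock is held. A hedged sketch of a derived LU relying on that behaviour (the class is illustrative only; GetTags() is assumed to be available on the resolved target):

    class LUExampleTags(TagsLU):
      """Illustrative only: a tags LU inheriting the new locking behaviour."""
      _OP_REQP = ["kind", "name"]
      REQ_BGL = False   # safe: TagsLU.ExpandNames takes a node/instance
                        # lock instead of the Big Ganeti Lock

      def Exec(self, feedback_fn):
        # self.target was resolved by TagsLU.CheckPrereq under the lock
        return list(self.target.GetTags())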
@@ -4865,6 +4994,7 @@ class LUGetTags(TagsLU):
 
   """
   _OP_REQP = ["kind", "name"]
+  REQ_BGL = False
 
   def Exec(self, feedback_fn):
     """Returns the tag list.
@@ -4878,6 +5008,10 @@ class LUSearchTags(NoHooksLU):
 
   """
   _OP_REQP = ["pattern"]
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {}
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -4897,9 +5031,9 @@ class LUSearchTags(NoHooksLU):
     """
     cfg = self.cfg
     tgts = [("/cluster", cfg.GetClusterInfo())]
-    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
+    ilist = cfg.GetAllInstancesInfo().values()
     tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
-    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
+    nlist = cfg.GetAllNodesInfo().values()
     tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
     results = []
     for path, target in tgts:
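The loop that follows (elided here) matches each target's tags against the compiled pattern and collects (path, tag) pairs; a standalone sketch of that search over made-up data:

    import re

    targets = [("/cluster", ["ha", "prod"]),
               ("/instances/web1", ["prod", "www"]),
               ("/nodes/node1", ["rack:r1"])]
    pattern = re.compile("^prod$")
    results = [(path, tag)
               for path, tags in targets
               for tag in tags
               if pattern.search(tag)]
    # [('/cluster', 'prod'), ('/instances/web1', 'prod')]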
@@ -4914,6 +5048,7 @@ class LUAddTags(TagsLU):
 
   """
   _OP_REQP = ["kind", "name", "tags"]
+  REQ_BGL = False
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -4947,6 +5082,7 @@ class LUDelTags(TagsLU):
 
   """
   _OP_REQP = ["kind", "name", "tags"]
+  REQ_BGL = False
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -5030,7 +5166,7 @@ class IAllocator(object):
   """IAllocator framework.
 
   An IAllocator instance has three sets of attributes:
-    - cfg/sstore that are needed to query the cluster
+    - cfg, which is needed to query the cluster
     - input data (all members of the _KEYS class attribute are required)
     - four buffer attributes (in|out_data|text), that represent the
       input (to the external script) in text and data structure format,
@@ -5047,9 +5183,8 @@ class IAllocator(object):
     "relocate_from",
     ]
 
-  def __init__(self, cfg, sstore, mode, name, **kwargs):
+  def __init__(self, cfg, mode, name, **kwargs):
     self.cfg = cfg
-    self.sstore = sstore
     # init buffer variables
     self.in_text = self.out_text = self.in_data = self.out_data = None
     # init all input fields so that pylint is happy
@@ -5087,12 +5222,13 @@ class IAllocator(object):
 
     """
     cfg = self.cfg
+    cluster_info = cfg.GetClusterInfo()
     # cluster data
     data = {
       "version": 1,
-      "cluster_name": self.sstore.GetClusterName(),
-      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
-      "hypervisor_type": self.sstore.GetHypervisorType(),
+      "cluster_name": self.cfg.GetClusterName(),
+      "cluster_tags": list(cluster_info.GetTags()),
+      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
       # we don't have job IDs
       }
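For reference, a hedged sketch of the top-level dict assembled here, with invented values; the nodes/instances keys are an assumption about where the loops below store their results:

    data = {
      "version": 1,
      "cluster_name": "cluster.example.com",
      "cluster_tags": ["ha", "prod"],
      "enabled_hypervisors": ["xen-pvm"],
      # assumed: filled in by the node and instance loops that follow
      "nodes": {},      # node name -> memory/disk information
      "instances": {},  # instance name -> the per-instance dict built below
    }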
 
@@ -5101,7 +5237,10 @@ class IAllocator(object):
     # node data
     node_results = {}
     node_list = cfg.GetNodeList()
-    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
+    # FIXME: here we have information for only one hypervisor, but
+    # instances can belong to different hypervisors
+    node_data = rpc.call_node_info(node_list, cfg.GetVGName(),
+                                   cfg.GetHypervisorType())
     for nname in node_list:
       ninfo = cfg.GetNodeInfo(nname)
       if nname not in node_data or not isinstance(node_data[nname], dict):
@@ -5157,6 +5296,7 @@ class IAllocator(object):
         "nics": nic_data,
         "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
         "disk_template": iinfo.disk_template,
+        "hypervisor": iinfo.hypervisor,
         }
       instance_data[iinfo.name] = pir
 
@@ -5255,7 +5395,7 @@ class IAllocator(object):
     """
     data = self.in_text
 
-    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)
+    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
 
     if not isinstance(result, (list, tuple)) or len(result) != 4:
       raise errors.OpExecError("Invalid result from master iallocator runner")
@@ -5368,7 +5508,7 @@ class LUTestAllocator(NoHooksLU):
 
     """
     if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
-      ial = IAllocator(self.cfg, self.sstore,
+      ial = IAllocator(self.cfg,
                        mode=self.op.mode,
                        name=self.op.name,
                        mem_size=self.op.mem_size,
@@ -5380,7 +5520,7 @@ class LUTestAllocator(NoHooksLU):
                        vcpus=self.op.vcpus,
                        )
     else:
-      ial = IAllocator(self.cfg, self.sstore,
+      ial = IAllocator(self.cfg,
                        mode=self.op.mode,
                        name=self.op.name,
                        relocate_from=list(self.relocate_from),