Add gnt-backup remove functionality
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 014290f..cdac3d0 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1,7 +1,7 @@
-#!/usr/bin/python
+#
 #
 
-# Copyright (C) 2006, 2007 Google Inc.
+# Copyright (C) 2006, 2007, 2008 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -26,7 +26,6 @@
 import os
 import os.path
 import sha
-import socket
 import time
 import tempfile
 import re
@@ -43,6 +42,8 @@ from ganeti import constants
 from ganeti import objects
 from ganeti import opcodes
 from ganeti import ssconf
+from ganeti import serializer
+
 
 class LogicalUnit(object):
   """Logical Unit base class.
@@ -70,10 +71,12 @@ class LogicalUnit(object):
     validity.
 
     """
-    self.processor = processor
+    self.proc = processor
     self.op = op
     self.cfg = cfg
     self.sstore = sstore
+    self.__ssh = None
+
     for attr_name in self._OP_REQP:
       attr_val = getattr(op, attr_name, None)
       if attr_val is None:
@@ -85,10 +88,20 @@ class LogicalUnit(object):
                                    " use 'gnt-cluster init' first.")
       if self.REQ_MASTER:
         master = sstore.GetMasterNode()
-        if master != socket.gethostname():
+        if master != utils.HostInfo().name:
           raise errors.OpPrereqError("Commands must be run on the master"
                                      " node %s" % master)
 
+  def __GetSSH(self):
+    """Returns the SshRunner object
+
+    """
+    if not self.__ssh:
+      self.__ssh = ssh.SshRunner(self.sstore)
+    return self.__ssh
+
+  ssh = property(fget=__GetSSH)
+
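
The hunk above replaces direct SshRunner construction with a lazily-built, cached runner exposed as a read-only `ssh` property. A minimal, self-contained sketch of the same pattern (the `_FakeSshRunner` class and `LogicalUnitSketch` names are hypothetical, for illustration only):

```python
class _FakeSshRunner(object):
    """Stand-in for ssh.SshRunner; only here to illustrate the pattern."""
    def __init__(self, sstore):
        self.sstore = sstore

class LogicalUnitSketch(object):
    def __init__(self, sstore):
        self.sstore = sstore
        self.__ssh = None          # runner is built on first access only

    def __GetSSH(self):
        if not self.__ssh:
            self.__ssh = _FakeSshRunner(self.sstore)
        return self.__ssh

    ssh = property(fget=__GetSSH)

lu = LogicalUnitSketch(sstore="dummy-sstore")
assert lu.ssh is lu.ssh            # the same runner is reused on every access
```
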
   def CheckPrereq(self):
     """Check prerequisites for this LU.
 
@@ -161,31 +174,72 @@ class NoHooksLU(LogicalUnit):
     This is a no-op, since we don't run hooks.
 
     """
-    return
+    return {}, [], []
+
+
+def _AddHostToEtcHosts(hostname):
+  """Wrapper around utils.SetEtcHostsEntry.
+
+  """
+  hi = utils.HostInfo(name=hostname)
+  utils.SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
+
+
+def _RemoveHostFromEtcHosts(hostname):
+  """Wrapper around utils.RemoveEtcHostsEntry.
+
+  """
+  hi = utils.HostInfo(name=hostname)
+  utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
+  utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
 
 
 def _GetWantedNodes(lu, nodes):
-  """Returns list of checked and expanded nodes.
+  """Returns list of checked and expanded node names.
 
   Args:
     nodes: List of nodes (strings) or None for all
 
   """
-  if nodes is not None and not isinstance(nodes, list):
+  if not isinstance(nodes, list):
     raise errors.OpPrereqError("Invalid argument type 'nodes'")
 
   if nodes:
-    wanted_nodes = []
+    wanted = []
 
     for name in nodes:
-      node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name))
+      node = lu.cfg.ExpandNodeName(name)
       if node is None:
         raise errors.OpPrereqError("No such node name '%s'" % name)
-    wanted_nodes.append(node)
+      wanted.append(node)
 
-    return wanted_nodes
   else:
-    return [lu.cfg.GetNodeInfo(name) for name in lu.cfg.GetNodeList()]
+    wanted = lu.cfg.GetNodeList()
+  return utils.NiceSort(wanted)
+
+
+def _GetWantedInstances(lu, instances):
+  """Returns list of checked and expanded instance names.
+
+  Args:
+    instances: List of instances (strings) or None for all
+
+  """
+  if not isinstance(instances, list):
+    raise errors.OpPrereqError("Invalid argument type 'instances'")
+
+  if instances:
+    wanted = []
+
+    for name in instances:
+      instance = lu.cfg.ExpandInstanceName(name)
+      if instance is None:
+        raise errors.OpPrereqError("No such instance name '%s'" % name)
+      wanted.append(instance)
+
+  else:
+    wanted = lu.cfg.GetInstanceList()
+  return utils.NiceSort(wanted)
 
 
 def _CheckOutputFields(static, dynamic, selected):
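
The reworked `_GetWantedNodes` and the new `_GetWantedInstances` above share the same shape: expand each requested name, fall back to "all" when the list is empty, and return a nicely sorted result. A small sketch of that shape with the config lookups injected as callables; `nice_sort` assumes NiceSort-style natural ordering (node2 before node10), and all names here are hypothetical:

```python
import re

def nice_sort(names):
    """Natural sort: 'node2' before 'node10' (assumed behaviour of utils.NiceSort)."""
    def key(name):
        # split into text/number chunks so numeric parts compare numerically
        return [int(p) if p.isdigit() else p for p in re.split(r'(\d+)', name)]
    return sorted(names, key=key)

def get_wanted(requested, expand, list_all):
    """Same shape as _GetWantedNodes/_GetWantedInstances, with callables injected."""
    if not isinstance(requested, list):
        raise TypeError("expected a list (an empty list means 'all')")
    if requested:
        wanted = []
        for name in requested:
            full = expand(name)          # e.g. cfg.ExpandNodeName(name)
            if full is None:
                raise ValueError("No such name '%s'" % name)
            wanted.append(full)
    else:
        wanted = list_all()              # e.g. cfg.GetNodeList()
    return nice_sort(wanted)

print(get_wanted([], lambda n: n, lambda: ["node10", "node2", "node1"]))
# ['node1', 'node2', 'node10']
```
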
@@ -215,6 +269,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     secondary_nodes: List of secondary nodes as strings
   """
   env = {
+    "OP_TARGET": name,
     "INSTANCE_NAME": name,
     "INSTANCE_PRIMARY": primary_node,
     "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
@@ -226,11 +281,12 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   if nics:
     nic_count = len(nics)
-    for idx, (ip, bridge) in enumerate(nics):
+    for idx, (ip, bridge, mac) in enumerate(nics):
       if ip is None:
         ip = ""
       env["INSTANCE_NIC%d_IP" % idx] = ip
       env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
+      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
   else:
     nic_count = 0
 
@@ -254,176 +310,13 @@ def _BuildInstanceHookEnvByObject(instance, override=None):
     'status': instance.os,
     'memory': instance.memory,
     'vcpus': instance.vcpus,
-    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
+    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
   }
   if override:
     args.update(override)
   return _BuildInstanceHookEnv(**args)
 
 
-def _UpdateEtcHosts(fullnode, ip):
-  """Ensure a node has a correct entry in /etc/hosts.
-
-  Args:
-    fullnode - Fully qualified domain name of host. (str)
-    ip       - IPv4 address of host (str)
-
-  """
-  node = fullnode.split(".", 1)[0]
-
-  f = open('/etc/hosts', 'r+')
-
-  inthere = False
-
-  save_lines = []
-  add_lines = []
-  removed = False
-
-  while True:
-    rawline = f.readline()
-
-    if not rawline:
-      # End of file
-      break
-
-    line = rawline.split('\n')[0]
-
-    # Strip off comments
-    line = line.split('#')[0]
-
-    if not line:
-      # Entire line was comment, skip
-      save_lines.append(rawline)
-      continue
-
-    fields = line.split()
-
-    haveall = True
-    havesome = False
-    for spec in [ ip, fullnode, node ]:
-      if spec not in fields:
-        haveall = False
-      if spec in fields:
-        havesome = True
-
-    if haveall:
-      inthere = True
-      save_lines.append(rawline)
-      continue
-
-    if havesome and not haveall:
-      # Line (old, or manual?) which is missing some.  Remove.
-      removed = True
-      continue
-
-    save_lines.append(rawline)
-
-  if not inthere:
-    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))
-
-  if removed:
-    if add_lines:
-      save_lines = save_lines + add_lines
-
-    # We removed a line, write a new file and replace old.
-    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
-    newfile = os.fdopen(fd, 'w')
-    newfile.write(''.join(save_lines))
-    newfile.close()
-    os.rename(tmpname, '/etc/hosts')
-
-  elif add_lines:
-    # Simply appending a new line will do the trick.
-    f.seek(0, 2)
-    for add in add_lines:
-      f.write(add)
-
-  f.close()
-
-
-def _UpdateKnownHosts(fullnode, ip, pubkey):
-  """Ensure a node has a correct known_hosts entry.
-
-  Args:
-    fullnode - Fully qualified domain name of host. (str)
-    ip       - IPv4 address of host (str)
-    pubkey   - the public key of the cluster
-
-  """
-  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
-    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
-  else:
-    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')
-
-  inthere = False
-
-  save_lines = []
-  add_lines = []
-  removed = False
-
-  while True:
-    rawline = f.readline()
-    logger.Debug('read %s' % (repr(rawline),))
-
-    if not rawline:
-      # End of file
-      break
-
-    line = rawline.split('\n')[0]
-
-    parts = line.split(' ')
-    fields = parts[0].split(',')
-    key = parts[2]
-
-    haveall = True
-    havesome = False
-    for spec in [ ip, fullnode ]:
-      if spec not in fields:
-        haveall = False
-      if spec in fields:
-        havesome = True
-
-    logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
-    if haveall and key == pubkey:
-      inthere = True
-      save_lines.append(rawline)
-      logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
-      continue
-
-    if havesome and (not haveall or key != pubkey):
-      removed = True
-      logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
-      continue
-
-    save_lines.append(rawline)
-
-  if not inthere:
-    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
-    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))
-
-  if removed:
-    save_lines = save_lines + add_lines
-
-    # Write a new file and replace old.
-    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
-                                   constants.DATA_DIR)
-    newfile = os.fdopen(fd, 'w')
-    try:
-      newfile.write(''.join(save_lines))
-    finally:
-      newfile.close()
-    logger.Debug("Wrote new known_hosts.")
-    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)
-
-  elif add_lines:
-    # Simply appending a new line will do the trick.
-    f.seek(0, 2)
-    for add in add_lines:
-      f.write(add)
-
-  f.close()
-
-
 def _HasValidVG(vglist, vgname):
   """Checks if the volume group list is valid.
 
@@ -451,24 +344,23 @@ def _InitSSHSetup(node):
     node: the name of this host as a fqdn
 
   """
-  if os.path.exists('/root/.ssh/id_dsa'):
-    utils.CreateBackup('/root/.ssh/id_dsa')
-  if os.path.exists('/root/.ssh/id_dsa.pub'):
-    utils.CreateBackup('/root/.ssh/id_dsa.pub')
+  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
 
-  utils.RemoveFile('/root/.ssh/id_dsa')
-  utils.RemoveFile('/root/.ssh/id_dsa.pub')
+  for name in priv_key, pub_key:
+    if os.path.exists(name):
+      utils.CreateBackup(name)
+    utils.RemoveFile(name)
 
   result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
-                         "-f", "/root/.ssh/id_dsa",
+                         "-f", priv_key,
                          "-q", "-N", ""])
   if result.failed:
     raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                              result.output)
 
-  f = open('/root/.ssh/id_dsa.pub', 'r')
+  f = open(pub_key, 'r')
   try:
-    utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192))
+    utils.AddAuthorizedKey(auth_keys, f.read(8192))
   finally:
     f.close()
 
@@ -504,14 +396,26 @@ def _InitGanetiServerSetup(ss):
                              (result.cmd, result.exit_code, result.output))
 
 
+def _CheckInstanceBridgesExist(instance):
+  """Check that the brigdes needed by an instance exist.
+
+  """
+  # check bridges existance
+  brlist = [nic.bridge for nic in instance.nics]
+  if not rpc.call_bridges_exist(instance.primary_node, brlist):
+    raise errors.OpPrereqError("one or more target bridges %s does not"
+                               " exist on destination node '%s'" %
+                               (brlist, instance.primary_node))
+
+
 class LUInitCluster(LogicalUnit):
   """Initialise the cluster.
 
   """
   HPATH = "cluster-init"
   HTYPE = constants.HTYPE_CLUSTER
-  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
-              "def_bridge", "master_netdev"]
+  _OP_REQP = ["cluster_name", "hypervisor_type", "mac_prefix",
+              "def_bridge", "master_netdev", "file_storage_dir"]
   REQ_CLUSTER = False
 
   def BuildHooksEnv(self):
@@ -521,11 +425,8 @@ class LUInitCluster(LogicalUnit):
     ourselves in the post-run node list.
 
     """
-    env = {
-      "CLUSTER": self.op.cluster_name,
-      "MASTER": self.hostname['hostname_full'],
-      }
-    return env, [], [self.hostname['hostname_full']]
+    env = {"OP_TARGET": self.op.cluster_name}
+    return env, [], [self.hostname.name]
 
   def CheckPrereq(self):
     """Verify that the passed name is a valid one.
@@ -534,58 +435,77 @@ class LUInitCluster(LogicalUnit):
     if config.ConfigWriter.IsCluster():
       raise errors.OpPrereqError("Cluster is already initialised")
 
-    hostname_local = socket.gethostname()
-    self.hostname = hostname = utils.LookupHostname(hostname_local)
-    if not hostname:
-      raise errors.OpPrereqError("Cannot resolve my own hostname ('%s')" %
-                                 hostname_local)
+    if self.op.hypervisor_type == constants.HT_XEN_HVM31:
+      if not os.path.exists(constants.VNC_PASSWORD_FILE):
+        raise errors.OpPrereqError("Please prepare the cluster VNC"
+                                   "password file %s" %
+                                   constants.VNC_PASSWORD_FILE)
 
-    if hostname["hostname_full"] != hostname_local:
-      raise errors.OpPrereqError("My own hostname (%s) does not match the"
-                                 " resolver (%s): probably not using FQDN"
-                                 " for hostname." %
-                                 (hostname_local, hostname["hostname_full"]))
+    self.hostname = hostname = utils.HostInfo()
 
-    if hostname["ip"].startswith("127."):
+    if hostname.ip.startswith("127."):
       raise errors.OpPrereqError("This host's IP resolves to the private"
-                                 " range (%s). Please fix DNS or /etc/hosts." %
-                                 (hostname["ip"],))
+                                 " range (%s). Please fix DNS or %s." %
+                                 (hostname.ip, constants.ETC_HOSTS))
 
-    self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
-    if not clustername:
-      raise errors.OpPrereqError("Cannot resolve given cluster name ('%s')"
-                                 % self.op.cluster_name)
-
-    result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']])
-    if result.failed:
+    if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
+                         source=constants.LOCALHOST_IP_ADDRESS):
       raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                  " to %s,\nbut this ip address does not"
                                  " belong to this host."
-                                 " Aborting." % hostname['ip'])
+                                 " Aborting." % hostname.ip)
+
+    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)
+
+    if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
+                     timeout=5):
+      raise errors.OpPrereqError("Cluster IP already active. Aborting.")
 
     secondary_ip = getattr(self.op, "secondary_ip", None)
     if secondary_ip and not utils.IsValidIP(secondary_ip):
       raise errors.OpPrereqError("Invalid secondary ip given")
-    if secondary_ip and secondary_ip != hostname['ip']:
-      result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
-      if result.failed:
-        raise errors.OpPrereqError("You gave %s as secondary IP,\n"
-                                   "but it does not belong to this host." %
-                                   secondary_ip)
+    if (secondary_ip and
+        secondary_ip != hostname.ip and
+        (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
+                           source=constants.LOCALHOST_IP_ADDRESS))):
+      raise errors.OpPrereqError("You gave %s as secondary IP,"
+                                 " but it does not belong to this host." %
+                                 secondary_ip)
     self.secondary_ip = secondary_ip
 
-    # checks presence of the volume group given
-    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
+    if not hasattr(self.op, "vg_name"):
+      self.op.vg_name = None
+    # if vg_name not None, checks if volume group is valid
+    if self.op.vg_name:
+      vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
+      if vgstatus:
+        raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
+                                   " you are not using lvm" % vgstatus)
+
+    self.op.file_storage_dir = os.path.normpath(self.op.file_storage_dir)
+
+    if not os.path.isabs(self.op.file_storage_dir):
+      raise errors.OpPrereqError("The file storage directory you have is"
+                                 " not an absolute path.")
+
+    if not os.path.exists(self.op.file_storage_dir):
+      try:
+        os.makedirs(self.op.file_storage_dir, 0750)
+      except OSError, err:
+        raise errors.OpPrereqError("Cannot create file storage directory"
+                                   " '%s': %s" %
+                                   (self.op.file_storage_dir, err))
 
-    if vgstatus:
-      raise errors.OpPrereqError("Error: %s" % vgstatus)
+    if not os.path.isdir(self.op.file_storage_dir):
+      raise errors.OpPrereqError("The file storage directory '%s' is not"
+                                 " a directory." % self.op.file_storage_dir)
 
     if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                     self.op.mac_prefix):
       raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                  self.op.mac_prefix)
 
-    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
+    if self.op.hypervisor_type not in constants.HYPER_TYPES:
       raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                  self.op.hypervisor_type)
 
@@ -595,6 +515,11 @@ class LUInitCluster(LogicalUnit):
                                  (self.op.master_netdev,
                                   result.output.strip()))
 
+    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
+            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
+      raise errors.OpPrereqError("Init.d script '%s' missing or not"
+                                 " executable." % constants.NODE_INITD_SCRIPT)
+
   def Exec(self, feedback_fn):
     """Initialize the cluster.
 
@@ -603,44 +528,39 @@ class LUInitCluster(LogicalUnit):
     hostname = self.hostname
 
     # set up the simple store
-    ss = ssconf.SimpleStore()
+    self.sstore = ss = ssconf.SimpleStore()
     ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
-    ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full'])
-    ss.SetKey(ss.SS_MASTER_IP, clustername['ip'])
+    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
+    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
     ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
-    ss.SetKey(ss.SS_CLUSTER_NAME, clustername['hostname'])
+    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)
+    ss.SetKey(ss.SS_FILE_STORAGE_DIR, self.op.file_storage_dir)
 
     # set up the inter-node password and certificate
     _InitGanetiServerSetup(ss)
 
     # start the master ip
-    rpc.call_node_start_master(hostname['hostname_full'])
+    rpc.call_node_start_master(hostname.name)
 
     # set up ssh config and /etc/hosts
-    f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
+    f = open(constants.SSH_HOST_RSA_PUB, 'r')
     try:
       sshline = f.read()
     finally:
       f.close()
     sshkey = sshline.split(" ")[1]
 
-    _UpdateEtcHosts(hostname['hostname_full'],
-                    hostname['ip'],
-                    )
-
-    _UpdateKnownHosts(hostname['hostname_full'],
-                      hostname['ip'],
-                      sshkey,
-                      )
-
-    _InitSSHSetup(hostname['hostname'])
+    _AddHostToEtcHosts(hostname.name)
+    _InitSSHSetup(hostname.name)
 
     # init of cluster config file
-    cfgw = config.ConfigWriter()
-    cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip,
+    self.cfg = cfgw = config.ConfigWriter()
+    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                     sshkey, self.op.mac_prefix,
                     self.op.vg_name, self.op.def_bridge)
 
+    ssh.WriteKnownHostsFile(cfgw, ss, constants.SSH_KNOWN_HOSTS_FILE)
+
 
 class LUDestroyCluster(NoHooksLU):
   """Logical unit for destroying the cluster.
@@ -671,16 +591,20 @@ class LUDestroyCluster(NoHooksLU):
     """Destroys the cluster.
 
     """
-    utils.CreateBackup('/root/.ssh/id_dsa')
-    utils.CreateBackup('/root/.ssh/id_dsa.pub')
-    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
+    master = self.sstore.GetMasterNode()
+    if not rpc.call_node_stop_master(master):
+      raise errors.OpExecError("Could not disable the master role")
+    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
+    utils.CreateBackup(priv_key)
+    utils.CreateBackup(pub_key)
+    rpc.call_node_leave_cluster(master)
 
 
 class LUVerifyCluster(NoHooksLU):
   """Verifies the cluster status.
 
   """
-  _OP_REQP = []
+  _OP_REQP = ["skip_checks"]
 
   def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                   remote_version, feedback_fn):
@@ -701,7 +625,7 @@ class LUVerifyCluster(NoHooksLU):
     # compares ganeti version
     local_version = constants.PROTOCOL_VERSION
     if not remote_version:
-      feedback_fn(" - ERROR: connection to %s failed" % (node))
+      feedback_fn("  - ERROR: connection to %s failed" % (node))
       return True
 
     if local_version != remote_version:
@@ -752,7 +676,8 @@ class LUVerifyCluster(NoHooksLU):
       feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
     return bad
 
-  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
+  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
+                      node_instance, feedback_fn):
     """Verify an instance.
 
     This function checks to see if the required block devices are
@@ -761,13 +686,6 @@ class LUVerifyCluster(NoHooksLU):
     """
     bad = False
 
-    instancelist = self.cfg.GetInstanceList()
-    if not instance in instancelist:
-      feedback_fn("  - ERROR: instance %s not in instance list %s" %
-                      (instance, instancelist))
-      bad = True
-
-    instanceconfig = self.cfg.GetInstanceInfo(instance)
     node_current = instanceconfig.primary_node
 
     node_vol_should = {}
@@ -781,7 +699,8 @@ class LUVerifyCluster(NoHooksLU):
           bad = True
 
     if not instanceconfig.status == 'down':
-      if not instance in node_instance[node_current]:
+      if (node_current not in node_instance or
+          not instance in node_instance[node_current]):
         feedback_fn("  - ERROR: instance %s not running on node %s" %
                         (instance, node_current))
         bad = True
@@ -793,7 +712,7 @@ class LUVerifyCluster(NoHooksLU):
                           (instance, node))
           bad = True
 
-    return not bad
+    return bad
 
   def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
     """Verify if there are any unknown volumes in the cluster.
@@ -827,13 +746,44 @@ class LUVerifyCluster(NoHooksLU):
           bad = True
     return bad
 
+  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
+    """Verify N+1 Memory Resilience.
+
+    Check that if one single node dies we can still start all the instances it
+    was primary for.
+
+    """
+    bad = False
+
+    for node, nodeinfo in node_info.iteritems():
+      # This code checks that every node which is now listed as secondary has
+      # enough memory to host all instances it is supposed to, should a single
+      # other node in the cluster fail.
+      # FIXME: not ready for failover to an arbitrary node
+      # FIXME: does not support file-backed instances
+      # WARNING: we currently take into account down instances as well as up
+      # ones, considering that even if they're down someone might want to start
+      # them even in the event of a node failure.
+      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
+        needed_mem = 0
+        for instance in instances:
+          needed_mem += instance_cfg[instance].memory
+        if nodeinfo['mfree'] < needed_mem:
+          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
+                      " failovers should node %s fail" % (node, prinode))
+          bad = True
+    return bad
+
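
A stripped-down sketch of the N+1 computation in `_VerifyNPlusOneMemory` above, using plain dicts in place of the node and instance config objects (function and variable names here are hypothetical):

```python
def n_plus_one_failures(node_info, instance_memory):
    """Return (node, failed_primary) pairs where failovers would not fit in memory.

    node_info maps node -> {'mfree': int, 'sinst-by-pnode': {pnode: [inst, ...]}}.
    instance_memory maps instance name -> memory in MiB.
    """
    failures = []
    for node, info in node_info.items():
        for prinode, instances in info['sinst-by-pnode'].items():
            # memory needed on 'node' if 'prinode' dies and everything fails over
            needed = sum(instance_memory[inst] for inst in instances)
            if info['mfree'] < needed:
                failures.append((node, prinode))
    return failures

node_info = {
    "node2": {"mfree": 1024, "sinst-by-pnode": {"node1": ["web1", "db1"]}},
}
instance_memory = {"web1": 512, "db1": 768}
print(n_plus_one_failures(node_info, instance_memory))  # [('node2', 'node1')]
```
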
   def CheckPrereq(self):
     """Check prerequisites.
 
-    This has no prerequisites.
+    Transform the list of checks we're going to skip into a set and check that
+    all its members are valid.
 
     """
-    pass
+    self.skip_set = frozenset(self.op.skip_checks)
+    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
+      raise errors.OpPrereqError("Invalid checks to be skipped specified")
 
   def Exec(self, feedback_fn):
     """Verify integrity of cluster, performing various test on nodes.
@@ -841,14 +791,17 @@ class LUVerifyCluster(NoHooksLU):
     """
     bad = False
     feedback_fn("* Verifying global settings")
-    self.cfg.VerifyConfig()
+    for msg in self.cfg.VerifyConfig():
+      feedback_fn("  - ERROR: %s" % msg)
 
-    master = self.sstore.GetMasterNode()
     vg_name = self.cfg.GetVGName()
     nodelist = utils.NiceSort(self.cfg.GetNodeList())
     instancelist = utils.NiceSort(self.cfg.GetInstanceList())
+    i_non_redundant = [] # Non redundant instances
     node_volume = {}
     node_instance = {}
+    node_info = {}
+    instance_cfg = {}
 
     # FIXME: verify OS list
     # do local checksums
@@ -868,6 +821,7 @@ class LUVerifyCluster(NoHooksLU):
       }
     all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
     all_rversion = rpc.call_version(nodelist)
+    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
 
     for node in nodelist:
       feedback_fn("* Verifying node %s" % node)
@@ -879,12 +833,17 @@ class LUVerifyCluster(NoHooksLU):
       # node_volume
       volumeinfo = all_volumeinfo[node]
 
-      if type(volumeinfo) != dict:
+      if isinstance(volumeinfo, basestring):
+        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
+                    (node, volumeinfo[-400:].encode('string_escape')))
+        bad = True
+        node_volume[node] = {}
+      elif not isinstance(volumeinfo, dict):
         feedback_fn("  - ERROR: connection to %s failed" % (node,))
         bad = True
         continue
-
-      node_volume[node] = volumeinfo
+      else:
+        node_volume[node] = volumeinfo
 
       # node_instance
       nodeinstance = all_instanceinfo[node]
@@ -895,18 +854,74 @@ class LUVerifyCluster(NoHooksLU):
 
       node_instance[node] = nodeinstance
 
+      # node_info
+      nodeinfo = all_ninfo[node]
+      if not isinstance(nodeinfo, dict):
+        feedback_fn("  - ERROR: connection to %s failed" % (node,))
+        bad = True
+        continue
+
+      try:
+        node_info[node] = {
+          "mfree": int(nodeinfo['memory_free']),
+          "dfree": int(nodeinfo['vg_free']),
+          "pinst": [],
+          "sinst": [],
+          # dictionary holding all instances this node is secondary for,
+          # grouped by their primary node. Each key is a cluster node, and each
+          # value is a list of instances which have the key as primary and the
+          # current node as secondary.  this is handy to calculate N+1 memory
+          # availability if you can only failover from a primary to its
+          # secondary.
+          "sinst-by-pnode": {},
+        }
+      except ValueError:
+        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
+        bad = True
+        continue
+
     node_vol_should = {}
 
     for instance in instancelist:
       feedback_fn("* Verifying instance %s" % instance)
-      result =  self._VerifyInstance(instance, node_volume, node_instance,
-                                     feedback_fn)
-      bad = bad or result
-
       inst_config = self.cfg.GetInstanceInfo(instance)
+      result =  self._VerifyInstance(instance, inst_config, node_volume,
+                                     node_instance, feedback_fn)
+      bad = bad or result
 
       inst_config.MapLVsByNode(node_vol_should)
 
+      instance_cfg[instance] = inst_config
+
+      pnode = inst_config.primary_node
+      if pnode in node_info:
+        node_info[pnode]['pinst'].append(instance)
+      else:
+        feedback_fn("  - ERROR: instance %s, connection to primary node"
+                    " %s failed" % (instance, pnode))
+        bad = True
+
+      # If the instance is non-redundant we cannot survive losing its primary
+      # node, so we are not N+1 compliant. On the other hand we have no disk
+      # templates with more than one secondary so that situation is not well
+      # supported either.
+      # FIXME: does not support file-backed instances
+      if len(inst_config.secondary_nodes) == 0:
+        i_non_redundant.append(instance)
+      elif len(inst_config.secondary_nodes) > 1:
+        feedback_fn("  - WARNING: multiple secondaries for instance %s"
+                    % instance)
+
+      for snode in inst_config.secondary_nodes:
+        if snode in node_info:
+          node_info[snode]['sinst'].append(instance)
+          if pnode not in node_info[snode]['sinst-by-pnode']:
+            node_info[snode]['sinst-by-pnode'][pnode] = []
+          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
+        else:
+          feedback_fn("  - ERROR: instance %s, connection to secondary node"
+                      " %s failed" % (instance, snode))
+
     feedback_fn("* Verifying orphan volumes")
     result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                        feedback_fn)
@@ -917,10 +932,244 @@ class LUVerifyCluster(NoHooksLU):
                                          feedback_fn)
     bad = bad or result
 
+    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
+      feedback_fn("* Verifying N+1 Memory redundancy")
+      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
+      bad = bad or result
+
+    feedback_fn("* Other Notes")
+    if i_non_redundant:
+      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
+                  % len(i_non_redundant))
+
     return int(bad)
 
 
-def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
+class LUVerifyDisks(NoHooksLU):
+  """Verifies the cluster disks status.
+
+  """
+  _OP_REQP = []
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This has no prerequisites.
+
+    """
+    pass
+
+  def Exec(self, feedback_fn):
+    """Verify integrity of cluster disks.
+
+    """
+    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
+
+    vg_name = self.cfg.GetVGName()
+    nodes = utils.NiceSort(self.cfg.GetNodeList())
+    instances = [self.cfg.GetInstanceInfo(name)
+                 for name in self.cfg.GetInstanceList()]
+
+    nv_dict = {}
+    for inst in instances:
+      inst_lvs = {}
+      if (inst.status != "up" or
+          inst.disk_template not in constants.DTS_NET_MIRROR):
+        continue
+      inst.MapLVsByNode(inst_lvs)
+      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
+      for node, vol_list in inst_lvs.iteritems():
+        for vol in vol_list:
+          nv_dict[(node, vol)] = inst
+
+    if not nv_dict:
+      return result
+
+    node_lvs = rpc.call_volume_list(nodes, vg_name)
+
+    to_act = set()
+    for node in nodes:
+      # node_volume
+      lvs = node_lvs[node]
+
+      if isinstance(lvs, basestring):
+        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
+        res_nlvm[node] = lvs
+      elif not isinstance(lvs, dict):
+        logger.Info("connection to node %s failed or invalid data returned" %
+                    (node,))
+        res_nodes.append(node)
+        continue
+
+      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
+        inst = nv_dict.pop((node, lv_name), None)
+        if (not lv_online and inst is not None
+            and inst.name not in res_instances):
+          res_instances.append(inst.name)
+
+    # any leftover items in nv_dict are missing LVs, let's arrange the
+    # data better
+    for key, inst in nv_dict.iteritems():
+      if inst.name not in res_missing:
+        res_missing[inst.name] = []
+      res_missing[inst.name].append(key)
+
+    return result
+
+
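
The core of the new `LUVerifyDisks.Exec` above is the map inversion noted in its comment: `{instance: {node: [lv, ...]}}` becomes `{(node, lv): instance}` so missing volumes can be looked up per node. A tiny illustration with plain dicts (names below are hypothetical):

```python
def invert_lv_map(inst_lvs_by_name):
    """Turn {instance: {node: [lv, ...]}} into {(node, lv): instance}."""
    nv_dict = {}
    for iname, by_node in inst_lvs_by_name.items():
        for node, vols in by_node.items():
            for vol in vols:
                nv_dict[(node, vol)] = iname
    return nv_dict

lv_map = {"inst1": {"node1": ["xenvg/disk0"], "node2": ["xenvg/disk0"]}}
present = {("node1", "xenvg/disk0")}              # what the nodes reported
nv_dict = invert_lv_map(lv_map)
missing = [key for key in nv_dict if key not in present]
print(missing)                                    # [('node2', 'xenvg/disk0')]
```
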
+class LURenameCluster(LogicalUnit):
+  """Rename the cluster.
+
+  """
+  HPATH = "cluster-rename"
+  HTYPE = constants.HTYPE_CLUSTER
+  _OP_REQP = ["name"]
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    env = {
+      "OP_TARGET": self.sstore.GetClusterName(),
+      "NEW_NAME": self.op.name,
+      }
+    mn = self.sstore.GetMasterNode()
+    return env, [mn], [mn]
+
+  def CheckPrereq(self):
+    """Verify that the passed name is a valid one.
+
+    """
+    hostname = utils.HostInfo(self.op.name)
+
+    new_name = hostname.name
+    self.ip = new_ip = hostname.ip
+    old_name = self.sstore.GetClusterName()
+    old_ip = self.sstore.GetMasterIP()
+    if new_name == old_name and new_ip == old_ip:
+      raise errors.OpPrereqError("Neither the name nor the IP address of the"
+                                 " cluster has changed")
+    if new_ip != old_ip:
+      result = utils.RunCmd(["fping", "-q", new_ip])
+      if not result.failed:
+        raise errors.OpPrereqError("The given cluster IP address (%s) is"
+                                   " reachable on the network. Aborting." %
+                                   new_ip)
+
+    self.op.name = new_name
+
+  def Exec(self, feedback_fn):
+    """Rename the cluster.
+
+    """
+    clustername = self.op.name
+    ip = self.ip
+    ss = self.sstore
+
+    # shutdown the master IP
+    master = ss.GetMasterNode()
+    if not rpc.call_node_stop_master(master):
+      raise errors.OpExecError("Could not disable the master role")
+
+    try:
+      # modify the sstore
+      ss.SetKey(ss.SS_MASTER_IP, ip)
+      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
+
+      # Distribute updated ss config to all nodes
+      myself = self.cfg.GetNodeInfo(master)
+      dist_nodes = self.cfg.GetNodeList()
+      if myself.name in dist_nodes:
+        dist_nodes.remove(myself.name)
+
+      logger.Debug("Copying updated ssconf data to all nodes")
+      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
+        fname = ss.KeyToFilename(keyname)
+        result = rpc.call_upload_file(dist_nodes, fname)
+        for to_node in dist_nodes:
+          if not result[to_node]:
+            logger.Error("copy of file %s to node %s failed" %
+                         (fname, to_node))
+    finally:
+      if not rpc.call_node_start_master(master):
+        logger.Error("Could not re-enable the master role on the master,"
+                     " please restart manually.")
+
+
+def _RecursiveCheckIfLVMBased(disk):
+  """Check if the given disk or its children are lvm-based.
+
+  Args:
+    disk: ganeti.objects.Disk object
+
+  Returns:
+    boolean indicating whether a LD_LV dev_type was found or not
+
+  """
+  if disk.children:
+    for chdisk in disk.children:
+      if _RecursiveCheckIfLVMBased(chdisk):
+        return True
+  return disk.dev_type == constants.LD_LV
+
+
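
`_RecursiveCheckIfLVMBased` above walks the disk tree so that, e.g., DRBD devices backed by LVs are still detected. A self-contained sketch with a toy disk object; the `LD_LV` value is an assumption standing in for `constants.LD_LV`:

```python
class DiskSketch(object):
    """Toy stand-in for ganeti.objects.Disk (dev_type + children only)."""
    def __init__(self, dev_type, children=None):
        self.dev_type = dev_type
        self.children = children or []

LD_LV = "lvm"          # assumed value; the real code uses constants.LD_LV

def has_lvm(disk):
    """True if the disk or any of its children is LVM-backed."""
    if any(has_lvm(child) for child in disk.children):
        return True
    return disk.dev_type == LD_LV

drbd_over_lvm = DiskSketch("drbd8", [DiskSketch(LD_LV), DiskSketch(LD_LV)])
print(has_lvm(drbd_over_lvm))   # True
```
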
+class LUSetClusterParams(LogicalUnit):
+  """Change the parameters of the cluster.
+
+  """
+  HPATH = "cluster-modify"
+  HTYPE = constants.HTYPE_CLUSTER
+  _OP_REQP = []
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    env = {
+      "OP_TARGET": self.sstore.GetClusterName(),
+      "NEW_VG_NAME": self.op.vg_name,
+      }
+    mn = self.sstore.GetMasterNode()
+    return env, [mn], [mn]
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks whether the given params don't conflict and
+    if the given volume group is valid.
+
+    """
+    if not self.op.vg_name:
+      instances = [self.cfg.GetInstanceInfo(name)
+                   for name in self.cfg.GetInstanceList()]
+      for inst in instances:
+        for disk in inst.disks:
+          if _RecursiveCheckIfLVMBased(disk):
+            raise errors.OpPrereqError("Cannot disable lvm storage while"
+                                       " lvm-based instances exist")
+
+    # if vg_name not None, checks given volume group on all nodes
+    if self.op.vg_name:
+      node_list = self.cfg.GetNodeList()
+      vglist = rpc.call_vg_list(node_list)
+      for node in node_list:
+        vgstatus = _HasValidVG(vglist[node], self.op.vg_name)
+        if vgstatus:
+          raise errors.OpPrereqError("Error on node '%s': %s" %
+                                     (node, vgstatus))
+
+  def Exec(self, feedback_fn):
+    """Change the parameters of the cluster.
+
+    """
+    if self.op.vg_name != self.cfg.GetVGName():
+      self.cfg.SetVGName(self.op.vg_name)
+    else:
+      feedback_fn("Cluster LVM configuration already in desired"
+                  " state, not changing")
+
+
+def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
   """Sleep and poll for an instance's disk to sync.
 
   """
@@ -928,7 +1177,7 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
     return True
 
   if not oneshot:
-    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)
+    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
 
   node = instance.primary_node
 
@@ -942,7 +1191,7 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
     cumul_degraded = False
     rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
     if not rstats:
-      logger.ToStderr("Can't get any data from node %s" % node)
+      proc.LogWarning("Can't get any data from node %s" % node)
       retries += 1
       if retries >= 10:
         raise errors.RemoteError("Can't contact node %s for mirror data,"
@@ -953,10 +1202,11 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
     for i in range(len(rstats)):
       mstat = rstats[i]
       if mstat is None:
-        logger.ToStderr("Can't compute data for node %s/%s" %
+        proc.LogWarning("Can't compute data for node %s/%s" %
                         (node, instance.disks[i].iv_name))
         continue
-      perc_done, est_time, is_degraded = mstat
+      # we ignore the ldisk parameter
+      perc_done, est_time, is_degraded, _ = mstat
       cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
       if perc_done is not None:
         done = False
@@ -965,8 +1215,8 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
           max_time = est_time
         else:
           rem_time = "no time estimate"
-        logger.ToStdout("- device %s: %5.2f%% done, %s" %
-                        (instance.disks[i].iv_name, perc_done, rem_time))
+        proc.LogInfo("- device %s: %5.2f%% done, %s" %
+                     (instance.disks[i].iv_name, perc_done, rem_time))
     if done or oneshot:
       break
 
@@ -979,24 +1229,32 @@ def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
         utils.Lock('cmd')
 
   if done:
-    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
+    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
   return not cumul_degraded
 
 
-def _CheckDiskConsistency(cfgw, dev, node, on_primary):
+def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
   """Check that mirrors are not degraded.
 
+  The ldisk parameter, if True, will change the test from the
+  is_degraded attribute (which represents overall non-ok status for
+  the device(s)) to the ldisk (representing the local storage status).
+
   """
   cfgw.SetDiskID(dev, node)
+  if ldisk:
+    idx = 6
+  else:
+    idx = 5
 
   result = True
   if on_primary or dev.AssembleOnSecondary():
     rstats = rpc.call_blockdev_find(node, dev)
     if not rstats:
-      logger.ToStderr("Can't get any data from node %s" % node)
+      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
       result = False
     else:
-      result = result and (not rstats[5])
+      result = result and (not rstats[idx])
   if dev.children:
     for child in dev.children:
       result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
@@ -1008,7 +1266,7 @@ class LUDiagnoseOS(NoHooksLU):
   """Logical unit for OS diagnose/query.
 
   """
-  _OP_REQP = []
+  _OP_REQP = ["output_fields", "names"]
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -1016,7 +1274,44 @@ class LUDiagnoseOS(NoHooksLU):
     This always succeeds, since this is a pure query LU.
 
     """
-    return
+    if self.op.names:
+      raise errors.OpPrereqError("Selective OS query not supported")
+
+    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
+    _CheckOutputFields(static=[],
+                       dynamic=self.dynamic_fields,
+                       selected=self.op.output_fields)
+
+  @staticmethod
+  def _DiagnoseByOS(node_list, rlist):
+    """Remaps a per-node return list into an a per-os per-node dictionary
+
+      Args:
+        node_list: a list with the names of all nodes
+        rlist: a map with node names as keys and OS objects as values
+
+      Returns:
+        map: a map with osnames as keys and as value another map, with
+             nodes as
+             keys and list of OS objects as values
+             e.g. {"debian-etch": {"node1": [<object>,...],
+                                   "node2": [<object>,]}
+                  }
+
+    """
+    all_os = {}
+    for node_name, nr in rlist.iteritems():
+      if not nr:
+        continue
+      for os in nr:
+        if os.name not in all_os:
+          # build a list of nodes for this os containing empty lists
+          # for each node in node_list
+          all_os[os.name] = {}
+          for nname in node_list:
+            all_os[os.name][nname] = []
+        all_os[os.name][node_name].append(os)
+    return all_os
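
A runnable sketch of the remapping `_DiagnoseByOS` performs, using a namedtuple as an illustrative stand-in for the OS objects returned by the nodes:

```python
from collections import namedtuple

OSInfo = namedtuple("OSInfo", ["name", "status", "path"])   # illustrative only

def diagnose_by_os(node_list, rlist):
    """Remap {node: [os, ...]} into {os_name: {node: [os, ...]}} as above."""
    all_os = {}
    for node_name, nr in rlist.items():
        if not nr:
            continue
        for osobj in nr:
            if osobj.name not in all_os:
                # every known node gets an (initially empty) list for this OS
                all_os[osobj.name] = dict((n, []) for n in node_list)
            all_os[osobj.name][node_name].append(osobj)
    return all_os

nodes = ["node1", "node2"]
rlist = {"node1": [OSInfo("debian-etch", True, "/srv/os")], "node2": []}
print(diagnose_by_os(nodes, rlist))
# {'debian-etch': {'node1': [OSInfo(...)], 'node2': []}}
```
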
 
   def Exec(self, feedback_fn):
     """Compute the list of OSes.
@@ -1026,7 +1321,25 @@ class LUDiagnoseOS(NoHooksLU):
     node_data = rpc.call_os_diagnose(node_list)
     if node_data == False:
       raise errors.OpExecError("Can't gather the list of OSes")
-    return node_data
+    pol = self._DiagnoseByOS(node_list, node_data)
+    output = []
+    for os_name, os_data in pol.iteritems():
+      row = []
+      for field in self.op.output_fields:
+        if field == "name":
+          val = os_name
+        elif field == "valid":
+          val = utils.all([osl and osl[0] for osl in os_data.values()])
+        elif field == "node_status":
+          val = {}
+          for node_name, nos_list in os_data.iteritems():
+            val[node_name] = [(v.status, v.path) for v in nos_list]
+        else:
+          raise errors.ParameterError(field)
+        row.append(val)
+      output.append(row)
+
+    return output
 
 
 class LURemoveNode(LogicalUnit):
@@ -1045,6 +1358,7 @@ class LURemoveNode(LogicalUnit):
 
     """
     env = {
+      "OP_TARGET": self.op.node_name,
       "NODE_NAME": self.op.node_name,
       }
     all_nodes = self.cfg.GetNodeList()
@@ -1094,18 +1408,20 @@ class LURemoveNode(LogicalUnit):
 
     rpc.call_node_leave_cluster(node.name)
 
-    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
+    self.ssh.Run(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
 
     logger.Info("Removing node %s from config" % node.name)
 
     self.cfg.RemoveNode(node.name)
 
+    _RemoveHostFromEtcHosts(node.name)
+
 
 class LUQueryNodes(NoHooksLU):
   """Logical unit for querying nodes.
 
   """
-  _OP_REQP = ["output_fields"]
+  _OP_REQP = ["output_fields", "names"]
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -1114,21 +1430,24 @@ class LUQueryNodes(NoHooksLU):
 
     """
     self.dynamic_fields = frozenset(["dtotal", "dfree",
-                                     "mtotal", "mnode", "mfree"])
+                                     "mtotal", "mnode", "mfree",
+                                     "bootid"])
 
-    _CheckOutputFields(static=["name", "pinst", "sinst", "pip", "sip"],
+    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
+                               "pinst_list", "sinst_list",
+                               "pip", "sip"],
                        dynamic=self.dynamic_fields,
                        selected=self.op.output_fields)
 
+    self.wanted = _GetWantedNodes(self, self.op.names)
 
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
 
     """
-    nodenames = utils.NiceSort(self.cfg.GetNodeList())
+    nodenames = self.wanted
     nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
 
-
     # begin data gathering
 
     if self.dynamic_fields.intersection(self.op.output_fields):
@@ -1143,23 +1462,28 @@ class LUQueryNodes(NoHooksLU):
             "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
             "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
             "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
+            "bootid": nodeinfo['bootid'],
             }
         else:
           live_data[name] = {}
     else:
       live_data = dict.fromkeys(nodenames, {})
 
-    node_to_primary = dict.fromkeys(nodenames, 0)
-    node_to_secondary = dict.fromkeys(nodenames, 0)
+    node_to_primary = dict([(name, set()) for name in nodenames])
+    node_to_secondary = dict([(name, set()) for name in nodenames])
 
-    if "pinst" in self.op.output_fields or "sinst" in self.op.output_fields:
+    inst_fields = frozenset(("pinst_cnt", "pinst_list",
+                             "sinst_cnt", "sinst_list"))
+    if inst_fields & frozenset(self.op.output_fields):
       instancelist = self.cfg.GetInstanceList()
 
-      for instance in instancelist:
-        instanceinfo = self.cfg.GetInstanceInfo(instance)
-        node_to_primary[instanceinfo.primary_node] += 1
-        for secnode in instanceinfo.secondary_nodes:
-          node_to_secondary[secnode] += 1
+      for instance_name in instancelist:
+        inst = self.cfg.GetInstanceInfo(instance_name)
+        if inst.primary_node in node_to_primary:
+          node_to_primary[inst.primary_node].add(inst.name)
+        for secnode in inst.secondary_nodes:
+          if secnode in node_to_secondary:
+            node_to_secondary[secnode].add(inst.name)
 
     # end data gathering
 
@@ -1169,19 +1493,22 @@ class LUQueryNodes(NoHooksLU):
       for field in self.op.output_fields:
         if field == "name":
           val = node.name
-        elif field == "pinst":
-          val = node_to_primary[node.name]
-        elif field == "sinst":
-          val = node_to_secondary[node.name]
+        elif field == "pinst_list":
+          val = list(node_to_primary[node.name])
+        elif field == "sinst_list":
+          val = list(node_to_secondary[node.name])
+        elif field == "pinst_cnt":
+          val = len(node_to_primary[node.name])
+        elif field == "sinst_cnt":
+          val = len(node_to_secondary[node.name])
         elif field == "pip":
           val = node.primary_ip
         elif field == "sip":
           val = node.secondary_ip
         elif field in self.dynamic_fields:
-          val = live_data[node.name].get(field, "?")
+          val = live_data[node.name].get(field, None)
         else:
           raise errors.ParameterError(field)
-        val = str(val)
         node_output.append(val)
       output.append(node_output)
 
@@ -1211,7 +1538,7 @@ class LUQueryNodeVolumes(NoHooksLU):
     """Computes the list of nodes and their attributes.
 
     """
-    nodenames = utils.NiceSort([node.name for node in self.nodes])
+    nodenames = self.nodes
     volumes = rpc.call_node_volumes(nodenames)
 
     ilist = [self.cfg.GetInstanceInfo(iname) for iname
@@ -1273,6 +1600,7 @@ class LUAddNode(LogicalUnit):
 
     """
     env = {
+      "OP_TARGET": self.op.node_name,
       "NODE_NAME": self.op.node_name,
       "NODE_PIP": self.op.primary_ip,
       "NODE_SIP": self.op.secondary_ip,
@@ -1295,25 +1623,34 @@ class LUAddNode(LogicalUnit):
     node_name = self.op.node_name
     cfg = self.cfg
 
-    dns_data = utils.LookupHostname(node_name)
-    if not dns_data:
-      raise errors.OpPrereqError("Node %s is not resolvable" % node_name)
+    dns_data = utils.HostInfo(node_name)
 
-    node = dns_data['hostname']
-    primary_ip = self.op.primary_ip = dns_data['ip']
+    node = dns_data.name
+    primary_ip = self.op.primary_ip = dns_data.ip
     secondary_ip = getattr(self.op, "secondary_ip", None)
     if secondary_ip is None:
       secondary_ip = primary_ip
     if not utils.IsValidIP(secondary_ip):
       raise errors.OpPrereqError("Invalid secondary IP given")
     self.op.secondary_ip = secondary_ip
+
     node_list = cfg.GetNodeList()
-    if node in node_list:
-      raise errors.OpPrereqError("Node %s is already in the configuration"
-                                 % node)
+    if not self.op.readd and node in node_list:
+      raise errors.OpPrereqError("Node %s is already in the configuration" %
+                                 node)
+    elif self.op.readd and node not in node_list:
+      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
 
     for existing_node_name in node_list:
       existing_node = cfg.GetNodeInfo(existing_node_name)
+
+      if self.op.readd and node == existing_node_name:
+        if (existing_node.primary_ip != primary_ip or
+            existing_node.secondary_ip != secondary_ip):
+          raise errors.OpPrereqError("Readded node doesn't have the same IP"
+                                     " address configuration as before")
+        continue
+
       if (existing_node.primary_ip == primary_ip or
           existing_node.secondary_ip == primary_ip or
           existing_node.primary_ip == secondary_ip or
@@ -1335,22 +1672,25 @@ class LUAddNode(LogicalUnit):
                                    " new node doesn't have one")
 
     # checks reachablity
-    command = ["fping", "-q", primary_ip]
-    result = utils.RunCmd(command)
-    if result.failed:
+    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
       raise errors.OpPrereqError("Node not reachable by ping")
 
     if not newbie_singlehomed:
       # check reachability from my secondary ip to newbie's secondary ip
-      command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
-      result = utils.RunCmd(command)
-      if result.failed:
-        raise errors.OpPrereqError("Node secondary ip not reachable by ping")
+      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
+                           source=myself.secondary_ip):
+        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
+                                   " based ping to noded port")
 
     self.new_node = objects.Node(name=node,
                                  primary_ip=primary_ip,
                                  secondary_ip=secondary_ip)
 
+    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
+      if not os.path.exists(constants.VNC_PASSWORD_FILE):
+        raise errors.OpPrereqError("Cluster VNC password file %s missing" %
+                                   constants.VNC_PASSWORD_FILE)
+
   def Exec(self, feedback_fn):
     """Adds the new node to the cluster.
 
@@ -1389,7 +1729,7 @@ class LUAddNode(LogicalUnit):
                   constants.SSL_CERT_FILE, gntpem,
                   constants.NODE_INITD_SCRIPT))
 
-    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
+    result = self.ssh.Run(node, 'root', mycommand, batch=False, ask_key=True)
     if result.failed:
       raise errors.OpExecError("Remote command on node %s, error: %s,"
                                " output: %s" %
@@ -1412,10 +1752,11 @@ class LUAddNode(LogicalUnit):
 
     # setup ssh on node
     logger.Info("copy ssh key to node %s" % node)
+    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
     keyarray = []
-    keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub",
-                "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub",
-                "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"]
+    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
+                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
+                priv_key, pub_key]
 
     for i in keyfiles:
       f = open(i, 'r')
@@ -1431,24 +1772,23 @@ class LUAddNode(LogicalUnit):
       raise errors.OpExecError("Cannot transfer ssh keys to the new node")
 
     # Add node to our /etc/hosts, and add key to known_hosts
-    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
-    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
-                      self.cfg.GetHostKey())
+    _AddHostToEtcHosts(new_node.name)
 
     if new_node.secondary_ip != new_node.primary_ip:
-      result = ssh.SSHCall(node, "root",
-                           "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
-      if result.failed:
-        raise errors.OpExecError("Node claims it doesn't have the"
-                                 " secondary ip you gave (%s).\n"
-                                 "Please fix and re-run this command." %
-                                 new_node.secondary_ip)
-
-    success, msg = ssh.VerifyNodeHostname(node)
+      if not rpc.call_node_tcp_ping(new_node.name,
+                                    constants.LOCALHOST_IP_ADDRESS,
+                                    new_node.secondary_ip,
+                                    constants.DEFAULT_NODED_PORT,
+                                    10, False):
+        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
+                                 " you gave (%s). Please fix and re-run this"
+                                 " command." % new_node.secondary_ip)
+
+    success, msg = self.ssh.VerifyNodeHostname(node)
     if not success:
       raise errors.OpExecError("Node '%s' claims it has a different hostname"
-                               " than the one the resolver gives: %s.\n"
-                               "Please fix and re-run this command." %
+                               " than the one the resolver gives: %s."
+                               " Please fix and re-run this command." %
                                (node, msg))
 
     # Distribute updated /etc/hosts and known_hosts to all nodes,
@@ -1459,7 +1799,7 @@ class LUAddNode(LogicalUnit):
       dist_nodes.remove(myself.name)
 
     logger.Debug("Copying hosts and known_hosts to all nodes")
-    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
+    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
       result = rpc.call_upload_file(dist_nodes, fname)
       for to_node in dist_nodes:
         if not result[to_node]:
@@ -1467,12 +1807,15 @@ class LUAddNode(LogicalUnit):
                        (fname, to_node))
 
     to_copy = ss.GetFileList()
+    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
+      to_copy.append(constants.VNC_PASSWORD_FILE)
     for fname in to_copy:
-      if not ssh.CopyFileToNode(node, fname):
+      if not self.ssh.CopyFileToNode(node, fname):
         logger.Error("could not copy file %s to node %s" % (fname, node))
 
-    logger.Info("adding node %s to cluster.conf" % node)
-    self.cfg.AddNode(new_node)
+    if not self.op.readd:
+      logger.Info("adding node %s to cluster.conf" % node)
+      self.cfg.AddNode(new_node)
 
 
 class LUMasterFailover(LogicalUnit):
@@ -1494,6 +1837,7 @@ class LUMasterFailover(LogicalUnit):
 
     """
     env = {
+      "OP_TARGET": self.new_master,
       "NEW_MASTER": self.new_master,
       "OLD_MASTER": self.old_master,
       }
@@ -1505,14 +1849,13 @@ class LUMasterFailover(LogicalUnit):
     This checks that we are not already the master.
 
     """
-    self.new_master = socket.gethostname()
-
+    self.new_master = utils.HostInfo().name
     self.old_master = self.sstore.GetMasterNode()
 
     if self.old_master == self.new_master:
       raise errors.OpPrereqError("This commands must be run on the node"
-                                 " where you want the new master to be.\n"
-                                 "%s is already the master" %
+                                 " where you want the new master to be."
+                                 " %s is already the master" %
                                  self.old_master)
 
   def Exec(self, feedback_fn):
@@ -1541,8 +1884,8 @@ class LUMasterFailover(LogicalUnit):
     if not rpc.call_node_start_master(self.new_master):
       logger.Error("could not start the master role on the new master"
                    " %s, please check" % self.new_master)
-      feedback_fn("Error in activating the master IP on the new master,\n"
-                  "please fix manually.")
+      feedback_fn("Error in activating the master IP on the new master,"
+                  " please fix manually.")
 
 
 
@@ -1607,12 +1950,12 @@ class LUClusterCopyFile(NoHooksLU):
     """
     filename = self.op.filename
 
-    myname = socket.gethostname()
+    myname = utils.HostInfo().name
 
-    for node in [node.name for node in self.nodes]:
+    for node in self.nodes:
       if node == myname:
         continue
-      if not ssh.CopyFileToNode(node, filename):
+      if not self.ssh.CopyFileToNode(node, filename):
         logger.Error("Copy of file %s to node %s failed" % (filename, node))
 
 
@@ -1653,10 +1996,16 @@ class LURunClusterCommand(NoHooksLU):
     """Run a command on some nodes.
 
     """
+    # put the master at the end of the nodes list
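+    # (presumably so that a command which disrupts the master does not cut
+    # the run short for the remaining nodes)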
+    master_node = self.sstore.GetMasterNode()
+    if master_node in self.nodes:
+      self.nodes.remove(master_node)
+      self.nodes.append(master_node)
+
     data = []
     for node in self.nodes:
-      result = ssh.SSHCall(node.name, "root", self.op.command)
-      data.append((node.name, result.output, result.exit_code))
+      result = self.ssh.Run(node, "root", self.op.command)
+      data.append((node, result.output, result.exit_code))
 
     return data
 
@@ -1709,21 +2058,47 @@ def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
   """
   device_info = []
   disks_ok = True
+  iname = instance.name
+  # With the two-pass mechanism we try to reduce the window of
+  # opportunity for the race condition of switching DRBD to primary
+  # before the handshake has occurred, but we do not eliminate it
+
+  # The proper fix would be to wait (with some limits) until the
+  # connection has been made and drbd transitions from WFConnection
+  # into any other network-connected state (Connected, SyncTarget,
+  # SyncSource, etc.)
+
+  # 1st pass, assemble on all nodes in secondary mode
   for inst_disk in instance.disks:
-    master_result = None
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       cfg.SetDiskID(node_disk, node)
-      is_primary = node == instance.primary_node
-      result = rpc.call_blockdev_assemble(node, node_disk, is_primary)
+      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
       if not result:
-        logger.Error("could not prepare block device %s on node %s (is_pri"
-                     "mary=%s)" % (inst_disk.iv_name, node, is_primary))
-        if is_primary or not ignore_secondaries:
+        logger.Error("could not prepare block device %s on node %s"
+                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
+        if not ignore_secondaries:
           disks_ok = False
-      if is_primary:
-        master_result = result
-    device_info.append((instance.primary_node, inst_disk.iv_name,
-                        master_result))
+
+  # FIXME: race condition on drbd migration to primary
+
+  # 2nd pass, do only the primary node
+  for inst_disk in instance.disks:
+    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
+      if node != instance.primary_node:
+        continue
+      cfg.SetDiskID(node_disk, node)
+      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
+      if not result:
+        logger.Error("could not prepare block device %s on node %s"
+                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
+        disks_ok = False
+    device_info.append((instance.primary_node, inst_disk.iv_name, result))
+
+  # leave the disks configured for the primary node
+  # this is a workaround that would be fixed better by
+  # improving the logical/physical id handling
+  for disk in instance.disks:
+    cfg.SetDiskID(disk, instance.primary_node)
 
   return disks_ok, device_info
 
@@ -1800,6 +2175,36 @@ def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
   return result
 
 
+def _CheckNodeFreeMemory(cfg, node, reason, requested):
+  """Checks if a node has enough free memory.
+
+  This function checks if a given node has the needed amount of free
+  memory. In case the node has less memory or we cannot get the
+  information from the node, this function raises an OpPrereqError
+  exception.
+
+  Args:
+    - cfg: a ConfigWriter instance
+    - node: the node name
+    - reason: string to use in the error message
+    - requested: the amount of memory in MiB
+
+  """
+  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
+  if not nodeinfo or not isinstance(nodeinfo, dict):
+    raise errors.OpPrereqError("Could not contact node %s for resource"
+                             " information" % (node,))
+
+  free_mem = nodeinfo[node].get('memory_free')
+  if not isinstance(free_mem, int):
+    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
+                             " was '%s'" % (node, free_mem))
+  if requested > free_mem:
+    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
+                             " needed %s MiB, available %s MiB" %
+                             (node, reason, requested, free_mem))
+
+
 class LUStartupInstance(LogicalUnit):
   """Starts an instance.
 
@@ -1835,11 +2240,11 @@ class LUStartupInstance(LogicalUnit):
                                  self.op.instance_name)
 
     # check bridges existance
-    brlist = [nic.bridge for nic in instance.nics]
-    if not rpc.call_bridges_exist(instance.primary_node, brlist):
-      raise errors.OpPrereqError("one or more target bridges %s does not"
-                                 " exist on destination node '%s'" %
-                                 (brlist, instance.primary_node))
+    _CheckInstanceBridgesExist(instance)
+
+    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
+                         "starting instance %s" % instance.name,
+                         instance.memory)
 
     self.instance = instance
     self.op.instance_name = instance.name
@@ -1852,21 +2257,9 @@ class LUStartupInstance(LogicalUnit):
     force = self.op.force
     extra_args = getattr(self.op, "extra_args", "")
 
-    node_current = instance.primary_node
-
-    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
-    if not nodeinfo:
-      raise errors.OpExecError("Could not contact node %s for infos" %
-                               (node_current))
+    self.cfg.MarkInstanceUp(instance.name)
 
-    freememory = nodeinfo[node_current]['memory_free']
-    memory = instance.memory
-    if memory > freememory:
-      raise errors.OpExecError("Not enough memory to start instance"
-                               " %s on node %s"
-                               " needed %s MiB, available %s MiB" %
-                               (instance.name, node_current, memory,
-                                freememory))
+    node_current = instance.primary_node
 
     _StartInstanceDisks(self.cfg, instance, force)
 
@@ -1874,6 +2267,80 @@ class LUStartupInstance(LogicalUnit):
       _ShutdownInstanceDisks(instance, self.cfg)
       raise errors.OpExecError("Could not start instance")
 
+
+class LURebootInstance(LogicalUnit):
+  """Reboot an instance.
+
+  """
+  HPATH = "instance-reboot"
+  HTYPE = constants.HTYPE_INSTANCE
+  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = {
+      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
+      }
+    env.update(_BuildInstanceHookEnvByObject(self.instance))
+    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+          list(self.instance.secondary_nodes))
+    return env, nl, nl
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster.
+
+    """
+    instance = self.cfg.GetInstanceInfo(
+      self.cfg.ExpandInstanceName(self.op.instance_name))
+    if instance is None:
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
+
+    # check bridges existance
+    _CheckInstanceBridgesExist(instance)
+
+    self.instance = instance
+    self.op.instance_name = instance.name
+
+  def Exec(self, feedback_fn):
+    """Reboot the instance.
+
+    """
+    instance = self.instance
+    ignore_secondaries = self.op.ignore_secondaries
+    reboot_type = self.op.reboot_type
+    extra_args = getattr(self.op, "extra_args", "")
+
+    node_current = instance.primary_node
+
+    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
+                           constants.INSTANCE_REBOOT_HARD,
+                           constants.INSTANCE_REBOOT_FULL]:
+      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
+                                  (constants.INSTANCE_REBOOT_SOFT,
+                                   constants.INSTANCE_REBOOT_HARD,
+                                   constants.INSTANCE_REBOOT_FULL))
+
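+    # soft/hard reboots are handled by the hypervisor on the primary node;
+    # a full reboot also tears down and reassembles the instance's disks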
+    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
+                       constants.INSTANCE_REBOOT_HARD]:
+      if not rpc.call_instance_reboot(node_current, instance,
+                                      reboot_type, extra_args):
+        raise errors.OpExecError("Could not reboot instance")
+    else:
+      if not rpc.call_instance_shutdown(node_current, instance):
+        raise errors.OpExecError("could not shutdown instance for full reboot")
+      _ShutdownInstanceDisks(instance, self.cfg)
+      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
+      if not rpc.call_instance_start(node_current, instance, extra_args):
+        _ShutdownInstanceDisks(instance, self.cfg)
+        raise errors.OpExecError("Could not start instance for full reboot")
+
     self.cfg.MarkInstanceUp(instance.name)
 
 
@@ -1915,10 +2382,10 @@ class LUShutdownInstance(LogicalUnit):
     """
     instance = self.instance
     node_current = instance.primary_node
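+    # the instance is marked down in the config before the actual shutdown
+    # call, presumably so the recorded state reflects the admin's intent even
+    # if the shutdown itself fails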
+    self.cfg.MarkInstanceDown(instance.name)
     if not rpc.call_instance_shutdown(node_current, instance):
       logger.Error("could not shutdown instance")
 
-    self.cfg.MarkInstanceDown(instance.name)
     _ShutdownInstanceDisks(instance, self.cfg)
 
 
@@ -1972,8 +2439,8 @@ class LUReinstallInstance(LogicalUnit):
       if pnode is None:
         raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                    self.op.pnode)
-      os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
-      if not isinstance(os_obj, objects.OS):
+      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
+      if not os_obj:
         raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                    " primary node"  % self.op.os_type)
 
@@ -1994,13 +2461,117 @@ class LUReinstallInstance(LogicalUnit):
     try:
       feedback_fn("Running the instance OS create scripts...")
       if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
-        raise errors.OpExecError("Could not install OS for instance %s "
-                                 "on node %s" %
+        raise errors.OpExecError("Could not install OS for instance %s"
+                                 " on node %s" %
                                  (inst.name, inst.primary_node))
     finally:
       _ShutdownInstanceDisks(inst, self.cfg)
 
 
+class LURenameInstance(LogicalUnit):
+  """Rename an instance.
+
+  """
+  HPATH = "instance-rename"
+  HTYPE = constants.HTYPE_INSTANCE
+  _OP_REQP = ["instance_name", "new_name"]
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = _BuildInstanceHookEnvByObject(self.instance)
+    env["INSTANCE_NEW_NAME"] = self.op.new_name
+    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+          list(self.instance.secondary_nodes))
+    return env, nl, nl
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster and is not running.
+
+    """
+    instance = self.cfg.GetInstanceInfo(
+      self.cfg.ExpandInstanceName(self.op.instance_name))
+    if instance is None:
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
+    if instance.status != "down":
+      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
+                                 self.op.instance_name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    if remote_info:
+      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
+                                 (self.op.instance_name,
+                                  instance.primary_node))
+    self.instance = instance
+
+    # new name verification
+    name_info = utils.HostInfo(self.op.new_name)
+
+    self.op.new_name = new_name = name_info.name
+    instance_list = self.cfg.GetInstanceList()
+    if new_name in instance_list:
+      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
+                                 new_name)
+
+    if not getattr(self.op, "ignore_ip", False):
+      command = ["fping", "-q", name_info.ip]
+      result = utils.RunCmd(command)
+      if not result.failed:
+        raise errors.OpPrereqError("IP %s of instance %s already in use" %
+                                   (name_info.ip, new_name))
+
+
+  def Exec(self, feedback_fn):
+    """Reinstall the instance.
+
+    """
+    inst = self.instance
+    old_name = inst.name
+
+    if inst.disk_template == constants.DT_FILE:
+      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
+
+    self.cfg.RenameInstance(inst.name, self.op.new_name)
+
+    # re-read the instance from the configuration after rename
+    inst = self.cfg.GetInstanceInfo(self.op.new_name)
+
+    if inst.disk_template == constants.DT_FILE:
+      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
+      result = rpc.call_file_storage_dir_rename(inst.primary_node,
+                                                old_file_storage_dir,
+                                                new_file_storage_dir)
+
+      if not result:
+        raise errors.OpExecError("Could not connect to node '%s' to rename"
+                                 " directory '%s' to '%s' (but the instance"
+                                 " has been renamed in Ganeti)" % (
+                                 inst.primary_node, old_file_storage_dir,
+                                 new_file_storage_dir))
+
+      if not result[0]:
+        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
+                                 " (but the instance has been renamed in"
+                                 " Ganeti)" % (old_file_storage_dir,
+                                               new_file_storage_dir))
+
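+    # the OS rename script runs on the primary node and needs the instance's
+    # disks assembled there, hence the activate/shutdown pair around the call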
+    _StartInstanceDisks(self.cfg, inst, None)
+    try:
+      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
+                                          "sda", "sdb"):
+        msg = ("Could run OS rename script for instance %s on node %s (but the"
+               " instance has been renamed in Ganeti)" %
+               (inst.name, inst.primary_node))
+        logger.Error(msg)
+    finally:
+      _ShutdownInstanceDisks(inst, self.cfg)
+
+
 class LURemoveInstance(LogicalUnit):
   """Remove an instance.
 
@@ -2016,8 +2587,7 @@ class LURemoveInstance(LogicalUnit):
 
     """
     env = _BuildInstanceHookEnvByObject(self.instance)
-    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
-          list(self.instance.secondary_nodes))
+    nl = [self.sstore.GetMasterNode()]
     return env, nl, nl
 
   def CheckPrereq(self):
@@ -2042,12 +2612,19 @@ class LURemoveInstance(LogicalUnit):
                 (instance.name, instance.primary_node))
 
     if not rpc.call_instance_shutdown(instance.primary_node, instance):
-      raise errors.OpExecError("Could not shutdown instance %s on node %s" %
-                               (instance.name, instance.primary_node))
+      if self.op.ignore_failures:
+        feedback_fn("Warning: can't shutdown instance")
+      else:
+        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
+                                 (instance.name, instance.primary_node))
 
     logger.Info("removing block devices for instance %s" % instance.name)
 
-    _RemoveDisks(instance, self.cfg)
+    if not _RemoveDisks(instance, self.cfg):
+      if self.op.ignore_failures:
+        feedback_fn("Warning: can't remove instance's disks")
+      else:
+        raise errors.OpExecError("Can't remove instance's disks")
 
     logger.Info("removing instance %s out of cluster config" % instance.name)
 
@@ -2058,7 +2635,7 @@ class LUQueryInstances(NoHooksLU):
   """Logical unit for querying instances.
 
   """
-  _OP_REQP = ["output_fields"]
+  _OP_REQP = ["output_fields", "names"]
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -2066,19 +2643,21 @@ class LUQueryInstances(NoHooksLU):
     This checks that the fields required are valid output fields.
 
     """
-    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
+    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
     _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                                "admin_state", "admin_ram",
                                "disk_template", "ip", "mac", "bridge",
-                               "sda_size", "sdb_size"],
+                               "sda_size", "sdb_size", "vcpus"],
                        dynamic=self.dynamic_fields,
                        selected=self.op.output_fields)
 
+    self.wanted = _GetWantedInstances(self, self.op.names)
+
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
 
     """
-    instance_names = utils.NiceSort(self.cfg.GetInstanceList())
+    instance_names = self.wanted
     instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                      in instance_names]
 
@@ -2113,25 +2692,34 @@ class LUQueryInstances(NoHooksLU):
         elif field == "pnode":
           val = instance.primary_node
         elif field == "snodes":
-          val = ",".join(instance.secondary_nodes) or "-"
+          val = list(instance.secondary_nodes)
         elif field == "admin_state":
-          if instance.status == "down":
-            val = "no"
-          else:
-            val = "yes"
+          val = (instance.status != "down")
         elif field == "oper_state":
           if instance.primary_node in bad_nodes:
-            val = "(node down)"
+            val = None
+          else:
+            val = bool(live_data.get(instance.name))
+        elif field == "status":
+          if instance.primary_node in bad_nodes:
+            val = "ERROR_nodedown"
           else:
-            if live_data.get(instance.name):
-              val = "running"
+            running = bool(live_data.get(instance.name))
+            if running:
+              if instance.status != "down":
+                val = "running"
+              else:
+                val = "ERROR_up"
             else:
-              val = "stopped"
+              if instance.status != "down":
+                val = "ERROR_down"
+              else:
+                val = "ADMIN_down"
         elif field == "admin_ram":
           val = instance.memory
         elif field == "oper_ram":
           if instance.primary_node in bad_nodes:
-            val = "(node down)"
+            val = None
           elif instance.name in live_data:
             val = live_data[instance.name].get("memory", "?")
           else:
@@ -2147,12 +2735,13 @@ class LUQueryInstances(NoHooksLU):
         elif field == "sda_size" or field == "sdb_size":
           disk = instance.FindDisk(field[:3])
           if disk is None:
-            val = "N/A"
+            val = None
           else:
             val = disk.size
+        elif field == "vcpus":
+          val = instance.vcpus
         else:
           raise errors.ParameterError(field)
-        val = str(val)
         iout.append(val)
       output.append(iout)
 
@@ -2192,34 +2781,26 @@ class LUFailoverInstance(LogicalUnit):
       raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
 
-    if instance.disk_template != constants.DT_REMOTE_RAID1:
+    if instance.disk_template not in constants.DTS_NET_MIRROR:
       raise errors.OpPrereqError("Instance's disk layout is not"
-                                 " remote_raid1.")
+                                 " network mirrored, cannot failover.")
 
     secondary_nodes = instance.secondary_nodes
     if not secondary_nodes:
       raise errors.ProgrammerError("no secondary node but using "
                                    "DT_REMOTE_RAID1 template")
 
-    # check memory requirements on the secondary node
     target_node = secondary_nodes[0]
-    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
-    info = nodeinfo.get(target_node, None)
-    if not info:
-      raise errors.OpPrereqError("Cannot get current information"
-                                 " from node '%s'" % nodeinfo)
-    if instance.memory > info['memory_free']:
-      raise errors.OpPrereqError("Not enough memory on target node %s."
-                                 " %d MB available, %d MB required" %
-                                 (target_node, info['memory_free'],
-                                  instance.memory))
+    # check memory requirements on the secondary node
+    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
+                         instance.name, instance.memory)
 
     # check bridge existance
     brlist = [nic.bridge for nic in instance.nics]
-    if not rpc.call_bridges_exist(instance.primary_node, brlist):
+    if not rpc.call_bridges_exist(target_node, brlist):
       raise errors.OpPrereqError("One or more target bridges %s does not"
                                  " exist on destination node '%s'" %
-                                 (brlist, instance.primary_node))
+                                 (brlist, target_node))
 
     self.instance = instance
 
@@ -2239,33 +2820,22 @@ class LUFailoverInstance(LogicalUnit):
     for dev in instance.disks:
       # for remote_raid1, these are md over drbd
       if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
-        if not self.op.ignore_consistency:
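+        # degraded disks block the failover only for instances that are
+        # marked up (and only if consistency checks are not ignored)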
+        if instance.status == "up" and not self.op.ignore_consistency:
           raise errors.OpExecError("Disk %s is degraded on target node,"
                                    " aborting failover." % dev.iv_name)
 
-    feedback_fn("* checking target node resource availability")
-    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
-
-    if not nodeinfo:
-      raise errors.OpExecError("Could not contact target node %s." %
-                               target_node)
-
-    free_memory = int(nodeinfo[target_node]['memory_free'])
-    memory = instance.memory
-    if memory > free_memory:
-      raise errors.OpExecError("Not enough memory to create instance %s on"
-                               " node %s. needed %s MiB, available %s MiB" %
-                               (instance.name, target_node, memory,
-                                free_memory))
-
     feedback_fn("* shutting down instance on source node")
     logger.Info("Shutting down instance %s on node %s" %
                 (instance.name, source_node))
 
     if not rpc.call_instance_shutdown(source_node, instance):
-      logger.Error("Could not shutdown instance %s on node %s. Proceeding"
-                   " anyway. Please make sure node %s is down"  %
-                   (instance.name, source_node, source_node))
+      if self.op.ignore_consistency:
+        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
+                     " anyway. Please make sure node %s is down"  %
+                     (instance.name, source_node, source_node))
+      else:
+        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
+                                 (instance.name, source_node))
 
     feedback_fn("* deactivating the instance's disks on source node")
     if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
@@ -2275,24 +2845,26 @@ class LUFailoverInstance(LogicalUnit):
     # distribute new instance config to the other nodes
     self.cfg.AddInstance(instance)
 
-    feedback_fn("* activating the instance's disks on target node")
-    logger.Info("Starting instance %s on node %s" %
-                (instance.name, target_node))
+    # Only start the instance if it's marked as up
+    if instance.status == "up":
+      feedback_fn("* activating the instance's disks on target node")
+      logger.Info("Starting instance %s on node %s" %
+                  (instance.name, target_node))
 
-    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
-                                             ignore_secondaries=True)
-    if not disks_ok:
-      _ShutdownInstanceDisks(instance, self.cfg)
-      raise errors.OpExecError("Can't activate the instance's disks")
+      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
+                                               ignore_secondaries=True)
+      if not disks_ok:
+        _ShutdownInstanceDisks(instance, self.cfg)
+        raise errors.OpExecError("Can't activate the instance's disks")
 
-    feedback_fn("* starting the instance on the target node")
-    if not rpc.call_instance_start(target_node, instance, None):
-      _ShutdownInstanceDisks(instance, self.cfg)
-      raise errors.OpExecError("Could not start instance %s on node %s." %
-                               (instance.name, target_node))
+      feedback_fn("* starting the instance on the target node")
+      if not rpc.call_instance_start(target_node, instance, None):
+        _ShutdownInstanceDisks(instance, self.cfg)
+        raise errors.OpExecError("Could not start instance %s on node %s." %
+                                 (instance.name, target_node))
 
 
-def _CreateBlockDevOnPrimary(cfg, node, device, info):
+def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
   """Create a tree of block devices on the primary node.
 
   This always creates all devices.
@@ -2300,11 +2872,12 @@ def _CreateBlockDevOnPrimary(cfg, node, device, info):
   """
   if device.children:
     for child in device.children:
-      if not _CreateBlockDevOnPrimary(cfg, node, child, info):
+      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
         return False
 
   cfg.SetDiskID(device, node)
-  new_id = rpc.call_blockdev_create(node, device, device.size, True, info)
+  new_id = rpc.call_blockdev_create(node, device, device.size,
+                                    instance.name, True, info)
   if not new_id:
     return False
   if device.physical_id is None:
@@ -2312,7 +2885,7 @@ def _CreateBlockDevOnPrimary(cfg, node, device, info):
   return True
 
 
-def _CreateBlockDevOnSecondary(cfg, node, device, force, info):
+def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
   """Create a tree of block devices on a secondary node.
 
   If this device type has to be created on secondaries, create it and
@@ -2325,13 +2898,15 @@ def _CreateBlockDevOnSecondary(cfg, node, device, force, info):
     force = True
   if device.children:
     for child in device.children:
-      if not _CreateBlockDevOnSecondary(cfg, node, child, force, info):
+      if not _CreateBlockDevOnSecondary(cfg, node, instance,
+                                        child, force, info):
         return False
 
   if not force:
     return True
   cfg.SetDiskID(device, node)
-  new_id = rpc.call_blockdev_create(node, device, device.size, False, info)
+  new_id = rpc.call_blockdev_create(node, device, device.size,
+                                    instance.name, False, info)
   if not new_id:
     return False
   if device.physical_id is None:
@@ -2358,76 +2933,79 @@ def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
   """
   port = cfg.AllocatePort()
   vgname = cfg.GetVGName()
-  dev_data = objects.Disk(dev_type="lvm", size=size,
+  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                           logical_id=(vgname, names[0]))
-  dev_meta = objects.Disk(dev_type="lvm", size=128,
+  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                           logical_id=(vgname, names[1]))
-  drbd_dev = objects.Disk(dev_type="drbd", size=size,
+  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                           logical_id = (primary, secondary, port),
                           children = [dev_data, dev_meta])
   return drbd_dev
 
 
+def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
+  """Generate a drbd8 device complete with its children.
+
+  """
+  port = cfg.AllocatePort()
+  vgname = cfg.GetVGName()
+  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
+                          logical_id=(vgname, names[0]))
+  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
+                          logical_id=(vgname, names[1]))
+  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
+                          logical_id = (primary, secondary, port),
+                          children = [dev_data, dev_meta],
+                          iv_name=iv_name)
+  return drbd_dev
+
+
 def _GenerateDiskTemplate(cfg, template_name,
                           instance_name, primary_node,
-                          secondary_nodes, disk_sz, swap_sz):
+                          secondary_nodes, disk_sz, swap_sz,
+                          file_storage_dir, file_driver):
   """Generate the entire disk layout for a given template type.
 
   """
   #TODO: compute space requirements
 
   vgname = cfg.GetVGName()
-  if template_name == "diskless":
+  if template_name == constants.DT_DISKLESS:
     disks = []
-  elif template_name == "plain":
+  elif template_name == constants.DT_PLAIN:
     if len(secondary_nodes) != 0:
       raise errors.ProgrammerError("Wrong template configuration")
 
     names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
-    sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
+    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                            logical_id=(vgname, names[0]),
                            iv_name = "sda")
-    sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
+    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                            logical_id=(vgname, names[1]),
                            iv_name = "sdb")
     disks = [sda_dev, sdb_dev]
-  elif template_name == "local_raid1":
-    if len(secondary_nodes) != 0:
-      raise errors.ProgrammerError("Wrong template configuration")
-
-
-    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
-                                       ".sdb_m1", ".sdb_m2"])
-    sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
-                              logical_id=(vgname, names[0]))
-    sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
-                              logical_id=(vgname, names[1]))
-    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
-                              size=disk_sz,
-                              children = [sda_dev_m1, sda_dev_m2])
-    sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
-                              logical_id=(vgname, names[2]))
-    sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
-                              logical_id=(vgname, names[3]))
-    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
-                              size=swap_sz,
-                              children = [sdb_dev_m1, sdb_dev_m2])
-    disks = [md_sda_dev, md_sdb_dev]
-  elif template_name == constants.DT_REMOTE_RAID1:
+  elif template_name == constants.DT_DRBD8:
     if len(secondary_nodes) != 1:
       raise errors.ProgrammerError("Wrong template configuration")
     remote_node = secondary_nodes[0]
     names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                        ".sdb_data", ".sdb_meta"])
-    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
-                                         disk_sz, names[0:2])
-    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
-                              children = [drbd_sda_dev], size=disk_sz)
-    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
-                                         swap_sz, names[2:4])
-    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
-                              children = [drbd_sdb_dev], size=swap_sz)
-    disks = [md_sda_dev, md_sdb_dev]
+    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
+                                         disk_sz, names[0:2], "sda")
+    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
+                                         swap_sz, names[2:4], "sdb")
+    disks = [drbd_sda_dev, drbd_sdb_dev]
+  elif template_name == constants.DT_FILE:
+    if len(secondary_nodes) != 0:
+      raise errors.ProgrammerError("Wrong template configuration")
+
+    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
+                                iv_name="sda", logical_id=(file_driver,
+                                "%s/sda" % file_storage_dir))
+    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
+                                iv_name="sdb", logical_id=(file_driver,
+                                "%s/sdb" % file_storage_dir))
+    disks = [file_sda_dev, file_sdb_dev]
   else:
     raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
   return disks
@@ -2454,21 +3032,36 @@ def _CreateDisks(cfg, instance):
   """
   info = _GetInstanceInfoText(instance)
 
+  if instance.disk_template == constants.DT_FILE:
+    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
+    result = rpc.call_file_storage_dir_create(instance.primary_node,
+                                              file_storage_dir)
+
+    if not result:
+      logger.Error("Could not connect to node '%s'" % instance.primary_node)
+      return False
+
+    if not result[0]:
+      logger.Error("failed to create directory '%s'" % file_storage_dir)
+      return False
+
   for device in instance.disks:
     logger.Info("creating volume %s for instance %s" %
-              (device.iv_name, instance.name))
+                (device.iv_name, instance.name))
     #HARDCODE
     for secondary_node in instance.secondary_nodes:
-      if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False,
-                                        info):
+      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
+                                        device, False, info):
         logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                      (device.iv_name, device, secondary_node))
         return False
     #HARDCODE
-    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device, info):
+    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
+                                    instance, device, info):
       logger.Error("failed to create volume %s on primary!" %
                    device.iv_name)
       return False
+
   return True
 
 
@@ -2477,7 +3070,7 @@ def _RemoveDisks(instance, cfg):
 
   This abstracts away some work from `AddInstance()` and
   `RemoveInstance()`. Note that in case some of the devices couldn't
-  be remove, the removal will continue with the other ones (compare
+  be removed, the removal will continue with the other ones (compare
   with `_CreateDisks()`).
 
   Args:
@@ -2498,18 +3091,85 @@ def _RemoveDisks(instance, cfg):
                      " continuing anyway" %
                      (device.iv_name, node))
         result = False
+
+  if instance.disk_template == constants.DT_FILE:
+    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
+    if not rpc.call_file_storage_dir_remove(instance.primary_node,
+                                            file_storage_dir):
+      logger.Error("could not remove directory '%s'" % file_storage_dir)
+      result = False
+
   return result
 
 
+def _ComputeDiskSize(disk_template, disk_size, swap_size):
+  """Compute disk size requirements in the volume group
+
+  This is currently hard-coded for the two-drive layout.
+
+  """
+  # Required free disk space as a function of disk and swap space
+  req_size_dict = {
+    constants.DT_DISKLESS: None,
+    constants.DT_PLAIN: disk_size + swap_size,
+    # 256 MB are added for drbd metadata, 128 MB for each drbd device
+    constants.DT_DRBD8: disk_size + swap_size + 256,
+    constants.DT_FILE: None,
+  }
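+  # a size of None means the template does not consume volume group space;
+  # for example, a 10240 MB disk plus 4096 MB swap on drbd8 requires
+  # 10240 + 4096 + 256 = 14592 MB of free space in the volume group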
+
+  if disk_template not in req_size_dict:
+    raise errors.ProgrammerError("Disk template '%s' size requirement"
+                                 " is unknown" % disk_template)
+
+  return req_size_dict[disk_template]
+
+
 class LUCreateInstance(LogicalUnit):
   """Create an instance.
 
   """
   HPATH = "instance-add"
   HTYPE = constants.HTYPE_INSTANCE
-  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
+  _OP_REQP = ["instance_name", "mem_size", "disk_size",
               "disk_template", "swap_size", "mode", "start", "vcpus",
-              "wait_for_sync"]
+              "wait_for_sync", "ip_check", "mac"]
+
+  def _RunAllocator(self):
+    """Run the allocator based on input opcode.
+
+    """
+    disks = [{"size": self.op.disk_size, "mode": "w"},
+             {"size": self.op.swap_size, "mode": "w"}]
+    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
+             "bridge": self.op.bridge}]
+    ial = IAllocator(self.cfg, self.sstore,
+                     name=self.op.instance_name,
+                     disk_template=self.op.disk_template,
+                     tags=[],
+                     os=self.op.os_type,
+                     vcpus=self.op.vcpus,
+                     mem_size=self.op.mem_size,
+                     disks=disks,
+                     nics=nics,
+                     mode=constants.IALLOCATOR_MODE_ALLOC)
+
+    ial.Run(self.op.iallocator)
+
+    if not ial.success:
+      raise errors.OpPrereqError("Can't compute nodes using"
+                                 " iallocator '%s': %s" % (self.op.iallocator,
+                                                           ial.info))
+    if len(ial.nodes) != ial.required_nodes:
+      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
+                                 " of nodes (%s), required %s" %
+                                 (self.op.iallocator, len(ial.nodes),
+                                  ial.required_nodes))
+    self.op.pnode = ial.nodes[0]
+    logger.ToStdout("Selected nodes for the instance: %s" %
+                    (", ".join(ial.nodes),))
+    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
+                (self.op.instance_name, self.op.iallocator, ial.nodes))
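+    # a required_nodes of two means a mirrored disk template; the second node
+    # returned by the allocator becomes the secondary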
+    if ial.required_nodes == 2:
+      self.op.snode = ial.nodes[1]
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -2535,7 +3195,7 @@ class LUCreateInstance(LogicalUnit):
       os_type=self.op.os_type,
       memory=self.op.mem_size,
       vcpus=self.op.vcpus,
-      nics=[(self.inst_ip, self.op.bridge)],
+      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
     ))
 
     nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
@@ -2547,11 +3207,22 @@ class LUCreateInstance(LogicalUnit):
     """Check prerequisites.
 
     """
+    # set optional parameters to none if they don't exist
+    for attr in ["kernel_path", "initrd_path", "hvm_boot_order", "pnode",
+                 "iallocator"]:
+      if not hasattr(self.op, attr):
+        setattr(self.op, attr, None)
+
     if self.op.mode not in (constants.INSTANCE_CREATE,
                             constants.INSTANCE_IMPORT):
       raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                  self.op.mode)
 
+    if (not self.cfg.GetVGName() and
+        self.op.disk_template not in constants.DTS_NOT_LVM):
+      raise errors.OpPrereqError("Cluster does not support lvm-based"
+                                 " instances")
+
     if self.op.mode == constants.INSTANCE_IMPORT:
       src_node = getattr(self.op, "src_node", None)
       src_path = getattr(self.op, "src_path", None)
@@ -2592,96 +3263,48 @@ class LUCreateInstance(LogicalUnit):
       if getattr(self.op, "os_type", None) is None:
         raise errors.OpPrereqError("No guest OS specified")
 
-    # check primary node
-    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
-    if pnode is None:
-      raise errors.OpPrereqError("Primary node '%s' is unknown" %
-                                 self.op.pnode)
-    self.op.pnode = pnode.name
-    self.pnode = pnode
-    self.secondaries = []
+    #### instance parameters check
+
     # disk template and mirror node verification
     if self.op.disk_template not in constants.DISK_TEMPLATES:
       raise errors.OpPrereqError("Invalid disk template name")
 
-    if self.op.disk_template == constants.DT_REMOTE_RAID1:
-      if getattr(self.op, "snode", None) is None:
-        raise errors.OpPrereqError("The 'remote_raid1' disk template needs"
-                                   " a mirror node")
-
-      snode_name = self.cfg.ExpandNodeName(self.op.snode)
-      if snode_name is None:
-        raise errors.OpPrereqError("Unknown secondary node '%s'" %
-                                   self.op.snode)
-      elif snode_name == pnode.name:
-        raise errors.OpPrereqError("The secondary node cannot be"
-                                   " the primary node.")
-      self.secondaries.append(snode_name)
-
-    # Check lv size requirements
-    nodenames = [pnode.name] + self.secondaries
-    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
-
-    # Required free disk space as a function of disk and swap space
-    req_size_dict = {
-      constants.DT_DISKLESS: 0,
-      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
-      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
-      # 256 MB are added for drbd metadata, 128MB for each drbd device
-      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
-    }
-
-    if self.op.disk_template not in req_size_dict:
-      raise errors.ProgrammerError("Disk template '%s' size requirement"
-                                   " is unknown" %  self.op.disk_template)
-
-    req_size = req_size_dict[self.op.disk_template]
-
-    for node in nodenames:
-      info = nodeinfo.get(node, None)
-      if not info:
-        raise errors.OpPrereqError("Cannot get current information"
-                                   " from node '%s'" % nodeinfo)
-      if req_size > info['vg_free']:
-        raise errors.OpPrereqError("Not enough disk space on target node %s."
-                                   " %d MB available, %d MB required" %
-                                   (node, info['vg_free'], req_size))
-
-    # os verification
-    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
-    if not isinstance(os_obj, objects.OS):
-      raise errors.OpPrereqError("OS '%s' not in supported os list for"
-                                 " primary node"  % self.op.os_type)
-
-    # instance verification
-    hostname1 = utils.LookupHostname(self.op.instance_name)
-    if not hostname1:
-      raise errors.OpPrereqError("Instance name '%s' not found in dns" %
-                                 self.op.instance_name)
+    # instance name verification
+    hostname1 = utils.HostInfo(self.op.instance_name)
 
-    self.op.instance_name = instance_name = hostname1['hostname']
+    self.op.instance_name = instance_name = hostname1.name
     instance_list = self.cfg.GetInstanceList()
     if instance_name in instance_list:
       raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                  instance_name)
 
+    # ip validity checks
     ip = getattr(self.op, "ip", None)
     if ip is None or ip.lower() == "none":
       inst_ip = None
     elif ip.lower() == "auto":
-      inst_ip = hostname1['ip']
+      inst_ip = hostname1.ip
     else:
       if not utils.IsValidIP(ip):
         raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                    " like a valid IP" % ip)
       inst_ip = ip
-    self.inst_ip = inst_ip
+    self.inst_ip = self.op.ip = inst_ip
+
+    if self.op.start and not self.op.ip_check:
+      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
+                                 " adding an instance in start mode")
 
-    command = ["fping", "-q", hostname1['ip']]
-    result = utils.RunCmd(command)
-    if not result.failed:
-      raise errors.OpPrereqError("IP %s of instance %s already in use" %
-                                 (hostname1['ip'], instance_name))
+    if self.op.ip_check:
+      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
+        raise errors.OpPrereqError("IP %s of instance %s already in use" %
+                                   (hostname1.ip, instance_name))
+
+    # MAC address verification
+    if self.op.mac != "auto":
+      if not utils.IsValidMac(self.op.mac.lower()):
+        raise errors.OpPrereqError("invalid MAC address specified: %s" %
+                                   self.op.mac)
 
     # bridge verification
     bridge = getattr(self.op, "bridge", None)
@@ -2690,40 +3313,150 @@ class LUCreateInstance(LogicalUnit):
     else:
       self.op.bridge = bridge
 
-    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
-      raise errors.OpPrereqError("target bridge '%s' does not exist on"
-                                 " destination node '%s'" %
-                                 (self.op.bridge, pnode.name))
-
-    if self.op.start:
-      self.instance_status = 'up'
-    else:
-      self.instance_status = 'down'
+    # boot order verification
+    if self.op.hvm_boot_order is not None:
+      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
+        raise errors.OpPrereqError("invalid boot order specified,"
+                                   " must be one or more of [acdn]")
+    # file storage checks
+    if (self.op.file_driver and
+        not self.op.file_driver in constants.FILE_DRIVER):
+      raise errors.OpPrereqError("Invalid file driver name '%s'" %
+                                 self.op.file_driver)
 
-  def Exec(self, feedback_fn):
-    """Create and add the instance to the cluster.
+    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
+        raise errors.OpPrereqError("File storage directory not a relative"
+                                   " path")
+    #### allocator run
 
-    """
-    instance = self.op.instance_name
-    pnode_name = self.pnode.name
+    if [self.op.iallocator, self.op.pnode].count(None) != 1:
+      raise errors.OpPrereqError("One and only one of iallocator and primary"
+                                 " node must be given")
 
-    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
-    if self.inst_ip is not None:
-      nic.ip = self.inst_ip
+    if self.op.iallocator is not None:
+      self._RunAllocator()
 
-    disks = _GenerateDiskTemplate(self.cfg,
-                                  self.op.disk_template,
-                                  instance, pnode_name,
-                                  self.secondaries, self.op.disk_size,
-                                  self.op.swap_size)
+    #### node related checks
 
-    iobj = objects.Instance(name=instance, os=self.op.os_type,
-                            primary_node=pnode_name,
-                            memory=self.op.mem_size,
+    # check primary node
+    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
+    if pnode is None:
+      raise errors.OpPrereqError("Primary node '%s' is unknown" %
+                                 self.op.pnode)
+    self.op.pnode = pnode.name
+    self.pnode = pnode
+    self.secondaries = []
+
+    # mirror node verification
+    if self.op.disk_template in constants.DTS_NET_MIRROR:
+      if getattr(self.op, "snode", None) is None:
+        raise errors.OpPrereqError("The networked disk templates need"
+                                   " a mirror node")
+
+      snode_name = self.cfg.ExpandNodeName(self.op.snode)
+      if snode_name is None:
+        raise errors.OpPrereqError("Unknown secondary node '%s'" %
+                                   self.op.snode)
+      elif snode_name == pnode.name:
+        raise errors.OpPrereqError("The secondary node cannot be"
+                                   " the primary node.")
+      self.secondaries.append(snode_name)
+
+    req_size = _ComputeDiskSize(self.op.disk_template,
+                                self.op.disk_size, self.op.swap_size)
+
+    # Check lv size requirements
+    if req_size is not None:
+      nodenames = [pnode.name] + self.secondaries
+      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+      for node in nodenames:
+        info = nodeinfo.get(node, None)
+        if not info:
+          raise errors.OpPrereqError("Cannot get current information"
+                                     " from node '%s'" % nodeinfo)
+        vg_free = info.get('vg_free', None)
+        if not isinstance(vg_free, int):
+          raise errors.OpPrereqError("Can't compute free disk space on"
+                                     " node %s" % node)
+        if req_size > info['vg_free']:
+          raise errors.OpPrereqError("Not enough disk space on target node %s."
+                                     " %d MB available, %d MB required" %
+                                     (node, info['vg_free'], req_size))
+
+    # os verification
+    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
+    if not os_obj:
+      raise errors.OpPrereqError("OS '%s' not in supported os list for"
+                                 " primary node"  % self.op.os_type)
+
+    if self.op.kernel_path == constants.VALUE_NONE:
+      raise errors.OpPrereqError("Can't set instance kernel to none")
+
+
+    # bridge check on primary node
+    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
+      raise errors.OpPrereqError("target bridge '%s' does not exist on"
+                                 " destination node '%s'" %
+                                 (self.op.bridge, pnode.name))
+
+    if self.op.start:
+      self.instance_status = 'up'
+    else:
+      self.instance_status = 'down'
+
+  def Exec(self, feedback_fn):
+    """Create and add the instance to the cluster.
+
+    """
+    instance = self.op.instance_name
+    pnode_name = self.pnode.name
+
+    if self.op.mac == "auto":
+      mac_address = self.cfg.GenerateMAC()
+    else:
+      mac_address = self.op.mac
+
+    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
+    if self.inst_ip is not None:
+      nic.ip = self.inst_ip
+
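+    # some hypervisors (e.g. HVM with its VNC console) need a network port
+    # reserved for the instance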
+    ht_kind = self.sstore.GetHypervisorType()
+    if ht_kind in constants.HTS_REQ_PORT:
+      network_port = self.cfg.AllocatePort()
+    else:
+      network_port = None
+
+    # this is needed because os.path.join does not accept None arguments
+    if self.op.file_storage_dir is None:
+      string_file_storage_dir = ""
+    else:
+      string_file_storage_dir = self.op.file_storage_dir
+
+    # build the full file storage dir path
+    file_storage_dir = os.path.normpath(os.path.join(
+                                        self.sstore.GetFileStorageDir(),
+                                        string_file_storage_dir, instance))
+
+
+    disks = _GenerateDiskTemplate(self.cfg,
+                                  self.op.disk_template,
+                                  instance, pnode_name,
+                                  self.secondaries, self.op.disk_size,
+                                  self.op.swap_size,
+                                  file_storage_dir,
+                                  self.op.file_driver)
+
+    iobj = objects.Instance(name=instance, os=self.op.os_type,
+                            primary_node=pnode_name,
+                            memory=self.op.mem_size,
                             vcpus=self.op.vcpus,
                             nics=[nic], disks=disks,
                             disk_template=self.op.disk_template,
                             status=self.instance_status,
+                            network_port=network_port,
+                            kernel_path=self.op.kernel_path,
+                            initrd_path=self.op.initrd_path,
+                            hvm_boot_order=self.op.hvm_boot_order,
                             )
 
     feedback_fn("* creating instance disks...")
@@ -2736,12 +3469,12 @@ class LUCreateInstance(LogicalUnit):
     self.cfg.AddInstance(iobj)
 
     if self.op.wait_for_sync:
-      disk_abort = not _WaitForSync(self.cfg, iobj)
-    elif iobj.disk_template == constants.DT_REMOTE_RAID1:
+      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
+    elif iobj.disk_template in constants.DTS_NET_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
       time.sleep(15)
       feedback_fn("* checking mirrors status")
-      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
+      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
     else:
       disk_abort = False
 
@@ -2823,213 +3556,10 @@ class LUConnectConsole(NoHooksLU):
     logger.Debug("connecting to console of %s on %s" % (instance.name, node))
 
     hyper = hypervisor.GetHypervisor()
-    console_cmd = hyper.GetShellCommandForConsole(instance.name)
-    # build ssh cmdline
-    argv = ["ssh", "-q", "-t"]
-    argv.extend(ssh.KNOWN_HOSTS_OPTS)
-    argv.extend(ssh.BATCH_MODE_OPTS)
-    argv.append(node)
-    argv.append(console_cmd)
-    return "ssh", argv
-
-
-class LUAddMDDRBDComponent(LogicalUnit):
-  """Adda new mirror member to an instance's disk.
-
-  """
-  HPATH = "mirror-add"
-  HTYPE = constants.HTYPE_INSTANCE
-  _OP_REQP = ["instance_name", "remote_node", "disk_name"]
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on the master, the primary and all the secondaries.
-
-    """
-    env = {
-      "NEW_SECONDARY": self.op.remote_node,
-      "DISK_NAME": self.op.disk_name,
-      }
-    env.update(_BuildInstanceHookEnvByObject(self.instance))
-    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
-          self.op.remote_node,] + list(self.instance.secondary_nodes)
-    return env, nl, nl
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    instance = self.cfg.GetInstanceInfo(
-      self.cfg.ExpandInstanceName(self.op.instance_name))
-    if instance is None:
-      raise errors.OpPrereqError("Instance '%s' not known" %
-                                 self.op.instance_name)
-    self.instance = instance
-
-    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
-    if remote_node is None:
-      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
-    self.remote_node = remote_node
-
-    if remote_node == instance.primary_node:
-      raise errors.OpPrereqError("The specified node is the primary node of"
-                                 " the instance.")
-
-    if instance.disk_template != constants.DT_REMOTE_RAID1:
-      raise errors.OpPrereqError("Instance's disk layout is not"
-                                 " remote_raid1.")
-    for disk in instance.disks:
-      if disk.iv_name == self.op.disk_name:
-        break
-    else:
-      raise errors.OpPrereqError("Can't find this device ('%s') in the"
-                                 " instance." % self.op.disk_name)
-    if len(disk.children) > 1:
-      raise errors.OpPrereqError("The device already has two slave"
-                                 " devices.\n"
-                                 "This would create a 3-disk raid1"
-                                 " which we don't allow.")
-    self.disk = disk
-
-  def Exec(self, feedback_fn):
-    """Add the mirror component
-
-    """
-    disk = self.disk
-    instance = self.instance
-
-    remote_node = self.remote_node
-    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
-    names = _GenerateUniqueNames(self.cfg, lv_names)
-    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
-                                     remote_node, disk.size, names)
-
-    logger.Info("adding new mirror component on secondary")
-    #HARDCODE
-    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False,
-                                      _GetInstanceInfoText(instance)):
-      raise errors.OpExecError("Failed to create new component on secondary"
-                               " node %s" % remote_node)
-
-    logger.Info("adding new mirror component on primary")
-    #HARDCODE
-    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd,
-                                    _GetInstanceInfoText(instance)):
-      # remove secondary dev
-      self.cfg.SetDiskID(new_drbd, remote_node)
-      rpc.call_blockdev_remove(remote_node, new_drbd)
-      raise errors.OpExecError("Failed to create volume on primary")
-
-    # the device exists now
-    # call the primary node to add the mirror to md
-    logger.Info("adding new mirror component to md")
-    if not rpc.call_blockdev_addchild(instance.primary_node,
-                                           disk, new_drbd):
-      logger.Error("Can't add mirror compoment to md!")
-      self.cfg.SetDiskID(new_drbd, remote_node)
-      if not rpc.call_blockdev_remove(remote_node, new_drbd):
-        logger.Error("Can't rollback on secondary")
-      self.cfg.SetDiskID(new_drbd, instance.primary_node)
-      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
-        logger.Error("Can't rollback on primary")
-      raise errors.OpExecError("Can't add mirror component to md array")
-
-    disk.children.append(new_drbd)
-
-    self.cfg.AddInstance(instance)
-
-    _WaitForSync(self.cfg, instance)
-
-    return 0
-
-
-class LURemoveMDDRBDComponent(LogicalUnit):
-  """Remove a component from a remote_raid1 disk.
-
-  """
-  HPATH = "mirror-remove"
-  HTYPE = constants.HTYPE_INSTANCE
-  _OP_REQP = ["instance_name", "disk_name", "disk_id"]
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on the master, the primary and all the secondaries.
-
-    """
-    env = {
-      "DISK_NAME": self.op.disk_name,
-      "DISK_ID": self.op.disk_id,
-      "OLD_SECONDARY": self.old_secondary,
-      }
-    env.update(_BuildInstanceHookEnvByObject(self.instance))
-    nl = [self.sstore.GetMasterNode(),
-          self.instance.primary_node] + list(self.instance.secondary_nodes)
-    return env, nl, nl
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    instance = self.cfg.GetInstanceInfo(
-      self.cfg.ExpandInstanceName(self.op.instance_name))
-    if instance is None:
-      raise errors.OpPrereqError("Instance '%s' not known" %
-                                 self.op.instance_name)
-    self.instance = instance
-
-    if instance.disk_template != constants.DT_REMOTE_RAID1:
-      raise errors.OpPrereqError("Instance's disk layout is not"
-                                 " remote_raid1.")
-    for disk in instance.disks:
-      if disk.iv_name == self.op.disk_name:
-        break
-    else:
-      raise errors.OpPrereqError("Can't find this device ('%s') in the"
-                                 " instance." % self.op.disk_name)
-    for child in disk.children:
-      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
-        break
-    else:
-      raise errors.OpPrereqError("Can't find the device with this port.")
-
-    if len(disk.children) < 2:
-      raise errors.OpPrereqError("Cannot remove the last component from"
-                                 " a mirror.")
-    self.disk = disk
-    self.child = child
-    if self.child.logical_id[0] == instance.primary_node:
-      oid = 1
-    else:
-      oid = 0
-    self.old_secondary = self.child.logical_id[oid]
+    console_cmd = hyper.GetShellCommandForConsole(instance)
 
-  def Exec(self, feedback_fn):
-    """Remove the mirror component
-
-    """
-    instance = self.instance
-    disk = self.disk
-    child = self.child
-    logger.Info("remove mirror component")
-    self.cfg.SetDiskID(disk, instance.primary_node)
-    if not rpc.call_blockdev_removechild(instance.primary_node,
-                                              disk, child):
-      raise errors.OpExecError("Can't remove child from mirror.")
-
-    for node in child.logical_id[:2]:
-      self.cfg.SetDiskID(child, node)
-      if not rpc.call_blockdev_remove(node, child):
-        logger.Error("Warning: failed to remove device from node %s,"
-                     " continuing operation." % node)
-
-    disk.children.remove(child)
-    self.cfg.AddInstance(instance)
+    # build ssh cmdline
+    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
 
 
 class LUReplaceDisks(LogicalUnit):
@@ -3038,7 +3568,7 @@ class LUReplaceDisks(LogicalUnit):
   """
   HPATH = "mirrors-replace"
   HTYPE = constants.HTYPE_INSTANCE
-  _OP_REQP = ["instance_name"]
+  _OP_REQP = ["instance_name", "mode", "disks"]
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -3047,12 +3577,17 @@ class LUReplaceDisks(LogicalUnit):
 
     """
     env = {
+      "MODE": self.op.mode,
       "NEW_SECONDARY": self.op.remote_node,
       "OLD_SECONDARY": self.instance.secondary_nodes[0],
       }
     env.update(_BuildInstanceHookEnvByObject(self.instance))
-    nl = [self.sstore.GetMasterNode(),
-          self.instance.primary_node] + list(self.instance.secondary_nodes)
+    nl = [
+      self.sstore.GetMasterNode(),
+      self.instance.primary_node,
+      ]
+    if self.op.remote_node is not None:
+      nl.append(self.op.remote_node)
     return env, nl, nl
 
   def CheckPrereq(self):
@@ -3067,37 +3602,86 @@ class LUReplaceDisks(LogicalUnit):
       raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
     self.instance = instance
+    self.op.instance_name = instance.name
 
-    if instance.disk_template != constants.DT_REMOTE_RAID1:
+    if instance.disk_template not in constants.DTS_NET_MIRROR:
       raise errors.OpPrereqError("Instance's disk layout is not"
-                                 " remote_raid1.")
+                                 " network mirrored.")
 
     if len(instance.secondary_nodes) != 1:
       raise errors.OpPrereqError("The instance has a strange layout,"
                                  " expected one secondary but found %d" %
                                  len(instance.secondary_nodes))
 
+    self.sec_node = instance.secondary_nodes[0]
+
     remote_node = getattr(self.op, "remote_node", None)
-    if remote_node is None:
-      remote_node = instance.secondary_nodes[0]
-    else:
+    if remote_node is not None:
       remote_node = self.cfg.ExpandNodeName(remote_node)
       if remote_node is None:
         raise errors.OpPrereqError("Node '%s' not known" %
                                    self.op.remote_node)
+      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
+    else:
+      self.remote_node_info = None
     if remote_node == instance.primary_node:
       raise errors.OpPrereqError("The specified node is the primary node of"
                                  " the instance.")
+    elif remote_node == self.sec_node:
+      if self.op.mode == constants.REPLACE_DISK_SEC:
+        # this is for DRBD8, where we can't execute the same mode of
+        # replacement as for drbd7 (no different port allocated)
+        raise errors.OpPrereqError("Same secondary given, cannot execute"
+                                   " replacement")
+      # the user gave the current secondary, switch to
+      # 'no-replace-secondary' mode for drbd7
+      remote_node = None
+    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
+        self.op.mode != constants.REPLACE_DISK_ALL):
+      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
+                                 " disks replacement, not individual ones")
+    if instance.disk_template == constants.DT_DRBD8:
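+      # naming used in the branches below: tgt_node is the node whose storage
+      # is being replaced, oth_node is its untouched peer, and new_node (only
+      # set when replacing the secondary) is the prospective new secondary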
+      if (self.op.mode == constants.REPLACE_DISK_ALL and
+          remote_node is not None):
+        # switch to replace secondary mode
+        self.op.mode = constants.REPLACE_DISK_SEC
+
+      if self.op.mode == constants.REPLACE_DISK_ALL:
+        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
+                                   " secondary disk replacement, not"
+                                   " both at once")
+      elif self.op.mode == constants.REPLACE_DISK_PRI:
+        if remote_node is not None:
+          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
+                                     " the secondary while doing a primary"
+                                     " node disk replacement")
+        self.tgt_node = instance.primary_node
+        self.oth_node = instance.secondary_nodes[0]
+      elif self.op.mode == constants.REPLACE_DISK_SEC:
+        self.new_node = remote_node # this can be None, in which case
+                                    # we don't change the secondary
+        self.tgt_node = instance.secondary_nodes[0]
+        self.oth_node = instance.primary_node
+      else:
+        raise errors.ProgrammerError("Unhandled disk replace mode")
+
+    for name in self.op.disks:
+      if instance.FindDisk(name) is None:
+        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
+                                   (name, instance.name))
     self.op.remote_node = remote_node
 
-  def Exec(self, feedback_fn):
+  def _ExecRR1(self, feedback_fn):
     """Replace the disks of an instance.
 
     """
     instance = self.instance
     iv_names = {}
     # start of work
-    remote_node = self.op.remote_node
+    if self.op.remote_node is None:
+      remote_node = self.sec_node
+    else:
+      remote_node = self.op.remote_node
     cfg = self.cfg
     for dev in instance.disks:
       size = dev.size
@@ -3109,28 +3693,29 @@ class LUReplaceDisks(LogicalUnit):
       logger.Info("adding new mirror component on secondary for %s" %
                   dev.iv_name)
       #HARDCODE
-      if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False,
+      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
+                                        new_drbd, False,
                                         _GetInstanceInfoText(instance)):
-        raise errors.OpExecError("Failed to create new component on"
-                                 " secondary node %s\n"
-                                 "Full abort, cleanup manually!" %
+        raise errors.OpExecError("Failed to create new component on secondary"
+                                 " node %s. Full abort, cleanup manually!" %
                                  remote_node)
 
       logger.Info("adding new mirror component on primary")
       #HARDCODE
-      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd,
+      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
+                                      instance, new_drbd,
                                       _GetInstanceInfoText(instance)):
         # remove secondary dev
         cfg.SetDiskID(new_drbd, remote_node)
         rpc.call_blockdev_remove(remote_node, new_drbd)
-        raise errors.OpExecError("Failed to create volume on primary!\n"
-                                 "Full abort, cleanup manually!!")
+        raise errors.OpExecError("Failed to create volume on primary!"
+                                 " Full abort, cleanup manually!!")
 
       # the device exists now
       # call the primary node to add the mirror to md
       logger.Info("adding new mirror component to md")
-      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
-                                        new_drbd):
+      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
+                                           [new_drbd]):
         logger.Error("Can't add mirror compoment to md!")
         cfg.SetDiskID(new_drbd, remote_node)
         if not rpc.call_blockdev_remove(remote_node, new_drbd):
@@ -3146,7 +3731,7 @@ class LUReplaceDisks(LogicalUnit):
     # this can fail as the old devices are degraded and _WaitForSync
     # does a combined result over all disks, so we don't check its
     # return value
-    _WaitForSync(cfg, instance, unlock=True)
+    _WaitForSync(cfg, instance, self.proc, unlock=True)
 
     # so check manually all the devices
     for name in iv_names:
@@ -3164,8 +3749,8 @@ class LUReplaceDisks(LogicalUnit):
       dev, child, new_drbd = iv_names[name]
       logger.Info("remove mirror %s component" % name)
       cfg.SetDiskID(dev, instance.primary_node)
-      if not rpc.call_blockdev_removechild(instance.primary_node,
-                                                dev, child):
+      if not rpc.call_blockdev_removechildren(instance.primary_node,
+                                              dev, [child]):
         logger.Error("Can't remove child from mirror, aborting"
                      " *this device cleanup*.\nYou need to cleanup manually!!")
         continue
@@ -3181,6 +3766,358 @@ class LUReplaceDisks(LogicalUnit):
 
       cfg.AddInstance(instance)
 
+  def _ExecD8DiskOnly(self, feedback_fn):
+    """Replace a disk on the primary or secondary for dbrd8.
+
+    The algorithm for replace is quite complicated:
+      - for each disk to be replaced:
+        - create new LVs on the target node with unique names
+        - detach old LVs from the drbd device
+        - rename old LVs to name_replaced.<time_t>
+        - rename new LVs to old LVs
+        - attach the new LVs (with the old names now) to the drbd device
+      - wait for sync across all devices
+      - for each modified disk:
+        - remove old LVs (which have the name name_replaced.<time_t>)
+
+    Failures are not very well handled.
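+
+    The execution is reported to the caller as six proc.LogStep phases:
+    device checks, peer consistency check, new storage allocation, drbd
+    reconfiguration, sync, and removal of the old storage.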
+
+    """
+    steps_total = 6
+    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
+    instance = self.instance
+    iv_names = {}
+    vgname = self.cfg.GetVGName()
+    # start of work
+    cfg = self.cfg
+    tgt_node = self.tgt_node
+    oth_node = self.oth_node
+
+    # Step: check device activation
+    self.proc.LogStep(1, steps_total, "check device existence")
+    info("checking volume groups")
+    my_vg = cfg.GetVGName()
+    results = rpc.call_vg_list([oth_node, tgt_node])
+    if not results:
+      raise errors.OpExecError("Can't list volume groups on the nodes")
+    for node in oth_node, tgt_node:
+      res = results.get(node, False)
+      if not res or my_vg not in res:
+        raise errors.OpExecError("Volume group '%s' not found on %s" %
+                                 (my_vg, node))
+    for dev in instance.disks:
+      if not dev.iv_name in self.op.disks:
+        continue
+      for node in tgt_node, oth_node:
+        info("checking %s on %s" % (dev.iv_name, node))
+        cfg.SetDiskID(dev, node)
+        if not rpc.call_blockdev_find(node, dev):
+          raise errors.OpExecError("Can't find device %s on node %s" %
+                                   (dev.iv_name, node))
+
+    # Step: check other node consistency
+    self.proc.LogStep(2, steps_total, "check peer consistency")
+    for dev in instance.disks:
+      if not dev.iv_name in self.op.disks:
+        continue
+      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
+      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
+                                   oth_node == instance.primary_node):
+        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
+                                 " to replace disks on this node (%s)" %
+                                 (oth_node, tgt_node))
+
+    # Step: create new storage
+    self.proc.LogStep(3, steps_total, "allocate new storage")
+    for dev in instance.disks:
+      if not dev.iv_name in self.op.disks:
+        continue
+      size = dev.size
+      cfg.SetDiskID(dev, tgt_node)
+      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
+      names = _GenerateUniqueNames(cfg, lv_names)
+      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
+                             logical_id=(vgname, names[0]))
+      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
+                             logical_id=(vgname, names[1]))
+      new_lvs = [lv_data, lv_meta]
+      old_lvs = dev.children
+      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
+      info("creating new local storage on %s for %s" %
+           (tgt_node, dev.iv_name))
+      # since we *always* want to create this LV, we use the
+      # _Create...OnPrimary (which forces the creation), even if we
+      # are talking about the secondary node
+      for new_lv in new_lvs:
+        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
+                                        _GetInstanceInfoText(instance)):
+          raise errors.OpExecError("Failed to create new LV named '%s' on"
+                                   " node '%s'" %
+                                   (new_lv.logical_id[1], tgt_node))
+
+    # Step: for each lv, detach+rename*2+attach
+    self.proc.LogStep(4, steps_total, "change drbd configuration")
+    for dev, old_lvs, new_lvs in iv_names.itervalues():
+      info("detaching %s drbd from local storage" % dev.iv_name)
+      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
+        raise errors.OpExecError("Can't detach drbd from local storage on node"
+                                 " %s for device %s" % (tgt_node, dev.iv_name))
+      #dev.children = []
+      #cfg.Update(instance)
+
+      # ok, we created the new LVs, so now we know we have the needed
+      # storage; as such, we proceed on the target node to rename
+      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
+      # using the assumption that logical_id == physical_id (which in
+      # turn is the unique_id on that node)
+
+      # FIXME(iustin): use a better name for the replaced LVs
+      temp_suffix = int(time.time())
+      ren_fn = lambda d, suff: (d.physical_id[0],
+                                d.physical_id[1] + "_replaced-%s" % suff)
+      # build the rename list based on what LVs exist on the node
+      rlist = []
+      for to_ren in old_lvs:
+        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
+        if find_res is not None: # device exists
+          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
+
+      info("renaming the old LVs on the target node")
+      if not rpc.call_blockdev_rename(tgt_node, rlist):
+        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
+      # now we rename the new LVs to the old LVs
+      info("renaming the new LVs on the target node")
+      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
+      if not rpc.call_blockdev_rename(tgt_node, rlist):
+        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
+
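+      # reflect the renames in our Disk objects: the new LVs take over the
+      # old logical ids, while the old LVs keep their temporary "_replaced"
+      # names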
+      for old, new in zip(old_lvs, new_lvs):
+        new.logical_id = old.logical_id
+        cfg.SetDiskID(new, tgt_node)
+
+      for disk in old_lvs:
+        disk.logical_id = ren_fn(disk, temp_suffix)
+        cfg.SetDiskID(disk, tgt_node)
+
+      # now that the new lvs have the old name, we can add them to the device
+      info("adding new mirror component on %s" % tgt_node)
+      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
+        for new_lv in new_lvs:
+          if not rpc.call_blockdev_remove(tgt_node, new_lv):
+            warning("Can't rollback device %s", hint="manually cleanup unused"
+                    " logical volumes")
+        raise errors.OpExecError("Can't add local storage to drbd")
+
+      dev.children = new_lvs
+      cfg.Update(instance)
+
+    # Step: wait for sync
+
+    # this can fail as the old devices are degraded and _WaitForSync
+    # does a combined result over all disks, so we don't check its
+    # return value
+    self.proc.LogStep(5, steps_total, "sync devices")
+    _WaitForSync(cfg, instance, self.proc, unlock=True)
+
+    # so check manually all the devices
+    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+      cfg.SetDiskID(dev, instance.primary_node)
+      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
+      if is_degr:
+        raise errors.OpExecError("DRBD device %s is degraded!" % name)
+
+    # Step: remove old storage
+    self.proc.LogStep(6, steps_total, "removing old storage")
+    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+      info("remove logical volumes for %s" % name)
+      for lv in old_lvs:
+        cfg.SetDiskID(lv, tgt_node)
+        if not rpc.call_blockdev_remove(tgt_node, lv):
+          warning("Can't remove old LV", hint="manually remove unused LVs")
+          continue
+
+  def _ExecD8Secondary(self, feedback_fn):
+    """Replace the secondary node for drbd8.
+
+    The algorithm for replace is quite complicated:
+      - for all disks of the instance:
+        - create new LVs on the new node with same names
+        - shutdown the drbd device on the old secondary
+        - disconnect the drbd network on the primary
+        - create the drbd device on the new secondary
+        - network attach the drbd on the primary, using an artifice:
+          the drbd code for Attach() will connect to the network if it
+          finds a device which is connected to the good local disks but
+          not network enabled
+      - wait for sync across all devices
+      - remove all disks from the old secondary
+
+    Failures are not very well handled.
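+
+    The new DRBD devices are created with the same children and the same
+    network port (dev.logical_id[2]) as the old ones; only the secondary
+    node in their logical id changes.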
+
+    """
+    steps_total = 6
+    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
+    instance = self.instance
+    iv_names = {}
+    vgname = self.cfg.GetVGName()
+    # start of work
+    cfg = self.cfg
+    old_node = self.tgt_node
+    new_node = self.new_node
+    pri_node = instance.primary_node
+
+    # Step: check device activation
+    self.proc.LogStep(1, steps_total, "check device existence")
+    info("checking volume groups")
+    my_vg = cfg.GetVGName()
+    results = rpc.call_vg_list([pri_node, new_node])
+    if not results:
+      raise errors.OpExecError("Can't list volume groups on the nodes")
+    for node in pri_node, new_node:
+      res = results.get(node, False)
+      if not res or my_vg not in res:
+        raise errors.OpExecError("Volume group '%s' not found on %s" %
+                                 (my_vg, node))
+    for dev in instance.disks:
+      if not dev.iv_name in self.op.disks:
+        continue
+      info("checking %s on %s" % (dev.iv_name, pri_node))
+      cfg.SetDiskID(dev, pri_node)
+      if not rpc.call_blockdev_find(pri_node, dev):
+        raise errors.OpExecError("Can't find device %s on node %s" %
+                                 (dev.iv_name, pri_node))
+
+    # Step: check other node consistency
+    self.proc.LogStep(2, steps_total, "check peer consistency")
+    for dev in instance.disks:
+      if not dev.iv_name in self.op.disks:
+        continue
+      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
+      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
+        raise errors.OpExecError("Primary node (%s) has degraded storage,"
+                                 " unsafe to replace the secondary" %
+                                 pri_node)
+
+    # Step: create new storage
+    self.proc.LogStep(3, steps_total, "allocate new storage")
+    for dev in instance.disks:
+      size = dev.size
+      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
+      # since we *always* want to create this LV, we use the
+      # _Create...OnPrimary (which forces the creation), even if we
+      # are talking about the secondary node
+      for new_lv in dev.children:
+        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
+                                        _GetInstanceInfoText(instance)):
+          raise errors.OpExecError("Failed to create new LV named '%s' on"
+                                   " node '%s'" %
+                                   (new_lv.logical_id[1], new_node))
+
+      iv_names[dev.iv_name] = (dev, dev.children)
+
+    self.proc.LogStep(4, steps_total, "changing drbd configuration")
+    for dev in instance.disks:
+      size = dev.size
+      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
+      # create new devices on new_node
+      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
+                              logical_id=(pri_node, new_node,
+                                          dev.logical_id[2]),
+                              children=dev.children)
+      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
+                                        new_drbd, False,
+                                        _GetInstanceInfoText(instance)):
+        raise errors.OpExecError("Failed to create new DRBD on"
+                                 " node '%s'" % new_node)
+
+    for dev in instance.disks:
+      # we have new devices, shutdown the drbd on the old secondary
+      info("shutting down drbd for %s on old node" % dev.iv_name)
+      cfg.SetDiskID(dev, old_node)
+      if not rpc.call_blockdev_shutdown(old_node, dev):
+        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
+                hint="Please cleanup this device manually as soon as possible")
+
+    info("detaching primary drbds from the network (=> standalone)")
+    done = 0
+    for dev in instance.disks:
+      cfg.SetDiskID(dev, pri_node)
+      # set the physical (unique in bdev terms) id to None, meaning
+      # detach from network
+      dev.physical_id = (None,) * len(dev.physical_id)
+      # and 'find' the device, which will 'fix' it to match the
+      # standalone state
+      if rpc.call_blockdev_find(pri_node, dev):
+        done += 1
+      else:
+        warning("Failed to detach drbd %s from network, unusual case" %
+                dev.iv_name)
+
+    if not done:
+      # no detaches succeeded (very unlikely)
+      raise errors.OpExecError("Can't detach at least one DRBD from old node")
+
+    # if we managed to detach at least one, we update all the disks of
+    # the instance to point to the new secondary
+    info("updating instance configuration")
+    for dev in instance.disks:
+      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
+      cfg.SetDiskID(dev, pri_node)
+    cfg.Update(instance)
+
+    # and now perform the drbd attach
+    info("attaching primary drbds to new secondary (standalone => connected)")
+    failures = []
+    for dev in instance.disks:
+      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
+      # since the attach is smart, it's enough to 'find' the device,
+      # it will automatically activate the network, if the physical_id
+      # is correct
+      cfg.SetDiskID(dev, pri_node)
+      if not rpc.call_blockdev_find(pri_node, dev):
+        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
+                "please do a gnt-instance info to see the status of disks")
+
+    # this can fail as the old devices are degraded and _WaitForSync
+    # does a combined result over all disks, so we don't check its
+    # return value
+    self.proc.LogStep(5, steps_total, "sync devices")
+    _WaitForSync(cfg, instance, self.proc, unlock=True)
+
+    # so check manually all the devices
+    for name, (dev, old_lvs) in iv_names.iteritems():
+      cfg.SetDiskID(dev, pri_node)
+      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
+      if is_degr:
+        raise errors.OpExecError("DRBD device %s is degraded!" % name)
+
+    self.proc.LogStep(6, steps_total, "removing old storage")
+    for name, (dev, old_lvs) in iv_names.iteritems():
+      info("remove logical volumes for %s" % name)
+      for lv in old_lvs:
+        cfg.SetDiskID(lv, old_node)
+        if not rpc.call_blockdev_remove(old_node, lv):
+          warning("Can't remove LV on old secondary",
+                  hint="Cleanup stale volumes by hand")
+
+  def Exec(self, feedback_fn):
+    """Execute disk replacement.
+
+    This dispatches the disk replacement to the appropriate handler.
+
+    """
+    instance = self.instance
+    if instance.disk_template == constants.DT_REMOTE_RAID1:
+      fn = self._ExecRR1
+    elif instance.disk_template == constants.DT_DRBD8:
+      if self.op.remote_node is None:
+        fn = self._ExecD8DiskOnly
+      else:
+        fn = self._ExecD8Secondary
+    else:
+      raise errors.ProgrammerError("Unhandled disk replacement case")
+    return fn(feedback_fn)
+
 
 class LUQueryInstanceData(NoHooksLU):
   """Query runtime instance data.
@@ -3203,7 +4140,7 @@ class LUQueryInstanceData(NoHooksLU):
         instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
         if instance is None:
           raise errors.OpPrereqError("No such instance name '%s'" % name)
-      self.wanted_instances.append(instance)
+        self.wanted_instances.append(instance)
     else:
       self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                                in self.cfg.GetInstanceList()]
@@ -3216,7 +4153,7 @@ class LUQueryInstanceData(NoHooksLU):
     """
     self.cfg.SetDiskID(dev, instance.primary_node)
     dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
-    if dev.dev_type == "drbd":
+    if dev.dev_type in constants.LDS_DRBD:
       # we change the snode then (otherwise we use the one passed in)
       if dev.logical_id[0] == instance.primary_node:
         snode = dev.logical_id[1]
@@ -3275,6 +4212,11 @@ class LUQueryInstanceData(NoHooksLU):
         "memory": instance.memory,
         "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
         "disks": disks,
+        "network_port": instance.network_port,
+        "vcpus": instance.vcpus,
+        "kernel_path": instance.kernel_path,
+        "initrd_path": instance.initrd_path,
+        "hvm_boot_order": instance.hvm_boot_order,
         }
 
       result[instance.name] = idict
@@ -3282,38 +4224,7 @@ class LUQueryInstanceData(NoHooksLU):
     return result
 
 
-class LUQueryNodeData(NoHooksLU):
-  """Logical unit for querying node data.
-
-  """
-  _OP_REQP = ["nodes"]
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This only checks the optional node list against the existing names.
-
-    """
-    self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)
-
-  def Exec(self, feedback_fn):
-    """Compute and return the list of nodes.
-
-    """
-    ilist = [self.cfg.GetInstanceInfo(iname) for iname
-             in self.cfg.GetInstanceList()]
-    result = []
-    for node in self.wanted_nodes:
-      result.append((node.name, node.primary_ip, node.secondary_ip,
-                     [inst.name for inst in ilist
-                      if inst.primary_node == node.name],
-                     [inst.name for inst in ilist
-                      if node.name in inst.secondary_nodes],
-                     ))
-    return result
-
-
-class LUSetInstanceParms(LogicalUnit):
+class LUSetInstanceParams(LogicalUnit):
   """Modifies an instances's parameters.
 
   """
@@ -3332,7 +4243,7 @@ class LUSetInstanceParms(LogicalUnit):
       args['memory'] = self.mem
     if self.vcpus:
       args['vcpus'] = self.vcpus
-    if self.do_ip or self.do_bridge:
+    if self.do_ip or self.do_bridge or self.mac:
       if self.do_ip:
         ip = self.ip
       else:
@@ -3341,7 +4252,11 @@ class LUSetInstanceParms(LogicalUnit):
         bridge = self.bridge
       else:
         bridge = self.instance.nics[0].bridge
-      args['nics'] = [(ip, bridge)]
+      if self.mac:
+        mac = self.mac
+      else:
+        mac = self.instance.nics[0].mac
+      args['nics'] = [(ip, bridge, mac)]
     env = _BuildInstanceHookEnvByObject(self.instance, override=args)
     nl = [self.sstore.GetMasterNode(),
           self.instance.primary_node] + list(self.instance.secondary_nodes)
@@ -3356,8 +4271,14 @@ class LUSetInstanceParms(LogicalUnit):
     self.mem = getattr(self.op, "mem", None)
     self.vcpus = getattr(self.op, "vcpus", None)
     self.ip = getattr(self.op, "ip", None)
+    self.mac = getattr(self.op, "mac", None)
     self.bridge = getattr(self.op, "bridge", None)
-    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
+    self.kernel_path = getattr(self.op, "kernel_path", None)
+    self.initrd_path = getattr(self.op, "initrd_path", None)
+    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
+    all_params = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
+                  self.kernel_path, self.initrd_path, self.hvm_boot_order]
+    if all_params.count(None) == len(all_params):
       raise errors.OpPrereqError("No changes submitted")
     if self.mem is not None:
       try:
@@ -3379,6 +4300,42 @@ class LUSetInstanceParms(LogicalUnit):
     else:
       self.do_ip = False
     self.do_bridge = (self.bridge is not None)
+    if self.mac is not None:
+      if not utils.IsValidMac(self.mac):
+        raise errors.OpPrereqError("Invalid MAC address %s" % self.mac)
+      if self.cfg.IsMacInUse(self.mac):
+        raise errors.OpPrereqError("MAC address %s already in use in cluster" %
+                                   self.mac)
+
+    if self.kernel_path is not None:
+      self.do_kernel_path = True
+      if self.kernel_path == constants.VALUE_NONE:
+        raise errors.OpPrereqError("Can't set instance to no kernel")
+
+      if self.kernel_path != constants.VALUE_DEFAULT:
+        if not os.path.isabs(self.kernel_path):
+          raise errors.OpPrereqError("The kernel path must be an absolute"
+                                    " filename")
+    else:
+      self.do_kernel_path = False
+
+    if self.initrd_path is not None:
+      self.do_initrd_path = True
+      if self.initrd_path not in (constants.VALUE_NONE,
+                                  constants.VALUE_DEFAULT):
+        if not os.path.isabs(self.initrd_path):
+          raise errors.OpPrereqError("The initrd path must be an absolute"
+                                    " filename")
+    else:
+      self.do_initrd_path = False
+
+    # boot order verification
+    if self.hvm_boot_order is not None:
+      if self.hvm_boot_order != constants.VALUE_DEFAULT:
+        if len(self.hvm_boot_order.strip("acdn")) != 0:
+          raise errors.OpPrereqError("invalid boot order specified,"
+                                     " must be one or more of [acdn]"
+                                     " or 'default'")
 
     instance = self.cfg.GetInstanceInfo(
       self.cfg.ExpandInstanceName(self.op.instance_name))
@@ -3408,6 +4365,21 @@ class LUSetInstanceParms(LogicalUnit):
     if self.bridge:
       instance.nics[0].bridge = self.bridge
       result.append(("bridge", self.bridge))
+    if self.mac:
+      instance.nics[0].mac = self.mac
+      result.append(("mac", self.mac))
+    if self.do_kernel_path:
+      instance.kernel_path = self.kernel_path
+      result.append(("kernel_path", self.kernel_path))
+    if self.do_initrd_path:
+      instance.initrd_path = self.initrd_path
+      result.append(("initrd_path", self.initrd_path))
+    if self.hvm_boot_order:
+      if self.hvm_boot_order == constants.VALUE_DEFAULT:
+        instance.hvm_boot_order = None
+      else:
+        instance.hvm_boot_order = self.hvm_boot_order
+      result.append(("hvm_boot_order", self.hvm_boot_order))
 
     self.cfg.AddInstance(instance)
 
@@ -3435,7 +4407,7 @@ class LUQueryExports(NoHooksLU):
       that node.
 
     """
-    return rpc.call_export_list([node.name for node in self.nodes])
+    return rpc.call_export_list(self.nodes)
 
 
 class LUExportInstance(LogicalUnit):
@@ -3464,7 +4436,7 @@ class LUExportInstance(LogicalUnit):
   def CheckPrereq(self):
     """Check prerequisites.
 
-    This checks that the instance name is a valid one.
+    This checks that the instance and node names are valid.
 
     """
     instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
@@ -3489,10 +4461,11 @@ class LUExportInstance(LogicalUnit):
     instance = self.instance
     dst_node = self.dst_node
     src_node = instance.primary_node
-    # shutdown the instance, unless requested not to do so
     if self.op.shutdown:
-      op = opcodes.OpShutdownInstance(instance_name=instance.name)
-      self.processor.ChainOpCode(op, feedback_fn)
+      # shutdown the instance, but not the disks
+      if not rpc.call_instance_shutdown(src_node, instance):
+        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
+                                 (instance.name, src_node))
 
     vgname = self.cfg.GetVGName()
 
@@ -3508,29 +4481,27 @@ class LUExportInstance(LogicalUnit):
             logger.Error("could not snapshot block device %s on node %s" %
                          (disk.logical_id[1], src_node))
           else:
-            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
+            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                       logical_id=(vgname, new_dev_name),
                                       physical_id=(vgname, new_dev_name),
                                       iv_name=disk.iv_name)
             snap_disks.append(new_dev)
 
     finally:
-      if self.op.shutdown:
-        op = opcodes.OpStartupInstance(instance_name=instance.name,
-                                       force=False)
-        self.processor.ChainOpCode(op, feedback_fn)
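+      # restart the instance only if it was marked up before the export; if
+      # the restart fails, shut the instance's disks down before aborting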
+      if self.op.shutdown and instance.status == "up":
+        if not rpc.call_instance_start(src_node, instance, None):
+          _ShutdownInstanceDisks(instance, self.cfg)
+          raise errors.OpExecError("Could not start instance")
 
     # TODO: check for size
 
     for dev in snap_disks:
-      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
-                                           instance):
-        logger.Error("could not export block device %s from node"
-                     " %s to node %s" %
-                     (dev.logical_id[1], src_node, dst_node.name))
+      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
+        logger.Error("could not export block device %s from node %s to node %s"
+                     % (dev.logical_id[1], src_node, dst_node.name))
       if not rpc.call_blockdev_remove(src_node, dev):
-        logger.Error("could not remove snapshot block device %s from"
-                     " node %s" % (dev.logical_id[1], src_node))
+        logger.Error("could not remove snapshot block device %s from node %s" %
+                     (dev.logical_id[1], src_node))
 
     if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
       logger.Error("could not finalize export for instance %s on node %s" %
@@ -3544,7 +4515,7 @@ class LUExportInstance(LogicalUnit):
     # substitutes an empty list with the full cluster node list.
     if nodelist:
       op = opcodes.OpQueryExports(nodes=nodelist)
-      exportlist = self.processor.ChainOpCode(op, feedback_fn)
+      exportlist = self.proc.ChainOpCode(op)
       for node in exportlist:
         if instance.name in exportlist[node]:
           if not rpc.call_export_remove(node, instance.name):
@@ -3552,6 +4523,45 @@ class LUExportInstance(LogicalUnit):
                          " on node %s" % (instance.name, node))
 
 
+class LURemoveExport(NoHooksLU):
+  """Remove exports related to the named instance.
+
+  """
+  _OP_REQP = ["instance_name"]
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+    """
+    pass
+
+  def Exec(self, feedback_fn):
+    """Remove any export.
+
+    """
+    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
+    # If the instance was not found we'll try with the name that was passed in.
+    # This will only work if it was an FQDN, though.
+    fqdn_warn = False
+    if not instance_name:
+      fqdn_warn = True
+      instance_name = self.op.instance_name
+
+    op = opcodes.OpQueryExports(nodes=[])
+    exportlist = self.proc.ChainOpCode(op)
+    found = False
+    for node in exportlist:
+      if instance_name in exportlist[node]:
+        found = True
+        if not rpc.call_export_remove(node, instance_name):
+          logger.Error("could not remove export for instance %s"
+                       " on node %s" % (instance_name, node))
+
+    if fqdn_warn and not found:
+      feedback_fn("Export not found. If trying to remove an export belonging"
+                  " to a deleted instance please use its Fully Qualified"
+                  " Domain Name.")
+
+
 class TagsLU(NoHooksLU):
   """Generic tags LU.
 
@@ -3572,7 +4582,7 @@ class TagsLU(NoHooksLU):
       self.op.name = name
       self.target = self.cfg.GetNodeInfo(name)
     elif self.op.kind == constants.TAG_INSTANCE:
-      name = self.cfg.ExpandInstanceName(name)
+      name = self.cfg.ExpandInstanceName(self.op.name)
       if name is None:
         raise errors.OpPrereqError("Invalid instance name (%s)" %
                                    (self.op.name,))
@@ -3596,11 +4606,47 @@ class LUGetTags(TagsLU):
     return self.target.GetTags()
 
 
-class LUAddTag(TagsLU):
+class LUSearchTags(NoHooksLU):
+  """Searches the tags for a given pattern.
+
+  """
+  _OP_REQP = ["pattern"]
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks the pattern passed for validity by compiling it.
+
+    """
+    try:
+      self.re = re.compile(self.op.pattern)
+    except re.error, err:
+      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
+                                 (self.op.pattern, err))
+
+  def Exec(self, feedback_fn):
+    """Returns the tag list.
+
+    """
+    cfg = self.cfg
+    tgts = [("/cluster", cfg.GetClusterInfo())]
+    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
+    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
+    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
+    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
+    results = []
+    for path, target in tgts:
+      for tag in target.GetTags():
+        if self.re.search(tag):
+          results.append((path, tag))
+    return results
+
+
+class LUAddTags(TagsLU):
   """Sets a tag on a given object.
 
   """
-  _OP_REQP = ["kind", "name", "tag"]
+  _OP_REQP = ["kind", "name", "tags"]
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -3609,14 +4655,16 @@ class LUAddTag(TagsLU):
 
     """
     TagsLU.CheckPrereq(self)
-    objects.TaggableObject.ValidateTag(self.op.tag)
+    for tag in self.op.tags:
+      objects.TaggableObject.ValidateTag(tag)
 
   def Exec(self, feedback_fn):
     """Sets the tag.
 
     """
     try:
-      self.target.AddTag(self.op.tag)
+      for tag in self.op.tags:
+        self.target.AddTag(tag)
     except errors.TagError, err:
       raise errors.OpExecError("Error while setting tag: %s" % str(err))
     try:
@@ -3627,11 +4675,11 @@ class LUAddTag(TagsLU):
                                 " aborted. Please retry.")
 
 
-class LUDelTag(TagsLU):
-  """Delete a tag from a given object.
+class LUDelTags(TagsLU):
+  """Delete a list of tags from a given object.
 
   """
-  _OP_REQP = ["kind", "name", "tag"]
+  _OP_REQP = ["kind", "name", "tags"]
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -3640,18 +4688,401 @@ class LUDelTag(TagsLU):
 
     """
     TagsLU.CheckPrereq(self)
-    objects.TaggableObject.ValidateTag(self.op.tag)
-    if self.op.tag not in self.target.GetTags():
-      raise errors.OpPrereqError("Tag not found")
+    for tag in self.op.tags:
+      objects.TaggableObject.ValidateTag(tag)
+    del_tags = frozenset(self.op.tags)
+    cur_tags = self.target.GetTags()
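+    # "<=" is the set subset test: every tag we were asked to remove must
+    # currently be present on the target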
+    if not del_tags <= cur_tags:
+      diff_tags = del_tags - cur_tags
+      diff_names = ["'%s'" % tag for tag in diff_tags]
+      diff_names.sort()
+      raise errors.OpPrereqError("Tag(s) %s not found" %
+                                 (",".join(diff_names)))
 
   def Exec(self, feedback_fn):
     """Remove the tag from the object.
 
     """
-    self.target.RemoveTag(self.op.tag)
+    for tag in self.op.tags:
+      self.target.RemoveTag(tag)
     try:
       self.cfg.Update(self.target)
     except errors.ConfigurationError:
       raise errors.OpRetryError("There has been a modification to the"
                                 " config file and the operation has been"
                                 " aborted. Please retry.")
+
+
+class LUTestDelay(NoHooksLU):
+  """Sleep for a specified amount of time.
+
+  This LU sleeps on the master and/or nodes for a specified amount of
+  time.
+
+  """
+  _OP_REQP = ["duration", "on_master", "on_nodes"]
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that we have a good list of nodes and/or the duration
+    is valid.
+
+    """
+
+    if self.op.on_nodes:
+      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
+
+  def Exec(self, feedback_fn):
+    """Do the actual sleep.
+
+    """
+    if self.op.on_master:
+      if not utils.TestDelay(self.op.duration):
+        raise errors.OpExecError("Error during master delay test")
+    if self.op.on_nodes:
+      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
+      if not result:
+        raise errors.OpExecError("Complete failure from rpc call")
+      for node, node_result in result.items():
+        if not node_result:
+          raise errors.OpExecError("Failure during rpc call to node %s,"
+                                   " result: %s" % (node, node_result))
+
+
+class IAllocator(object):
+  """IAllocator framework.
+
+  An IAllocator instance has four sets of attributes:
+    - cfg/sstore that are needed to query the cluster
+    - input data (all members of the _KEYS class attribute are required)
+    - four buffer attributes (in_text, in_data, out_text, out_data), which
+      hold the input to the external script in both text and data structure
+      format, and its output, again in both formats
+    - the result variables from the script (success, info, nodes) for
+      easy usage
+
+  """
+  _KEYS = [
+    "mode", "name",
+    "mem_size", "disks", "disk_template",
+    "os", "tags", "nics", "vcpus",
+    ]
+
+  def __init__(self, cfg, sstore, **kwargs):
+    self.cfg = cfg
+    self.sstore = sstore
+    # init buffer variables
+    self.in_text = self.out_text = self.in_data = self.out_data = None
+    # init all input fields so that pylint is happy
+    self.mode = self.name = None
+    self.mem_size = self.disks = self.disk_template = None
+    self.os = self.tags = self.nics = self.vcpus = None
+    # computed fields
+    self.required_nodes = None
+    # init result fields
+    self.success = self.info = self.nodes = None
+    for key in kwargs:
+      if key not in self._KEYS:
+        raise errors.ProgrammerError("Invalid input parameter '%s' to"
+                                     " IAllocator" % key)
+      setattr(self, key, kwargs[key])
+    for key in self._KEYS:
+      if key not in kwargs:
+        raise errors.ProgrammerError("Missing input parameter '%s' to"
+                                     " IAllocator" % key)
+    self._BuildInputData()
+
+  def _ComputeClusterData(self):
+    """Compute the generic allocator input data.
+
+    This is the data that is independent of the actual operation.
+
+    """
+    cfg = self.cfg
+    # cluster data
+    data = {
+      "version": 1,
+      "cluster_name": self.sstore.GetClusterName(),
+      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
+      # we don't have job IDs
+      }
+
+    # node data
+    node_results = {}
+    node_list = cfg.GetNodeList()
+    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
+    for nname in node_list:
+      ninfo = cfg.GetNodeInfo(nname)
+      if nname not in node_data or not isinstance(node_data[nname], dict):
+        raise errors.OpExecError("Can't get data for node %s" % nname)
+      remote_info = node_data[nname]
+      for attr in ['memory_total', 'memory_free',
+                   'vg_size', 'vg_free']:
+        if attr not in remote_info:
+          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
+                                   (nname, attr))
+        try:
+          int(remote_info[attr])
+        except ValueError, err:
+          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
+                                   " %s" % (nname, attr, str(err)))
+      pnr = {
+        "tags": list(ninfo.GetTags()),
+        "total_memory": utils.TryConvert(int, remote_info['memory_total']),
+        "free_memory": utils.TryConvert(int, remote_info['memory_free']),
+        "total_disk": utils.TryConvert(int, remote_info['vg_size']),
+        "free_disk": utils.TryConvert(int, remote_info['vg_free']),
+        "primary_ip": ninfo.primary_ip,
+        "secondary_ip": ninfo.secondary_ip,
+        }
+      node_results[nname] = pnr
+    data["nodes"] = node_results
+
+    # instance data
+    instance_data = {}
+    i_list = cfg.GetInstanceList()
+    for iname in i_list:
+      iinfo = cfg.GetInstanceInfo(iname)
+      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
+                  for n in iinfo.nics]
+      pir = {
+        "tags": list(iinfo.GetTags()),
+        "should_run": iinfo.status == "up",
+        "vcpus": iinfo.vcpus,
+        "memory": iinfo.memory,
+        "os": iinfo.os,
+        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
+        "nics": nic_data,
+        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
+        "disk_template": iinfo.disk_template,
+        }
+      instance_data[iname] = pir
+
+    data["instances"] = instance_data
+
+    self.in_data = data
+
+  def _AddNewInstance(self):
+    """Add new instance data to allocator structure.
+
+    This in combination with _ComputeClusterData will create the
+    correct structure needed as input for the allocator.
+
+    The checks for the completeness of the opcode must have already been
+    done.
+
+    """
+    data = self.in_data
+    if len(self.disks) != 2:
+      raise errors.OpExecError("Only two-disk configurations supported")
+
+    disk_space = _ComputeDiskSize(self.disk_template,
+                                  self.disks[0]["size"], self.disks[1]["size"])
+
+    if self.disk_template in constants.DTS_NET_MIRROR:
+      self.required_nodes = 2
+    else:
+      self.required_nodes = 1
+    request = {
+      "type": "allocate",
+      "name": self.name,
+      "disk_template": self.disk_template,
+      "tags": self.tags,
+      "os": self.os,
+      "vcpus": self.vcpus,
+      "memory": self.mem_size,
+      "disks": self.disks,
+      "disk_space_total": disk_space,
+      "nics": self.nics,
+      "required_nodes": self.required_nodes,
+      }
+    data["request"] = request
+
+  def _AddRelocateInstance(self):
+    """Add relocate instance data to allocator structure.
+
+    This in combination with _ComputeClusterData will create the
+    correct structure needed as input for the allocator.
+
+    The checks for the completeness of the opcode must have already been
+    done.
+
+    """
+    instance = self.cfg.GetInstanceInfo(self.name)
+    if instance is None:
+      raise errors.ProgrammerError("Unknown instance '%s' passed to"
+                                   " IAllocator" % self.name)
+
+    if instance.disk_template not in constants.DTS_NET_MIRROR:
+      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
+
+    if len(instance.secondary_nodes) != 1:
+      raise errors.OpPrereqError("Instance has not exactly one secondary node")
+
+    self.required_nodes = 1
+
+    disk_space = _ComputeDiskSize(instance.disk_template,
+                                  instance.disks[0].size,
+                                  instance.disks[1].size)
+
+    request = {
+      "type": "relocate",
+      "name": self.name,
+      "disk_space_total": disk_space,
+      "required_nodes": self.required_nodes,
+      "nodes": list(instance.secondary_nodes),
+      }
+    self.in_data["request"] = request
+
+  def _BuildInputData(self):
+    """Build input data structures.
+
+    """
+    self._ComputeClusterData()
+
+    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
+      self._AddNewInstance()
+    else:
+      self._AddRelocateInstance()
+
+    self.in_text = serializer.Dump(self.in_data)
+
+  def Run(self, name, validate=True):
+    """Run an instance allocator and return the results.
+
+    """
+    data = self.in_text
+
+    alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
+                                  os.path.isfile)
+    if alloc_script is None:
+      raise errors.OpExecError("Can't find allocator '%s'" % name)
+
+    fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
+    try:
+      os.write(fd, data)
+      os.close(fd)
+      result = utils.RunCmd([alloc_script, fin_name])
+      if result.failed:
+        raise errors.OpExecError("Instance allocator call failed: %s,"
+                                 " output: %s" %
+                                 (result.fail_reason, result.output))
+    finally:
+      os.unlink(fin_name)
+    self.out_text = result.stdout
+    if validate:
+      self._ValidateResult()
+
+  def _ValidateResult(self):
+    """Process the allocator results.
+
+    This will process and if successful save the result in
+    self.out_data and the other parameters.
+
+    """
+    try:
+      rdict = serializer.Load(self.out_text)
+    except Exception, err:
+      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
+
+    if not isinstance(rdict, dict):
+      raise errors.OpExecError("Can't parse iallocator results: not a dict")
+
+    for key in "success", "info", "nodes":
+      if key not in rdict:
+        raise errors.OpExecError("Can't parse iallocator results:"
+                                 " missing key '%s'" % key)
+      setattr(self, key, rdict[key])
+
+    if not isinstance(rdict["nodes"], list):
+      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
+                               " is not a list")
+    self.out_data = rdict
+
+
+class LUTestAllocator(NoHooksLU):
+  """Run allocator tests.
+
+  This LU runs the allocator tests
+
+  """
+  _OP_REQP = ["direction", "mode", "name"]
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks the opcode parameters depending on the direction and mode
+    of the test.
+
+    """
+    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
+      for attr in ["name", "mem_size", "disks", "disk_template",
+                   "os", "tags", "nics", "vcpus"]:
+        if not hasattr(self.op, attr):
+          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
+                                     attr)
+      iname = self.cfg.ExpandInstanceName(self.op.name)
+      if iname is not None:
+        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
+                                   iname)
+      if not isinstance(self.op.nics, list):
+        raise errors.OpPrereqError("Invalid parameter 'nics'")
+      for row in self.op.nics:
+        if (not isinstance(row, dict) or
+            "mac" not in row or
+            "ip" not in row or
+            "bridge" not in row):
+          raise errors.OpPrereqError("Invalid contents of the"
+                                     " 'nics' parameter")
+      if not isinstance(self.op.disks, list):
+        raise errors.OpPrereqError("Invalid parameter 'disks'")
+      if len(self.op.disks) != 2:
+        raise errors.OpPrereqError("Only two-disk configurations supported")
+      for row in self.op.disks:
+        if (not isinstance(row, dict) or
+            "size" not in row or
+            not isinstance(row["size"], int) or
+            "mode" not in row or
+            row["mode"] not in ['r', 'w']):
+          raise errors.OpPrereqError("Invalid contents of the"
+                                     " 'disks' parameter")
+    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
+      if not hasattr(self.op, "name"):
+        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
+      fname = self.cfg.ExpandInstanceName(self.op.name)
+      if fname is None:
+        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
+                                   self.op.name)
+      self.op.name = fname
+    else:
+      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
+                                 self.op.mode)
+
+    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
+      if not hasattr(self.op, "allocator") or self.op.allocator is None:
+        raise errors.OpPrereqError("Missing allocator name")
+    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
+      raise errors.OpPrereqError("Wrong allocator test '%s'" %
+                                 self.op.direction)
+
+  def Exec(self, feedback_fn):
+    """Run the allocator test.
+
+    """
+    ial = IAllocator(self.cfg, self.sstore,
+                     mode=self.op.mode,
+                     name=self.op.name,
+                     mem_size=self.op.mem_size,
+                     disks=self.op.disks,
+                     disk_template=self.op.disk_template,
+                     os=self.op.os,
+                     tags=self.op.tags,
+                     nics=self.op.nics,
+                     vcpus=self.op.vcpus,
+                     )
+
+    if self.op.direction == constants.IALLOCATOR_DIR_IN:
+      result = ial.in_text
+    else:
+      ial.Run(self.op.allocator, validate=False)
+      result = ial.out_text
+    return result