Modify two mirror-device-related RPC calls
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index b6d2233..04612d7 100644
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#
 #
 
 # Copyright (C) 2006, 2007 Google Inc.
@@ -85,7 +85,7 @@ class LogicalUnit(object):
                                    " use 'gnt-cluster init' first.")
       if self.REQ_MASTER:
         master = sstore.GetMasterNode()
-        if master != socket.gethostname():
+        if master != utils.HostInfo().name:
           raise errors.OpPrereqError("Commands must be run on the master"
                                      " node %s" % master)
 
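Note: utils.HostInfo itself is not part of this diff. From its call sites, it resolves the local hostname (or a name passed in) and exposes .name and .ip attributes, replacing the bare socket.gethostname() calls. A minimal sketch of that contract, assuming standard resolver lookups (the real class's error handling may differ):

    import socket

    class HostInfo(object):
      """Resolve a hostname (default: the local host) to FQDN and IP."""

      def __init__(self, name=None):
        if name is None:
          name = socket.gethostname()
        self.name = socket.getfqdn(name)
        self.ip = socket.gethostbyname(self.name)
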
@@ -161,31 +161,55 @@ class NoHooksLU(LogicalUnit):
     This is a no-op, since we don't run hooks.
 
     """
-    return
+    return {}, [], []
 
 
 def _GetWantedNodes(lu, nodes):
-  """Returns list of checked and expanded nodes.
+  """Returns list of checked and expanded node names.
 
   Args:
     nodes: List of nodes (strings) or None for all
 
   """
-  if nodes is not None and not isinstance(nodes, list):
+  if not isinstance(nodes, list):
     raise errors.OpPrereqError("Invalid argument type 'nodes'")
 
   if nodes:
-    wanted_nodes = []
+    wanted = []
 
     for name in nodes:
-      node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name))
+      node = lu.cfg.ExpandNodeName(name)
       if node is None:
         raise errors.OpPrereqError("No such node name '%s'" % name)
-    wanted_nodes.append(node)
+      wanted.append(node)
+
+  else:
+    wanted = lu.cfg.GetNodeList()
+  return utils.NiceSort(wanted)
+
+
+def _GetWantedInstances(lu, instances):
+  """Returns list of checked and expanded instance names.
+
+  Args:
+    instances: List of instances (strings) or None for all
+
+  """
+  if not isinstance(instances, list):
+    raise errors.OpPrereqError("Invalid argument type 'instances'")
+
+  if instances:
+    wanted = []
+
+    for name in instances:
+      instance = lu.cfg.ExpandInstanceName(name)
+      if instance is None:
+        raise errors.OpPrereqError("No such instance name '%s'" % name)
+      wanted.append(instance)
 
-    return wanted_nodes
   else:
-    return [lu.cfg.GetNodeInfo(name) for name in lu.cfg.GetNodeList()]
+    wanted = lu.cfg.GetInstanceList()
+  return utils.NiceSort(wanted)
 
 
 def _CheckOutputFields(static, dynamic, selected):
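Both helpers hand their result to utils.NiceSort, which is outside this diff; its purpose is a natural sort, so that e.g. "node2" sorts before "node10". A self-contained sketch of such a sort (the real implementation may differ in detail):

    import re

    def NiceSort(name_list):
      """Sort strings, comparing embedded digit runs numerically."""
      def _key(name):
        key = []
        for part in re.split(r"(\d+)", name):
          if part.isdigit():
            key.append(int(part))
          else:
            key.append(part)
        return key
      return sorted(name_list, key=_key)

    # NiceSort(["node10", "node2"]) => ["node2", "node10"]
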
@@ -215,6 +239,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     secondary_nodes: List of secondary nodes as strings
   """
   env = {
+    "OP_TARGET": name,
     "INSTANCE_NAME": name,
     "INSTANCE_PRIMARY": primary_node,
     "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
@@ -350,10 +375,10 @@ def _UpdateKnownHosts(fullnode, ip, pubkey):
     pubkey   - the public key of the cluster
 
   """
-  if os.path.exists('/etc/ssh/ssh_known_hosts'):
-    f = open('/etc/ssh/ssh_known_hosts', 'r+')
+  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
+    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
   else:
-    f = open('/etc/ssh/ssh_known_hosts', 'w+')
+    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')
 
   inthere = False
 
@@ -405,12 +430,15 @@ def _UpdateKnownHosts(fullnode, ip, pubkey):
     save_lines = save_lines + add_lines
 
     # Write a new file and replace old.
-    fd, tmpname = tempfile.mkstemp('tmp', 'ssh_known_hosts_', '/etc/ssh')
+    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
+                                   constants.DATA_DIR)
     newfile = os.fdopen(fd, 'w')
-    newfile.write(''.join(save_lines))
-    newfile.close()
+    try:
+      newfile.write(''.join(save_lines))
+    finally:
+      newfile.close()
     logger.Debug("Wrote new known_hosts.")
-    os.rename(tmpname, '/etc/ssh/ssh_known_hosts')
+    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)
 
   elif add_lines:
     # Simply appending a new line will do the trick.
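The temp-file-plus-rename idiom used above is what makes the known_hosts update safe against partial writes: readers see either the old file or the complete new one. Condensed, with hypothetical arguments (os.rename is only atomic when source and target are on the same filesystem, which is why the temporary file goes into a chosen data directory rather than /tmp):

    import os
    import tempfile

    def WriteFileAtomically(target, data, tmp_dir):
      """Write data to target via a temporary file plus rename."""
      fd, tmpname = tempfile.mkstemp(".tmp", "atomic.", tmp_dir)
      f = os.fdopen(fd, "w")
      try:
        f.write(data)
      finally:
        f.close()
      os.rename(tmpname, target)
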
@@ -448,26 +476,23 @@ def _InitSSHSetup(node):
     node: the name of this host as a fqdn
 
   """
-  utils.RemoveFile('/root/.ssh/known_hosts')
+  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
 
-  if os.path.exists('/root/.ssh/id_dsa'):
-    utils.CreateBackup('/root/.ssh/id_dsa')
-  if os.path.exists('/root/.ssh/id_dsa.pub'):
-    utils.CreateBackup('/root/.ssh/id_dsa.pub')
-
-  utils.RemoveFile('/root/.ssh/id_dsa')
-  utils.RemoveFile('/root/.ssh/id_dsa.pub')
+  for name in priv_key, pub_key:
+    if os.path.exists(name):
+      utils.CreateBackup(name)
+    utils.RemoveFile(name)
 
   result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
-                         "-f", "/root/.ssh/id_dsa",
+                         "-f", priv_key,
                          "-q", "-N", ""])
   if result.failed:
     raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                              result.output)
 
-  f = open('/root/.ssh/id_dsa.pub', 'r')
+  f = open(pub_key, 'r')
   try:
-    utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192))
+    utils.AddAuthorizedKey(auth_keys, f.read(8192))
   finally:
     f.close()
 
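ssh.GetUserFiles replaces the hard-coded /root/.ssh paths and is not shown in this diff. Judging by its callers it returns a (private key, public key, authorized_keys) path triple for the given user. A plausible sketch, assuming DSA keys and a conventional ~/.ssh layout:

    import os
    import pwd

    def GetUserFiles(user):
      """Return (priv_key, pub_key, auth_keys) paths for user."""
      ssh_dir = os.path.join(pwd.getpwnam(user).pw_dir, ".ssh")
      return (os.path.join(ssh_dir, "id_dsa"),
              os.path.join(ssh_dir, "id_dsa.pub"),
              os.path.join(ssh_dir, "authorized_keys"))
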
@@ -503,6 +528,18 @@ def _InitGanetiServerSetup(ss):
                              (result.cmd, result.exit_code, result.output))
 
 
+def _CheckInstanceBridgesExist(instance):
+  """Check that the bridges needed by an instance exist.
+
+  """
+  # check bridges existence
+  brlist = [nic.bridge for nic in instance.nics]
+  if not rpc.call_bridges_exist(instance.primary_node, brlist):
+    raise errors.OpPrereqError("one or more target bridges %s does not"
+                               " exist on destination node '%s'" %
+                               (brlist, instance.primary_node))
+
+
 class LUInitCluster(LogicalUnit):
   """Initialise the cluster.
 
@@ -520,11 +557,8 @@ class LUInitCluster(LogicalUnit):
     ourselves in the post-run node list.
 
     """
-    env = {
-      "CLUSTER": self.op.cluster_name,
-      "MASTER": self.hostname['hostname_full'],
-      }
-    return env, [], [self.hostname['hostname_full']]
+    env = {"OP_TARGET": self.op.cluster_name}
+    return env, [], [self.hostname.name]
 
   def CheckPrereq(self):
     """Verify that the passed name is a valid one.
@@ -533,33 +567,32 @@ class LUInitCluster(LogicalUnit):
     if config.ConfigWriter.IsCluster():
       raise errors.OpPrereqError("Cluster is already initialised")
 
-    hostname_local = socket.gethostname()
-    self.hostname = hostname = utils.LookupHostname(hostname_local)
-    if not hostname:
-      raise errors.OpPrereqError("Cannot resolve my own hostname ('%s')" %
-                                 hostname_local)
+    self.hostname = hostname = utils.HostInfo()
 
-    self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
-    if not clustername:
-      raise errors.OpPrereqError("Cannot resolve given cluster name ('%s')"
-                                 % self.op.cluster_name)
+    if hostname.ip.startswith("127."):
+      raise errors.OpPrereqError("This host's IP resolves to the loopback"
+                                 " range (%s). Please fix DNS or /etc/hosts." %
+                                 (hostname.ip,))
 
-    result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']])
-    if result.failed:
+    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)
+
+    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
+                         constants.DEFAULT_NODED_PORT):
       raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                  " to %s,\nbut this ip address does not"
                                  " belong to this host."
-                                 " Aborting." % hostname['ip'])
+                                 " Aborting." % hostname.ip)
 
     secondary_ip = getattr(self.op, "secondary_ip", None)
     if secondary_ip and not utils.IsValidIP(secondary_ip):
       raise errors.OpPrereqError("Invalid secondary ip given")
-    if secondary_ip and secondary_ip != hostname['ip']:
-      result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
-      if result.failed:
-        raise errors.OpPrereqError("You gave %s as secondary IP,\n"
-                                   "but it does not belong to this host." %
-                                   secondary_ip)
+    if (secondary_ip and
+        secondary_ip != hostname.ip and
+        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
+                           constants.DEFAULT_NODED_PORT))):
+      raise errors.OpPrereqError("You gave %s as secondary IP,\n"
+                                 "but it does not belong to this host." %
+                                 secondary_ip)
     self.secondary_ip = secondary_ip
 
     # checks presence of the volume group given
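utils.TcpPing is the replacement for the external fping runs throughout this change: instead of ICMP it checks whether a TCP connection to the target (here the node daemon port) succeeds from a given source address. When sourced from 127.0.0.1 the connect can only succeed if the target address is local, mirroring the old fping -S127.0.0.1 trick. A rough sketch with the standard socket module (the signature and timeout are read off the call sites, not the real implementation):

    import socket

    def TcpPing(source, target, port, timeout=10):
      """True if a TCP connect from source to target:port succeeds."""
      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      sock.settimeout(timeout)
      try:
        try:
          sock.bind((source, 0))  # force the outgoing address
          sock.connect((target, port))
          return True
        except socket.error:
          return False
      finally:
        sock.close()
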
@@ -591,41 +624,36 @@ class LUInitCluster(LogicalUnit):
     hostname = self.hostname
 
     # set up the simple store
-    ss = ssconf.SimpleStore()
+    self.sstore = ss = ssconf.SimpleStore()
     ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
-    ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full'])
-    ss.SetKey(ss.SS_MASTER_IP, clustername['ip'])
+    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
+    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
     ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
-    ss.SetKey(ss.SS_CLUSTER_NAME, clustername['hostname'])
+    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)
 
     # set up the inter-node password and certificate
     _InitGanetiServerSetup(ss)
 
     # start the master ip
-    rpc.call_node_start_master(hostname['hostname_full'])
+    rpc.call_node_start_master(hostname.name)
 
     # set up ssh config and /etc/hosts
-    f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
+    f = open(constants.SSH_HOST_RSA_PUB, 'r')
     try:
       sshline = f.read()
     finally:
       f.close()
     sshkey = sshline.split(" ")[1]
 
-    _UpdateEtcHosts(hostname['hostname_full'],
-                    hostname['ip'],
-                    )
+    _UpdateEtcHosts(hostname.name, hostname.ip)
 
-    _UpdateKnownHosts(hostname['hostname_full'],
-                      hostname['ip'],
-                      sshkey,
-                      )
+    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)
 
-    _InitSSHSetup(hostname['hostname'])
+    _InitSSHSetup(hostname.name)
 
     # init of cluster config file
-    cfgw = config.ConfigWriter()
-    cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip,
+    self.cfg = cfgw = config.ConfigWriter()
+    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                     sshkey, self.op.mac_prefix,
                     self.op.vg_name, self.op.def_bridge)
 
@@ -659,8 +687,9 @@ class LUDestroyCluster(NoHooksLU):
     """Destroys the cluster.
 
     """
-    utils.CreateBackup('/root/.ssh/id_dsa')
-    utils.CreateBackup('/root/.ssh/id_dsa.pub')
+    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
+    utils.CreateBackup(priv_key)
+    utils.CreateBackup(pub_key)
     rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
 
 
@@ -781,7 +810,7 @@ class LUVerifyCluster(NoHooksLU):
                           (instance, node))
           bad = True
 
-    return not bad
+    return bad
 
   def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
     """Verify if there are any unknown volumes in the cluster.
@@ -908,6 +937,85 @@ class LUVerifyCluster(NoHooksLU):
     return int(bad)
 
 
+class LURenameCluster(LogicalUnit):
+  """Rename the cluster.
+
+  """
+  HPATH = "cluster-rename"
+  HTYPE = constants.HTYPE_CLUSTER
+  _OP_REQP = ["name"]
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    env = {
+      "OP_TARGET": self.sstore.GetClusterName(),
+      "NEW_NAME": self.op.name,
+      }
+    mn = self.sstore.GetMasterNode()
+    return env, [mn], [mn]
+
+  def CheckPrereq(self):
+    """Verify that the passed name is a valid one.
+
+    """
+    hostname = utils.HostInfo(self.op.name)
+
+    new_name = hostname.name
+    self.ip = new_ip = hostname.ip
+    old_name = self.sstore.GetClusterName()
+    old_ip = self.sstore.GetMasterIP()
+    if new_name == old_name and new_ip == old_ip:
+      raise errors.OpPrereqError("Neither the name nor the IP address of the"
+                                 " cluster has changed")
+    if new_ip != old_ip:
+      result = utils.RunCmd(["fping", "-q", new_ip])
+      if not result.failed:
+        raise errors.OpPrereqError("The given cluster IP address (%s) is"
+                                   " reachable on the network. Aborting." %
+                                   new_ip)
+
+    self.op.name = new_name
+
+  def Exec(self, feedback_fn):
+    """Rename the cluster.
+
+    """
+    clustername = self.op.name
+    ip = self.ip
+    ss = self.sstore
+
+    # shutdown the master IP
+    master = ss.GetMasterNode()
+    if not rpc.call_node_stop_master(master):
+      raise errors.OpExecError("Could not disable the master role")
+
+    try:
+      # modify the sstore
+      ss.SetKey(ss.SS_MASTER_IP, ip)
+      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
+
+      # Distribute updated ss config to all nodes
+      myself = self.cfg.GetNodeInfo(master)
+      dist_nodes = self.cfg.GetNodeList()
+      if myself.name in dist_nodes:
+        dist_nodes.remove(myself.name)
+
+      logger.Debug("Copying updated ssconf data to all nodes")
+      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
+        fname = ss.KeyToFilename(keyname)
+        result = rpc.call_upload_file(dist_nodes, fname)
+        for to_node in dist_nodes:
+          if not result[to_node]:
+            logger.Error("copy of file %s to node %s failed" %
+                         (fname, to_node))
+    finally:
+      if not rpc.call_node_start_master(master):
+        logger.Error("Could not re-enable the master role on the master,\n"
+                     "please restart manually.")
+
+
 def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
   """Sleep and poll for an instance's disk to sync.
 
@@ -1033,6 +1141,7 @@ class LURemoveNode(LogicalUnit):
 
     """
     env = {
+      "OP_TARGET": self.op.node_name,
       "NODE_NAME": self.op.node_name,
       }
     all_nodes = self.cfg.GetNodeList()
@@ -1093,7 +1202,7 @@ class LUQueryNodes(NoHooksLU):
   """Logical unit for querying nodes.
 
   """
-  _OP_REQP = ["output_fields"]
+  _OP_REQP = ["output_fields", "names"]
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -1102,21 +1211,24 @@ class LUQueryNodes(NoHooksLU):
 
     """
     self.dynamic_fields = frozenset(["dtotal", "dfree",
-                                     "mtotal", "mnode", "mfree"])
+                                     "mtotal", "mnode", "mfree",
+                                     "bootid"])
 
-    _CheckOutputFields(static=["name", "pinst", "sinst", "pip", "sip"],
+    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
+                               "pinst_list", "sinst_list",
+                               "pip", "sip"],
                        dynamic=self.dynamic_fields,
                        selected=self.op.output_fields)
 
+    self.wanted = _GetWantedNodes(self, self.op.names)
 
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
 
     """
-    nodenames = utils.NiceSort(self.cfg.GetNodeList())
+    nodenames = self.wanted
     nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
 
-
     # begin data gathering
 
     if self.dynamic_fields.intersection(self.op.output_fields):
@@ -1131,23 +1243,28 @@ class LUQueryNodes(NoHooksLU):
             "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
             "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
             "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
+            "bootid": nodeinfo['bootid'],
             }
         else:
           live_data[name] = {}
     else:
       live_data = dict.fromkeys(nodenames, {})
 
-    node_to_primary = dict.fromkeys(nodenames, 0)
-    node_to_secondary = dict.fromkeys(nodenames, 0)
+    node_to_primary = dict([(name, set()) for name in nodenames])
+    node_to_secondary = dict([(name, set()) for name in nodenames])
 
-    if "pinst" in self.op.output_fields or "sinst" in self.op.output_fields:
+    inst_fields = frozenset(("pinst_cnt", "pinst_list",
+                             "sinst_cnt", "sinst_list"))
+    if inst_fields & frozenset(self.op.output_fields):
       instancelist = self.cfg.GetInstanceList()
 
-      for instance in instancelist:
-        instanceinfo = self.cfg.GetInstanceInfo(instance)
-        node_to_primary[instanceinfo.primary_node] += 1
-        for secnode in instanceinfo.secondary_nodes:
-          node_to_secondary[secnode] += 1
+      for instance_name in instancelist:
+        inst = self.cfg.GetInstanceInfo(instance_name)
+        if inst.primary_node in node_to_primary:
+          node_to_primary[inst.primary_node].add(inst.name)
+        for secnode in inst.secondary_nodes:
+          if secnode in node_to_secondary:
+            node_to_secondary[secnode].add(inst.name)
 
     # end data gathering
 
@@ -1157,19 +1274,22 @@ class LUQueryNodes(NoHooksLU):
       for field in self.op.output_fields:
         if field == "name":
           val = node.name
-        elif field == "pinst":
-          val = node_to_primary[node.name]
-        elif field == "sinst":
-          val = node_to_secondary[node.name]
+        elif field == "pinst_list":
+          val = list(node_to_primary[node.name])
+        elif field == "sinst_list":
+          val = list(node_to_secondary[node.name])
+        elif field == "pinst_cnt":
+          val = len(node_to_primary[node.name])
+        elif field == "sinst_cnt":
+          val = len(node_to_secondary[node.name])
         elif field == "pip":
           val = node.primary_ip
         elif field == "sip":
           val = node.secondary_ip
         elif field in self.dynamic_fields:
-          val = live_data[node.name].get(field, "?")
+          val = live_data[node.name].get(field, None)
         else:
           raise errors.ParameterError(field)
-        val = str(val)
         node_output.append(val)
       output.append(node_output)
 
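With the str(val) conversion gone, LUQueryNodes now returns typed values: ints for the counts, real lists for pinst_list/sinst_list, and None where live data is unavailable. Formatting becomes the client's job; a sketch of the kind of rendering a CLI might then do (purely illustrative):

    def FormatQueryValue(val):
      """Render a single query cell for text output."""
      if val is None:
        return "?"  # live data unavailable, e.g. node down
      if isinstance(val, list):
        return ",".join(val) or "-"
      return str(val)
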
@@ -1199,7 +1319,7 @@ class LUQueryNodeVolumes(NoHooksLU):
     """Computes the list of nodes and their attributes.
 
     """
-    nodenames = utils.NiceSort([node.name for node in self.nodes])
+    nodenames = self.nodes
     volumes = rpc.call_node_volumes(nodenames)
 
     ilist = [self.cfg.GetInstanceInfo(iname) for iname
@@ -1261,6 +1381,7 @@ class LUAddNode(LogicalUnit):
 
     """
     env = {
+      "OP_TARGET": self.op.node_name,
       "NODE_NAME": self.op.node_name,
       "NODE_PIP": self.op.primary_ip,
       "NODE_SIP": self.op.secondary_ip,
@@ -1283,12 +1404,10 @@ class LUAddNode(LogicalUnit):
     node_name = self.op.node_name
     cfg = self.cfg
 
-    dns_data = utils.LookupHostname(node_name)
-    if not dns_data:
-      raise errors.OpPrereqError("Node %s is not resolvable" % node_name)
+    dns_data = utils.HostInfo(node_name)
 
-    node = dns_data['hostname']
-    primary_ip = self.op.primary_ip = dns_data['ip']
+    node = dns_data.name
+    primary_ip = self.op.primary_ip = dns_data.ip
     secondary_ip = getattr(self.op, "secondary_ip", None)
     if secondary_ip is None:
       secondary_ip = primary_ip
@@ -1323,17 +1442,18 @@ class LUAddNode(LogicalUnit):
                                    " new node doesn't have one")
 
     # checks reachablity
-    command = ["fping", "-q", primary_ip]
-    result = utils.RunCmd(command)
-    if result.failed:
+    if not utils.TcpPing(utils.HostInfo().name,
+                         primary_ip,
+                         constants.DEFAULT_NODED_PORT):
       raise errors.OpPrereqError("Node not reachable by ping")
 
     if not newbie_singlehomed:
       # check reachability from my secondary ip to newbie's secondary ip
-      command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
-      result = utils.RunCmd(command)
-      if result.failed:
-        raise errors.OpPrereqError("Node secondary ip not reachable by ping")
+      if not utils.TcpPing(myself.secondary_ip,
+                           secondary_ip,
+                           constants.DEFAULT_NODED_PORT):
+        raise errors.OpPrereqError(
+          "Node secondary ip not reachable by TCP based ping to noded port")
 
     self.new_node = objects.Node(name=node,
                                  primary_ip=primary_ip,
@@ -1365,8 +1485,6 @@ class LUAddNode(LogicalUnit):
       raise errors.OpExecError("PEM must end with newline")
     logger.Info("copy cluster pass to %s and starting the node daemon" % node)
 
-    # remove first the root's known_hosts file
-    utils.RemoveFile("/root/.ssh/known_hosts")
     # and then connect with ssh to set password and start ganeti-noded
     # note that all the below variables are sanitized at this point,
     # either by being constants or by the checks above
@@ -1402,10 +1520,11 @@ class LUAddNode(LogicalUnit):
 
     # setup ssh on node
     logger.Info("copy ssh key to node %s" % node)
+    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
     keyarray = []
-    keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub",
-                "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub",
-                "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"]
+    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
+                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
+                priv_key, pub_key]
 
     for i in keyfiles:
       f = open(i, 'r')
@@ -1426,14 +1545,23 @@ class LUAddNode(LogicalUnit):
                       self.cfg.GetHostKey())
 
     if new_node.secondary_ip != new_node.primary_ip:
-      result = ssh.SSHCall(node, "root",
-                           "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
-      if result.failed:
+      if not rpc.call_node_tcp_ping(new_node.name,
+                                    constants.LOCALHOST_IP_ADDRESS,
+                                    new_node.secondary_ip,
+                                    constants.DEFAULT_NODED_PORT,
+                                    10, False):
         raise errors.OpExecError("Node claims it doesn't have the"
                                  " secondary ip you gave (%s).\n"
                                  "Please fix and re-run this command." %
                                  new_node.secondary_ip)
 
+    success, msg = ssh.VerifyNodeHostname(node)
+    if not success:
+      raise errors.OpExecError("Node '%s' claims it has a different hostname"
+                               " than the one the resolver gives: %s.\n"
+                               "Please fix and re-run this command." %
+                               (node, msg))
+
     # Distribute updated /etc/hosts and known_hosts to all nodes,
     # including the node just added
     myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
@@ -1442,7 +1570,7 @@ class LUAddNode(LogicalUnit):
       dist_nodes.remove(myself.name)
 
     logger.Debug("Copying hosts and known_hosts to all nodes")
-    for fname in ("/etc/hosts", "/etc/ssh/ssh_known_hosts"):
+    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
       result = rpc.call_upload_file(dist_nodes, fname)
       for to_node in dist_nodes:
         if not result[to_node]:
@@ -1477,6 +1605,7 @@ class LUMasterFailover(LogicalUnit):
 
     """
     env = {
+      "OP_TARGET": self.new_master,
       "NEW_MASTER": self.new_master,
       "OLD_MASTER": self.old_master,
       }
@@ -1488,8 +1617,7 @@ class LUMasterFailover(LogicalUnit):
     This checks that we are not already the master.
 
     """
-    self.new_master = socket.gethostname()
-
+    self.new_master = utils.HostInfo().name
     self.old_master = self.sstore.GetMasterNode()
 
     if self.old_master == self.new_master:
@@ -1590,7 +1718,7 @@ class LUClusterCopyFile(NoHooksLU):
     """
     filename = self.op.filename
 
-    myname = socket.gethostname()
+    myname = utils.HostInfo().name
 
     for node in self.nodes:
       if node == myname:
@@ -1638,8 +1766,8 @@ class LURunClusterCommand(NoHooksLU):
     """
     data = []
     for node in self.nodes:
-      result = utils.RunCmd(["ssh", node.name, self.op.command])
-      data.append((node.name, result.cmd, result.output, result.exit_code))
+      result = ssh.SSHCall(node, "root", self.op.command)
+      data.append((node, result.output, result.exit_code))
 
     return data
 
@@ -1708,6 +1836,12 @@ def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
     device_info.append((instance.primary_node, inst_disk.iv_name,
                         master_result))
 
+  # leave the disks configured for the primary node
+  # this is a workaround that would be fixed better by
+  # improving the logical/physical id handling
+  for disk in instance.disks:
+    cfg.SetDiskID(disk, instance.primary_node)
+
   return disks_ok, device_info
 
 
@@ -1818,11 +1952,7 @@ class LUStartupInstance(LogicalUnit):
                                  self.op.instance_name)
 
     # check bridges existance
-    brlist = [nic.bridge for nic in instance.nics]
-    if not rpc.call_bridges_exist(instance.primary_node, brlist):
-      raise errors.OpPrereqError("one or more target bridges %s does not"
-                                 " exist on destination node '%s'" %
-                                 (brlist, instance.primary_node))
+    _CheckInstanceBridgesExist(instance)
 
     self.instance = instance
     self.op.instance_name = instance.name
@@ -1860,6 +1990,82 @@ class LUStartupInstance(LogicalUnit):
     self.cfg.MarkInstanceUp(instance.name)
 
 
+class LURebootInstance(LogicalUnit):
+  """Reboot an instance.
+
+  """
+  HPATH = "instance-reboot"
+  HTYPE = constants.HTYPE_INSTANCE
+  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = {
+      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
+      }
+    env.update(_BuildInstanceHookEnvByObject(self.instance))
+    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+          list(self.instance.secondary_nodes))
+    return env, nl, nl
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster.
+
+    """
+    instance = self.cfg.GetInstanceInfo(
+      self.cfg.ExpandInstanceName(self.op.instance_name))
+    if instance is None:
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
+
+    # check bridges existence
+    _CheckInstanceBridgesExist(instance)
+
+    self.instance = instance
+    self.op.instance_name = instance.name
+
+  def Exec(self, feedback_fn):
+    """Reboot the instance.
+
+    """
+    instance = self.instance
+    ignore_secondaries = self.op.ignore_secondaries
+    reboot_type = self.op.reboot_type
+    extra_args = getattr(self.op, "extra_args", "")
+
+    node_current = instance.primary_node
+
+    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
+                           constants.INSTANCE_REBOOT_HARD,
+                           constants.INSTANCE_REBOOT_FULL]:
+      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
+                                  (constants.INSTANCE_REBOOT_SOFT,
+                                   constants.INSTANCE_REBOOT_HARD,
+                                   constants.INSTANCE_REBOOT_FULL))
+
+    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
+                       constants.INSTANCE_REBOOT_HARD]:
+      if not rpc.call_instance_reboot(node_current, instance,
+                                      reboot_type, extra_args):
+        raise errors.OpExecError("Could not reboot instance")
+    else:
+      if not rpc.call_instance_shutdown(node_current, instance):
+        raise errors.OpExecError("could not shutdown instance for full reboot")
+      _ShutdownInstanceDisks(instance, self.cfg)
+      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
+      if not rpc.call_instance_start(node_current, instance, extra_args):
+        _ShutdownInstanceDisks(instance, self.cfg)
+        raise errors.OpExecError("Could not start instance for full reboot")
+
+    self.cfg.MarkInstanceUp(instance.name)
+
+
 class LUShutdownInstance(LogicalUnit):
   """Shutdown an instance.
 
@@ -1984,6 +2190,84 @@ class LUReinstallInstance(LogicalUnit):
       _ShutdownInstanceDisks(inst, self.cfg)
 
 
+class LURenameInstance(LogicalUnit):
+  """Rename an instance.
+
+  """
+  HPATH = "instance-rename"
+  HTYPE = constants.HTYPE_INSTANCE
+  _OP_REQP = ["instance_name", "new_name"]
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = _BuildInstanceHookEnvByObject(self.instance)
+    env["INSTANCE_NEW_NAME"] = self.op.new_name
+    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+          list(self.instance.secondary_nodes))
+    return env, nl, nl
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster and is not running.
+
+    """
+    instance = self.cfg.GetInstanceInfo(
+      self.cfg.ExpandInstanceName(self.op.instance_name))
+    if instance is None:
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
+    if instance.status != "down":
+      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
+                                 self.op.instance_name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    if remote_info:
+      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
+                                 (self.op.instance_name,
+                                  instance.primary_node))
+    self.instance = instance
+
+    # new name verification
+    name_info = utils.HostInfo(self.op.new_name)
+
+    self.op.new_name = new_name = name_info.name
+    if not getattr(self.op, "ignore_ip", False):
+      command = ["fping", "-q", name_info.ip]
+      result = utils.RunCmd(command)
+      if not result.failed:
+        raise errors.OpPrereqError("IP %s of instance %s already in use" %
+                                   (name_info.ip, new_name))
+
+
+  def Exec(self, feedback_fn):
+    """Rename the instance.
+
+    """
+    inst = self.instance
+    old_name = inst.name
+
+    self.cfg.RenameInstance(inst.name, self.op.new_name)
+
+    # re-read the instance from the configuration after rename
+    inst = self.cfg.GetInstanceInfo(self.op.new_name)
+
+    _StartInstanceDisks(self.cfg, inst, None)
+    try:
+      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
+                                          "sda", "sdb"):
+        msg = ("Could not run OS rename script for instance %s\n"
+               "on node %s\n"
+               "(but the instance has been renamed in Ganeti)" %
+               (inst.name, inst.primary_node))
+        logger.Error(msg)
+    finally:
+      _ShutdownInstanceDisks(inst, self.cfg)
+
+
 class LURemoveInstance(LogicalUnit):
   """Remove an instance.
 
@@ -1999,8 +2283,7 @@ class LURemoveInstance(LogicalUnit):
 
     """
     env = _BuildInstanceHookEnvByObject(self.instance)
-    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
-          list(self.instance.secondary_nodes))
+    nl = [self.sstore.GetMasterNode()]
     return env, nl, nl
 
   def CheckPrereq(self):
@@ -2025,12 +2308,19 @@ class LURemoveInstance(LogicalUnit):
                 (instance.name, instance.primary_node))
 
     if not rpc.call_instance_shutdown(instance.primary_node, instance):
-      raise errors.OpExecError("Could not shutdown instance %s on node %s" %
-                               (instance.name, instance.primary_node))
+      if self.op.ignore_failures:
+        feedback_fn("Warning: can't shutdown instance")
+      else:
+        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
+                                 (instance.name, instance.primary_node))
 
     logger.Info("removing block devices for instance %s" % instance.name)
 
-    _RemoveDisks(instance, self.cfg)
+    if not _RemoveDisks(instance, self.cfg):
+      if self.op.ignore_failures:
+        feedback_fn("Warning: can't remove instance's disks")
+      else:
+        raise errors.OpExecError("Can't remove instance's disks")
 
     logger.Info("removing instance %s out of cluster config" % instance.name)
 
@@ -2041,7 +2331,7 @@ class LUQueryInstances(NoHooksLU):
   """Logical unit for querying instances.
 
   """
-  _OP_REQP = ["output_fields"]
+  _OP_REQP = ["output_fields", "names"]
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -2052,15 +2342,18 @@ class LUQueryInstances(NoHooksLU):
     self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
     _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                                "admin_state", "admin_ram",
-                               "disk_template", "ip", "mac", "bridge"],
+                               "disk_template", "ip", "mac", "bridge",
+                               "sda_size", "sdb_size"],
                        dynamic=self.dynamic_fields,
                        selected=self.op.output_fields)
 
+    self.wanted = _GetWantedInstances(self, self.op.names)
+
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
 
     """
-    instance_names = utils.NiceSort(self.cfg.GetInstanceList())
+    instance_names = self.wanted
     instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                      in instance_names]
 
@@ -2095,25 +2388,19 @@ class LUQueryInstances(NoHooksLU):
         elif field == "pnode":
           val = instance.primary_node
         elif field == "snodes":
-          val = ",".join(instance.secondary_nodes) or "-"
+          val = list(instance.secondary_nodes)
         elif field == "admin_state":
-          if instance.status == "down":
-            val = "no"
-          else:
-            val = "yes"
+          val = (instance.status != "down")
         elif field == "oper_state":
           if instance.primary_node in bad_nodes:
-            val = "(node down)"
+            val = None
           else:
-            if live_data.get(instance.name):
-              val = "running"
-            else:
-              val = "stopped"
+            val = bool(live_data.get(instance.name))
         elif field == "admin_ram":
           val = instance.memory
         elif field == "oper_ram":
           if instance.primary_node in bad_nodes:
-            val = "(node down)"
+            val = None
           elif instance.name in live_data:
             val = live_data[instance.name].get("memory", "?")
           else:
@@ -2126,9 +2413,14 @@ class LUQueryInstances(NoHooksLU):
           val = instance.nics[0].bridge
         elif field == "mac":
           val = instance.nics[0].mac
+        elif field == "sda_size" or field == "sdb_size":
+          disk = instance.FindDisk(field[:3])
+          if disk is None:
+            val = None
+          else:
+            val = disk.size
         else:
           raise errors.ParameterError(field)
-        val = str(val)
         iout.append(val)
       output.append(iout)
 
@@ -2168,8 +2460,17 @@ class LUFailoverInstance(LogicalUnit):
       raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
 
+    if instance.disk_template not in constants.DTS_NET_MIRROR:
+      raise errors.OpPrereqError("Instance's disk layout is not"
+                                 " network mirrored, cannot failover.")
+
+    secondary_nodes = instance.secondary_nodes
+    if not secondary_nodes:
+      raise errors.ProgrammerError("no secondary node but using "
+                                   "DT_REMOTE_RAID1 template")
+
     # check memory requirements on the secondary node
-    target_node = instance.secondary_nodes[0]
+    target_node = secondary_nodes[0]
     nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
     info = nodeinfo.get(target_node, None)
     if not info:
@@ -2325,16 +2626,32 @@ def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
   """
   port = cfg.AllocatePort()
   vgname = cfg.GetVGName()
-  dev_data = objects.Disk(dev_type="lvm", size=size,
+  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                           logical_id=(vgname, names[0]))
-  dev_meta = objects.Disk(dev_type="lvm", size=128,
+  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                           logical_id=(vgname, names[1]))
-  drbd_dev = objects.Disk(dev_type="drbd", size=size,
+  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                           logical_id = (primary, secondary, port),
                           children = [dev_data, dev_meta])
   return drbd_dev
 
 
+def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
+  """Generate a drbd8 device complete with its children.
+
+  """
+  port = cfg.AllocatePort()
+  vgname = cfg.GetVGName()
+  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
+                          logical_id=(vgname, names[0]))
+  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
+                          logical_id=(vgname, names[1]))
+  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
+                          logical_id = (primary, secondary, port),
+                          children = [dev_data, dev_meta],
+                          iv_name=iv_name)
+  return drbd_dev
+
 def _GenerateDiskTemplate(cfg, template_name,
                           instance_name, primary_node,
                           secondary_nodes, disk_sz, swap_sz):
@@ -2351,10 +2668,10 @@ def _GenerateDiskTemplate(cfg, template_name,
       raise errors.ProgrammerError("Wrong template configuration")
 
     names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
-    sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
+    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                            logical_id=(vgname, names[0]),
                            iv_name = "sda")
-    sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
+    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                            logical_id=(vgname, names[1]),
                            iv_name = "sdb")
     disks = [sda_dev, sdb_dev]
@@ -2365,22 +2682,22 @@ def _GenerateDiskTemplate(cfg, template_name,
 
     names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                        ".sdb_m1", ".sdb_m2"])
-    sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
+    sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                               logical_id=(vgname, names[0]))
-    sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
+    sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                               logical_id=(vgname, names[1]))
-    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
+    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
                               size=disk_sz,
                               children = [sda_dev_m1, sda_dev_m2])
-    sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
+    sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                               logical_id=(vgname, names[2]))
-    sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
+    sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                               logical_id=(vgname, names[3]))
-    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
+    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
                               size=swap_sz,
                               children = [sdb_dev_m1, sdb_dev_m2])
     disks = [md_sda_dev, md_sdb_dev]
-  elif template_name == "remote_raid1":
+  elif template_name == constants.DT_REMOTE_RAID1:
     if len(secondary_nodes) != 1:
       raise errors.ProgrammerError("Wrong template configuration")
     remote_node = secondary_nodes[0]
@@ -2388,13 +2705,24 @@ def _GenerateDiskTemplate(cfg, template_name,
                                        ".sdb_data", ".sdb_meta"])
     drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                          disk_sz, names[0:2])
-    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
+    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                               children = [drbd_sda_dev], size=disk_sz)
     drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                          swap_sz, names[2:4])
-    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
+    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                               children = [drbd_sdb_dev], size=swap_sz)
     disks = [md_sda_dev, md_sdb_dev]
+  elif template_name == constants.DT_DRBD8:
+    if len(secondary_nodes) != 1:
+      raise errors.ProgrammerError("Wrong template configuration")
+    remote_node = secondary_nodes[0]
+    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
+                                       ".sdb_data", ".sdb_meta"])
+    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
+                                         disk_sz, names[0:2], "sda")
+    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
+                                         swap_sz, names[2:4], "sdb")
+    disks = [drbd_sda_dev, drbd_sdb_dev]
   else:
     raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
   return disks
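For the new constants.DT_DRBD8 template there is no md_raid1 layer on top: sda and sdb are each a single LD_DRBD8 device whose children are the data and meta logical volumes, so the resulting device tree (names and sizes as generated above; each branch allocates its own port) is:

    sda  (LD_DRBD8, logical_id=(primary, secondary, port1))
      +-- <instance>.sda_data  (LD_LV, size=disk_sz)
      +-- <instance>.sda_meta  (LD_LV, size=128)
    sdb  (LD_DRBD8, logical_id=(primary, secondary, port2))
      +-- <instance>.sdb_data  (LD_LV, size=swap_sz)
      +-- <instance>.sdb_meta  (LD_LV, size=128)
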
@@ -2444,7 +2772,7 @@ def _RemoveDisks(instance, cfg):
 
   This abstracts away some work from `AddInstance()` and
   `RemoveInstance()`. Note that in case some of the devices couldn't
-  be remove, the removal will continue with the other ones (compare
+  be removed, the removal will continue with the other ones (compare
   with `_CreateDisks()`).
 
   Args:
@@ -2476,7 +2804,7 @@ class LUCreateInstance(LogicalUnit):
   HTYPE = constants.HTYPE_INSTANCE
   _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
               "disk_template", "swap_size", "mode", "start", "vcpus",
-              "wait_for_sync"]
+              "wait_for_sync", "ip_check"]
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -2571,9 +2899,9 @@ class LUCreateInstance(LogicalUnit):
     if self.op.disk_template not in constants.DISK_TEMPLATES:
       raise errors.OpPrereqError("Invalid disk template name")
 
-    if self.op.disk_template == constants.DT_REMOTE_RAID1:
+    if self.op.disk_template in constants.DTS_NET_MIRROR:
       if getattr(self.op, "snode", None) is None:
-        raise errors.OpPrereqError("The 'remote_raid1' disk template needs"
+        raise errors.OpPrereqError("The networked disk templates need"
                                    " a mirror node")
 
       snode_name = self.cfg.ExpandNodeName(self.op.snode)
@@ -2596,6 +2924,7 @@ class LUCreateInstance(LogicalUnit):
       constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
       # 256 MB are added for drbd metadata, 128MB for each drbd device
       constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
+      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
     }
 
     if self.op.disk_template not in req_size_dict:
@@ -2621,12 +2950,9 @@ class LUCreateInstance(LogicalUnit):
                                  " primary node"  % self.op.os_type)
 
     # instance verification
-    hostname1 = utils.LookupHostname(self.op.instance_name)
-    if not hostname1:
-      raise errors.OpPrereqError("Instance name '%s' not found in dns" %
-                                 self.op.instance_name)
+    hostname1 = utils.HostInfo(self.op.instance_name)
 
-    self.op.instance_name = instance_name = hostname1['hostname']
+    self.op.instance_name = instance_name = hostname1.name
     instance_list = self.cfg.GetInstanceList()
     if instance_name in instance_list:
       raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
@@ -2636,7 +2962,7 @@ class LUCreateInstance(LogicalUnit):
     if ip is None or ip.lower() == "none":
       inst_ip = None
     elif ip.lower() == "auto":
-      inst_ip = hostname1['ip']
+      inst_ip = hostname1.ip
     else:
       if not utils.IsValidIP(ip):
         raise errors.OpPrereqError("given IP address '%s' doesn't look"
@@ -2644,11 +2970,15 @@ class LUCreateInstance(LogicalUnit):
       inst_ip = ip
     self.inst_ip = inst_ip
 
-    command = ["fping", "-q", hostname1['ip']]
-    result = utils.RunCmd(command)
-    if not result.failed:
-      raise errors.OpPrereqError("IP %s of instance %s already in use" %
-                                 (hostname1['ip'], instance_name))
+    if self.op.start and not self.op.ip_check:
+      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
+                                 " adding an instance in start mode")
+
+    if self.op.ip_check:
+      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
+                       constants.DEFAULT_NODED_PORT):
+        raise errors.OpPrereqError("IP %s of instance %s already in use" %
+                                   (hostname1.ip, instance_name))
 
     # bridge verification
     bridge = getattr(self.op, "bridge", None)
@@ -2704,7 +3034,7 @@ class LUCreateInstance(LogicalUnit):
 
     if self.op.wait_for_sync:
       disk_abort = not _WaitForSync(self.cfg, iobj)
-    elif iobj.disk_template == "remote_raid1":
+    elif iobj.disk_template in constants.DTS_NET_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
       time.sleep(15)
       feedback_fn("* checking mirrors status")
@@ -2791,7 +3121,13 @@ class LUConnectConsole(NoHooksLU):
 
     hyper = hypervisor.GetHypervisor()
     console_cmd = hyper.GetShellCommandForConsole(instance.name)
-    return node, console_cmd
+    # build ssh cmdline
+    argv = ["ssh", "-q", "-t"]
+    argv.extend(ssh.KNOWN_HOSTS_OPTS)
+    argv.extend(ssh.BATCH_MODE_OPTS)
+    argv.append(node)
+    argv.append(console_cmd)
+    return "ssh", argv
 
 
 class LUAddMDDRBDComponent(LogicalUnit):
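LUConnectConsole now returns the program name plus a full argv vector instead of a (node, command) pair, so the caller can exec ssh directly rather than parse a string. The consuming side might look roughly like this (hypothetical client code):

    import os

    def RunConsole(result):
      """Replace the current process with the console command."""
      program, argv = result   # e.g. ("ssh", ["ssh", "-q", "-t", node, cmd])
      os.execvp(program, argv) # argv[0] doubles as the program name
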
@@ -2887,8 +3223,8 @@ class LUAddMDDRBDComponent(LogicalUnit):
     # the device exists now
     # call the primary node to add the mirror to md
     logger.Info("adding new mirror component to md")
-    if not rpc.call_blockdev_addchild(instance.primary_node,
-                                           disk, new_drbd):
+    if not rpc.call_blockdev_addchildren(instance.primary_node,
+                                         disk, [new_drbd]):
       logger.Error("Can't add mirror compoment to md!")
       self.cfg.SetDiskID(new_drbd, remote_node)
       if not rpc.call_blockdev_remove(remote_node, new_drbd):
@@ -2954,7 +3290,8 @@ class LURemoveMDDRBDComponent(LogicalUnit):
       raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                  " instance." % self.op.disk_name)
     for child in disk.children:
-      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
+      if (child.dev_type == constants.LD_DRBD7 and
+          child.logical_id[2] == self.op.disk_id):
         break
     else:
       raise errors.OpPrereqError("Can't find the device with this port.")
@@ -2979,8 +3316,8 @@ class LURemoveMDDRBDComponent(LogicalUnit):
     child = self.child
     logger.Info("remove mirror component")
     self.cfg.SetDiskID(disk, instance.primary_node)
-    if not rpc.call_blockdev_removechild(instance.primary_node,
-                                              disk, child):
+    if not rpc.call_blockdev_removechildren(instance.primary_node,
+                                            disk, [child]):
       raise errors.OpExecError("Can't remove child from mirror.")
 
     for node in child.logical_id[:2]:
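These are the two renamed calls the commit title refers to: call_blockdev_addchild and call_blockdev_removechild become call_blockdev_addchildren and call_blockdev_removechildren, taking a list of child disks (here still single-element lists). On the node side the handler can then attach or detach a whole batch in one rpc round trip; a sketch of that shape (helper names are assumptions, not the real backend API):

    def MirrorAddChildren(parent_disk, new_children):
      """Attach several children to a mirrored block device."""
      parent_bdev = _FindBlockDevice(parent_disk)  # hypothetical lookup
      if parent_bdev is None:
        return False
      new_bdevs = [_FindBlockDevice(disk) for disk in new_children]
      if None in new_bdevs:
        return False
      parent_bdev.AddChildren(new_bdevs)
      return True
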
@@ -3060,7 +3397,6 @@ class LUReplaceDisks(LogicalUnit):
     # start of work
     remote_node = self.op.remote_node
     cfg = self.cfg
-    vgname = cfg.GetVGName()
     for dev in instance.disks:
       size = dev.size
       lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
@@ -3091,8 +3427,8 @@ class LUReplaceDisks(LogicalUnit):
       # the device exists now
       # call the primary node to add the mirror to md
       logger.Info("adding new mirror component to md")
-      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
-                                        new_drbd):
+      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
+                                           [new_drbd]):
         logger.Error("Can't add mirror compoment to md!")
         cfg.SetDiskID(new_drbd, remote_node)
         if not rpc.call_blockdev_remove(remote_node, new_drbd):
@@ -3126,8 +3462,8 @@ class LUReplaceDisks(LogicalUnit):
       dev, child, new_drbd = iv_names[name]
       logger.Info("remove mirror %s component" % name)
       cfg.SetDiskID(dev, instance.primary_node)
-      if not rpc.call_blockdev_removechild(instance.primary_node,
-                                                dev, child):
+      if not rpc.call_blockdev_removechildren(instance.primary_node,
+                                              dev, [child]):
         logger.Error("Can't remove child from mirror, aborting"
                      " *this device cleanup*.\nYou need to cleanup manually!!")
         continue
@@ -3178,7 +3514,7 @@ class LUQueryInstanceData(NoHooksLU):
     """
     self.cfg.SetDiskID(dev, instance.primary_node)
     dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
-    if dev.dev_type == "drbd":
+    if dev.dev_type in constants.LDS_DRBD:
       # we change the snode then (otherwise we use the one passed in)
       if dev.logical_id[0] == instance.primary_node:
         snode = dev.logical_id[1]
@@ -3237,6 +3573,7 @@ class LUQueryInstanceData(NoHooksLU):
         "memory": instance.memory,
         "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
         "disks": disks,
+        "vcpus": instance.vcpus,
         }
 
       result[instance.name] = idict
@@ -3244,37 +3581,6 @@ class LUQueryInstanceData(NoHooksLU):
     return result
 
 
-class LUQueryNodeData(NoHooksLU):
-  """Logical unit for querying node data.
-
-  """
-  _OP_REQP = ["nodes"]
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This only checks the optional node list against the existing names.
-
-    """
-    self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)
-
-  def Exec(self, feedback_fn):
-    """Compute and return the list of nodes.
-
-    """
-    ilist = [self.cfg.GetInstanceInfo(iname) for iname
-             in self.cfg.GetInstanceList()]
-    result = []
-    for node in self.wanted_nodes:
-      result.append((node.name, node.primary_ip, node.secondary_ip,
-                     [inst.name for inst in ilist
-                      if inst.primary_node == node.name],
-                     [inst.name for inst in ilist
-                      if node.name in inst.secondary_nodes],
-                     ))
-    return result
-
-
 class LUSetInstanceParms(LogicalUnit):
   """Modifies an instances's parameters.
 
@@ -3397,7 +3703,7 @@ class LUQueryExports(NoHooksLU):
       that node.
 
     """
-    return rpc.call_export_list([node.name for node in self.nodes])
+    return rpc.call_export_list(self.nodes)
 
 
 class LUExportInstance(LogicalUnit):
@@ -3470,7 +3776,7 @@ class LUExportInstance(LogicalUnit):
             logger.Error("could not snapshot block device %s on node %s" %
                          (disk.logical_id[1], src_node))
           else:
-            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
+            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                       logical_id=(vgname, new_dev_name),
                                       physical_id=(vgname, new_dev_name),
                                       iv_name=disk.iv_name)
@@ -3534,7 +3840,7 @@ class TagsLU(NoHooksLU):
       self.op.name = name
       self.target = self.cfg.GetNodeInfo(name)
     elif self.op.kind == constants.TAG_INSTANCE:
-      name = self.cfg.ExpandInstanceName(name)
+      name = self.cfg.ExpandInstanceName(self.op.name)
       if name is None:
         raise errors.OpPrereqError("Invalid instance name (%s)" %
                                    (self.op.name,))
@@ -3558,11 +3864,11 @@ class LUGetTags(TagsLU):
     return self.target.GetTags()
 
 
-class LUAddTag(TagsLU):
+class LUAddTags(TagsLU):
   """Sets a tag on a given object.
 
   """
-  _OP_REQP = ["kind", "name", "tag"]
+  _OP_REQP = ["kind", "name", "tags"]
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -3571,14 +3877,16 @@ class LUAddTag(TagsLU):
 
     """
     TagsLU.CheckPrereq(self)
-    objects.TaggableObject.ValidateTag(self.op.tag)
+    for tag in self.op.tags:
+      objects.TaggableObject.ValidateTag(tag)
 
   def Exec(self, feedback_fn):
     """Sets the tag.
 
     """
     try:
-      self.target.AddTag(self.op.tag)
+      for tag in self.op.tags:
+        self.target.AddTag(tag)
     except errors.TagError, err:
       raise errors.OpExecError("Error while setting tag: %s" % str(err))
     try:
@@ -3589,11 +3897,11 @@ class LUAddTag(TagsLU):
                                 " aborted. Please retry.")
 
 
-class LUDelTag(TagsLU):
-  """Delete a tag from a given object.
+class LUDelTags(TagsLU):
+  """Delete a list of tags from a given object.
 
   """
-  _OP_REQP = ["kind", "name", "tag"]
+  _OP_REQP = ["kind", "name", "tags"]
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -3602,15 +3910,23 @@ class LUDelTag(TagsLU):
 
     """
     TagsLU.CheckPrereq(self)
-    objects.TaggableObject.ValidateTag(self.op.tag)
-    if self.op.tag not in self.target.GetTags():
-      raise errors.OpPrereqError("Tag not found")
+    for tag in self.op.tags:
+      objects.TaggableObject.ValidateTag(tag)
+    del_tags = frozenset(self.op.tags)
+    cur_tags = self.target.GetTags()
+    if not del_tags <= cur_tags:
+      diff_tags = del_tags - cur_tags
+      diff_names = ["'%s'" % tag for tag in diff_tags]
+      diff_names.sort()
+      raise errors.OpPrereqError("Tag(s) %s not found" %
+                                 (",".join(diff_names)))
 
   def Exec(self, feedback_fn):
     """Remove the tag from the object.
 
     """
-    self.target.RemoveTag(self.op.tag)
+    for tag in self.op.tags:
+      self.target.RemoveTag(tag)
     try:
       self.cfg.Update(self.target)
     except errors.ConfigurationError:
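
The new prerequisite check relies on GetTags() returning a set-like object: del_tags <= cur_tags is frozenset's subset test, and del_tags - cur_tags yields exactly the missing tags for the error message. A worked example:

    cur_tags = frozenset(["web", "prod"])
    del_tags = frozenset(["prod", "staging"])
    del_tags <= cur_tags           # False: "staging" is not present
    sorted(del_tags - cur_tags)    # ["staging"] -> reported to the user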