Add utils.IsNormAbsPath function
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 0cf2955..27dfe5f 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -25,7 +25,6 @@
 
 import os
 import os.path
-import sha
 import time
 import tempfile
 import re
@@ -392,8 +391,8 @@ def _GetWantedInstances(lu, instances):
       wanted.append(instance)
 
   else:
-    wanted = lu.cfg.GetInstanceList()
-  return utils.NiceSort(wanted)
+    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
+  return wanted
 
 
 def _CheckOutputFields(static, dynamic, selected):
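
The ordering change above matters because utils.NiceSort orders names with
embedded numbers "naturally", and it is now applied only when the caller did
not pass an explicit list, so explicit requests keep their order. A rough
sketch of that ordering with hypothetical names (the real helper is
utils.NiceSort; re is already imported by cmdlib.py):

names = ["inst10.example.com", "inst2.example.com", "inst1.example.com"]
nice_sorted = sorted(names, key=lambda n: [int(p) if p.isdigit() else p
                                           for p in re.split(r"(\d+)", n)])
# nice_sorted == ["inst1.example.com", "inst2.example.com", "inst10.example.com"]
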
@@ -434,15 +433,27 @@ def _CheckNodeOnline(lu, node):
 
   @param lu: the LU on behalf of which we make the check
   @param node: the node to check
-  @raise errors.OpPrereqError: if the nodes is offline
+  @raise errors.OpPrereqError: if the node is offline
 
   """
   if lu.cfg.GetNodeInfo(node).offline:
     raise errors.OpPrereqError("Can't use offline node %s" % node)
 
 
+def _CheckNodeNotDrained(lu, node):
+  """Ensure that a given node is not drained.
+
+  @param lu: the LU on behalf of which we make the check
+  @param node: the node to check
+  @raise errors.OpPrereqError: if the node is drained
+
+  """
+  if lu.cfg.GetNodeInfo(node).drained:
+    raise errors.OpPrereqError("Can't use drained node %s" % node)
+
+
 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
-                          memory, vcpus, nics):
+                          memory, vcpus, nics, disk_template, disks):
   """Builds instance related env variables for hooks
 
   This builds the hook environment from individual variables.
@@ -464,6 +475,10 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   @type nics: list
   @param nics: list of tuples (ip, bridge, mac) representing
       the NICs the instance has
+  @type disk_template: string
+  @param disk_template: the disk template of the instance
+  @type disks: list
+  @param disks: the list of (size, mode) pairs
   @rtype: dict
   @return: the hook environment for this instance
 
@@ -481,6 +496,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     "INSTANCE_STATUS": str_status,
     "INSTANCE_MEMORY": memory,
     "INSTANCE_VCPUS": vcpus,
+    "INSTANCE_DISK_TEMPLATE": disk_template,
   }
 
   if nics:
@@ -490,12 +506,22 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
         ip = ""
       env["INSTANCE_NIC%d_IP" % idx] = ip
       env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
-      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
+      env["INSTANCE_NIC%d_MAC" % idx] = mac
   else:
     nic_count = 0
 
   env["INSTANCE_NIC_COUNT"] = nic_count
 
+  if disks:
+    disk_count = len(disks)
+    for idx, (size, mode) in enumerate(disks):
+      env["INSTANCE_DISK%d_SIZE" % idx] = size
+      env["INSTANCE_DISK%d_MODE" % idx] = mode
+  else:
+    disk_count = 0
+
+  env["INSTANCE_DISK_COUNT"] = disk_count
+
   return env
 
 
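
For reference, a hypothetical sample of the per-instance hook environment the
updated helper produces (only keys visible in this hunk are shown; all values
are invented):

env = {
  "INSTANCE_STATUS": "up",
  "INSTANCE_MEMORY": 512,
  "INSTANCE_VCPUS": 1,
  "INSTANCE_DISK_TEMPLATE": "drbd",
  "INSTANCE_NIC_COUNT": 1,
  "INSTANCE_NIC0_IP": "192.0.2.10",
  "INSTANCE_NIC0_BRIDGE": "xen-br0",
  "INSTANCE_NIC0_MAC": "aa:00:00:11:22:33",
  "INSTANCE_DISK_COUNT": 2,
  "INSTANCE_DISK0_SIZE": 10240,
  "INSTANCE_DISK0_MODE": "rw",
  "INSTANCE_DISK1_SIZE": 2048,
  "INSTANCE_DISK1_MODE": "rw",
}
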
@@ -524,6 +550,8 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
     'memory': bep[constants.BE_MEMORY],
     'vcpus': bep[constants.BE_VCPUS],
     'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
+    'disk_template': instance.disk_template,
+    'disks': [(disk.size, disk.mode) for disk in instance.disks],
   }
   if override:
     args.update(override)
@@ -618,7 +646,7 @@ class LUVerifyCluster(LogicalUnit):
 
   def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                   node_result, feedback_fn, master_files,
-                  drbd_map):
+                  drbd_map, vg_name):
     """Run multiple tests against a node.
 
     Test list:
@@ -638,6 +666,7 @@ class LUVerifyCluster(LogicalUnit):
     @param drbd_map: the used drbd minors for this node, in the form
         of minor: (instance, must_exist), which correspond to instances
         and their running status
+    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
 
     """
     node = nodeinfo.name
@@ -650,29 +679,39 @@ class LUVerifyCluster(LogicalUnit):
     # compares ganeti version
     local_version = constants.PROTOCOL_VERSION
     remote_version = node_result.get('version', None)
-    if not remote_version:
+    if not (remote_version and isinstance(remote_version, (list, tuple)) and
+            len(remote_version) == 2):
       feedback_fn("  - ERROR: connection to %s failed" % (node))
       return True
 
-    if local_version != remote_version:
-      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
-                      (local_version, node, remote_version))
+    if local_version != remote_version[0]:
+      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
+                  " node %s %s" % (local_version, node, remote_version[0]))
       return True
 
-    # checks vg existance and size > 20G
+    # node seems compatible, we can actually try to look into its results
 
     bad = False
-    vglist = node_result.get(constants.NV_VGLIST, None)
-    if not vglist:
-      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
-                      (node,))
-      bad = True
-    else:
-      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
-                                            constants.MIN_VG_SIZE)
-      if vgstatus:
-        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
+
+    # full package version
+    if constants.RELEASE_VERSION != remote_version[1]:
+      feedback_fn("  - WARNING: software version mismatch: master %s,"
+                  " node %s %s" %
+                  (constants.RELEASE_VERSION, node, remote_version[1]))
+
+    # checks vg existence and size > 20G
+    if vg_name is not None:
+      vglist = node_result.get(constants.NV_VGLIST, None)
+      if not vglist:
+        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
+                        (node,))
         bad = True
+      else:
+        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
+                                              constants.MIN_VG_SIZE)
+        if vgstatus:
+          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
+          bad = True
 
     # checks config file checksum
 
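
The version reported by the node is now a pair; only its first element is
fatal on mismatch. A minimal sketch with invented numbers:

local_version = 20                  # stands in for constants.PROTOCOL_VERSION
remote_version = (20, "2.0.0")      # (protocol version, full release version)
protocol_ok = (local_version == remote_version[0])  # mismatch -> error, stop
release_ok = ("2.0.0" == remote_version[1])         # mismatch -> warning only
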
@@ -734,16 +773,22 @@ class LUVerifyCluster(LogicalUnit):
                       (hv_name, hv_result))
 
     # check used drbd list
-    used_minors = node_result.get(constants.NV_DRBDLIST, [])
-    for minor, (iname, must_exist) in drbd_map.items():
-      if minor not in used_minors and must_exist:
-        feedback_fn("  - ERROR: drbd minor %d of instance %s is not active" %
-                    (minor, iname))
-        bad = True
-    for minor in used_minors:
-      if minor not in drbd_map:
-        feedback_fn("  - ERROR: unallocated drbd minor %d is in use" % minor)
-        bad = True
+    if vg_name is not None:
+      used_minors = node_result.get(constants.NV_DRBDLIST, [])
+      if not isinstance(used_minors, (tuple, list)):
+        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
+                    str(used_minors))
+      else:
+        for minor, (iname, must_exist) in drbd_map.items():
+          if minor not in used_minors and must_exist:
+            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
+                        " not active" % (minor, iname))
+            bad = True
+        for minor in used_minors:
+          if minor not in drbd_map:
+            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
+                        minor)
+            bad = True
 
     return bad
 
@@ -870,8 +915,12 @@ class LUVerifyCluster(LogicalUnit):
 
     """
     all_nodes = self.cfg.GetNodeList()
-    # TODO: populate the environment with useful information for verify hooks
-    env = {}
+    env = {
+      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
+      }
+    for node in self.cfg.GetAllNodesInfo().values():
+      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
+
     return env, [], all_nodes
 
   def Exec(self, feedback_fn):
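
With this change the cluster-verify hooks see cluster and per-node tags. For a
two-node cluster the added environment looks roughly like this (names and tags
invented):

env = {
  "CLUSTER_TAGS": "production rack:a",          # space-joined cluster tags
  "NODE_TAGS_node1.example.com": "vm-capable",  # one variable per node
  "NODE_TAGS_node2.example.com": "",            # empty string if untagged
}
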
@@ -893,6 +942,7 @@ class LUVerifyCluster(LogicalUnit):
     i_non_redundant = [] # Non redundant instances
     i_non_a_balanced = [] # Non auto-balanced instances
     n_offline = [] # List of offline nodes
+    n_drained = [] # List of nodes being drained
     node_volume = {}
     node_instance = {}
     node_info = {}
@@ -918,13 +968,14 @@ class LUVerifyCluster(LogicalUnit):
       constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                   node.secondary_ip) for node in nodeinfo
                                  if not node.offline],
-      constants.NV_LVLIST: vg_name,
       constants.NV_INSTANCELIST: hypervisors,
-      constants.NV_VGLIST: None,
       constants.NV_VERSION: None,
       constants.NV_HVINFO: self.cfg.GetHypervisorType(),
-      constants.NV_DRBDLIST: None,
       }
+    if vg_name is not None:
+      node_verify_param[constants.NV_VGLIST] = None
+      node_verify_param[constants.NV_LVLIST] = vg_name
+      node_verify_param[constants.NV_DRBDLIST] = None
     all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                            self.cfg.GetClusterName())
 
@@ -945,6 +996,9 @@ class LUVerifyCluster(LogicalUnit):
         ntype = "master"
       elif node_i.master_candidate:
         ntype = "master candidate"
+      elif node_i.drained:
+        ntype = "drained"
+        n_drained.append(node)
       else:
         ntype = "regular"
       feedback_fn("* Verifying node %s (%s)" % (node, ntype))
@@ -956,17 +1010,27 @@ class LUVerifyCluster(LogicalUnit):
 
       node_drbd = {}
       for minor, instance in all_drbd_map[node].items():
-        instance = instanceinfo[instance]
-        node_drbd[minor] = (instance.name, instance.admin_up)
+        if instance not in instanceinfo:
+          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
+                      instance)
+          # ghost instance should not be running, but otherwise we
+          # don't give double warnings (both ghost instance and
+          # unallocated minor in use)
+          node_drbd[minor] = (instance, False)
+        else:
+          instance = instanceinfo[instance]
+          node_drbd[minor] = (instance.name, instance.admin_up)
       result = self._VerifyNode(node_i, file_names, local_checksums,
                                 nresult, feedback_fn, master_files,
-                                node_drbd)
+                                node_drbd, vg_name)
       bad = bad or result
 
       lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
-      if isinstance(lvdata, basestring):
+      if vg_name is None:
+        node_volume[node] = {}
+      elif isinstance(lvdata, basestring):
         feedback_fn("  - ERROR: LVM problem on node %s: %s" %
-                    (node, lvdata.encode('string_escape')))
+                    (node, utils.SafeEncode(lvdata)))
         bad = True
         node_volume[node] = {}
       elif not isinstance(lvdata, dict):
@@ -996,7 +1060,6 @@ class LUVerifyCluster(LogicalUnit):
       try:
         node_info[node] = {
           "mfree": int(nodeinfo['memory_free']),
-          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
           "pinst": [],
           "sinst": [],
           # dictionary holding all instances this node is secondary for,
@@ -1007,8 +1070,19 @@ class LUVerifyCluster(LogicalUnit):
           # secondary.
           "sinst-by-pnode": {},
         }
-      except ValueError:
-        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
+        # FIXME: devise a free space model for file based instances as well
+        if vg_name is not None:
+          if (constants.NV_VGLIST not in nresult or
+              vg_name not in nresult[constants.NV_VGLIST]):
+            feedback_fn("  - ERROR: node %s didn't return data for the"
+                        " volume group '%s' - it is either missing or broken" %
+                        (node, vg_name))
+            bad = True
+            continue
+          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
+      except (ValueError, KeyError):
+        feedback_fn("  - ERROR: invalid nodeinfo value returned"
+                    " from node %s" % (node,))
         bad = True
         continue
 
@@ -1097,6 +1171,9 @@ class LUVerifyCluster(LogicalUnit):
     if n_offline:
       feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
 
+    if n_drained:
+      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
+
     return not bad
 
   def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
@@ -1212,6 +1289,7 @@ class LUVerifyDisks(NoHooksLU):
       if isinstance(lvs, basestring):
         logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
         res_nlvm[node] = lvs
+        continue
       elif not isinstance(lvs, dict):
         logging.warning("Connection to node %s failed or invalid data"
                         " returned", node)
@@ -1339,7 +1417,7 @@ class LUSetClusterParams(LogicalUnit):
   _OP_REQP = []
   REQ_BGL = False
 
-  def CheckParameters(self):
+  def CheckArguments(self):
     """Check parameters
 
     """
@@ -1348,7 +1426,7 @@ class LUSetClusterParams(LogicalUnit):
     if self.op.candidate_pool_size is not None:
       try:
         self.op.candidate_pool_size = int(self.op.candidate_pool_size)
-      except ValueError, err:
+      except (ValueError, TypeError), err:
         raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                    str(err))
       if self.op.candidate_pool_size < 1:
@@ -1380,8 +1458,6 @@ class LUSetClusterParams(LogicalUnit):
     if the given volume group is valid.
 
     """
-    # FIXME: This only works because there is only one parameter that can be
-    # changed or removed.
     if self.op.vg_name is not None and not self.op.vg_name:
       instances = self.cfg.GetAllInstancesInfo().values()
       for inst in instances:
@@ -1410,7 +1486,7 @@ class LUSetClusterParams(LogicalUnit):
     self.cluster = cluster = self.cfg.GetClusterInfo()
     # validate beparams changes
     if self.op.beparams:
-      utils.CheckBEParams(self.op.beparams)
+      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
       self.new_beparams = cluster.FillDict(
         cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
 
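
utils.CheckBEParams is replaced by the more generic utils.ForceDictType, which
checks every value of a parameter dict against a declared type map. A
simplified stand-in to illustrate the idea (not the real helper; the type-map
format used here is invented):

def _force_dict_type_sketch(target, key_types):
  """Illustration only of what ForceDictType-style validation does."""
  for key, val in target.items():
    expected = key_types.get(key)
    if expected is None:
      raise ValueError("Unknown parameter '%s'" % key)
    if expected == "bool" and isinstance(val, basestring):
      target[key] = val.lower() in ("true", "1", "yes")  # coerce textual booleans
    elif expected == "int" and isinstance(val, basestring):
      target[key] = int(val)                             # coerce textual integers
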
@@ -1438,6 +1514,7 @@ class LUSetClusterParams(LogicalUnit):
              hv_name in self.op.enabled_hypervisors)):
           # either this is a new hypervisor, or its parameters have changed
           hv_class = hypervisor.GetHypervisor(hv_name)
+          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
           hv_class.CheckParameterSyntax(hv_params)
           _CheckHVParams(self, node_list, hv_name, hv_params)
 
@@ -1446,8 +1523,11 @@ class LUSetClusterParams(LogicalUnit):
 
     """
     if self.op.vg_name is not None:
-      if self.op.vg_name != self.cfg.GetVGName():
-        self.cfg.SetVGName(self.op.vg_name)
+      new_volume = self.op.vg_name
+      if not new_volume:
+        new_volume = None
+      if new_volume != self.cfg.GetVGName():
+        self.cfg.SetVGName(new_volume)
       else:
         feedback_fn("Cluster LVM configuration already in desired"
                     " state, not changing")
@@ -1468,6 +1548,45 @@ class LUSetClusterParams(LogicalUnit):
       _AdjustCandidatePool(self)
 
 
+def _RedistributeAncillaryFiles(lu, additional_nodes=None):
+  """Distribute additional files which are part of the cluster configuration.
+
+  ConfigWriter takes care of distributing the config and ssconf files, but
+  there are more files which should be distributed to all nodes. This function
+  makes sure those are copied.
+
+  @param lu: calling logical unit
+  @param additional_nodes: list of nodes not in the config to distribute to
+
+  """
+  # 1. Gather target nodes
+  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
+  dist_nodes = lu.cfg.GetNodeList()
+  if additional_nodes is not None:
+    dist_nodes.extend(additional_nodes)
+  if myself.name in dist_nodes:
+    dist_nodes.remove(myself.name)
+  # 2. Gather files to distribute
+  dist_files = set([constants.ETC_HOSTS,
+                    constants.SSH_KNOWN_HOSTS_FILE,
+                    constants.RAPI_CERT_FILE,
+                    constants.RAPI_USERS_FILE,
+                   ])
+
+  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
+  for hv_name in enabled_hypervisors:
+    hv_class = hypervisor.GetHypervisor(hv_name)
+    dist_files.update(hv_class.GetAncillaryFiles())
+
+  # 3. Perform the files upload
+  for fname in dist_files:
+    if os.path.exists(fname):
+      result = lu.rpc.call_upload_file(dist_nodes, fname)
+      for to_node, to_result in result.items():
+        if to_result.failed or not to_result.data:
+          logging.error("Copy of file %s to node %s failed", fname, to_node)
+
+
 class LURedistributeConfig(NoHooksLU):
   """Force the redistribution of cluster configuration.
 
@@ -1493,6 +1612,7 @@ class LURedistributeConfig(NoHooksLU):
 
     """
     self.cfg.Update(self.cfg.GetClusterInfo())
+    _RedistributeAncillaryFiles(self)
 
 
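
The per-file copying previously open-coded in LUAddNode (removed further below)
is now funnelled through _RedistributeAncillaryFiles, so any LU that touches
cluster-wide files can reuse it. A usage sketch, with a hypothetical node name
for the not-yet-configured case:

_RedistributeAncillaryFiles(lu)                                      # all nodes
_RedistributeAncillaryFiles(lu, additional_nodes=["node9.example.com"])
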
 def _WaitForSync(lu, instance, oneshot=False, unlock=False):
@@ -1570,11 +1690,15 @@ def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
   result = True
   if on_primary or dev.AssembleOnSecondary():
     rstats = lu.rpc.call_blockdev_find(node, dev)
-    if rstats.failed or not rstats.data:
-      logging.warning("Node %s: disk degraded, not found or node down", node)
+    msg = rstats.RemoteFailMsg()
+    if msg:
+      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
+      result = False
+    elif not rstats.payload:
+      lu.LogWarning("Can't find disk on node %s", node)
       result = False
     else:
-      result = result and (not rstats.data[idx])
+      result = result and (not rstats.payload[idx])
   if dev.children:
     for child in dev.children:
       result = result and _CheckDiskConsistency(lu, child, node, on_primary)
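
This hunk shows the RPC error-handling convention the rest of the patch
converts to: instead of testing result.failed / result.data, callers ask the
result for a remote failure message and, on success, read result.payload.
In isolation:

result = lu.rpc.call_blockdev_find(node, dev)
msg = result.RemoteFailMsg()      # truthy failure message when the call failed
if msg:
  lu.LogWarning("Can't find disk on node %s: %s", node, msg)
else:
  disk_status = result.payload    # the actual return value on success
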
@@ -1600,9 +1724,11 @@ class LUDiagnoseOS(NoHooksLU):
                        selected=self.op.output_fields)
 
     # Lock all nodes, in shared mode
+    # Temporary removal of locks, should be reverted later
+    # TODO: reintroduce locks when they are lighter-weight
     self.needed_locks = {}
-    self.share_locks[locking.LEVEL_NODE] = 1
-    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+    #self.share_locks[locking.LEVEL_NODE] = 1
+    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -1617,7 +1743,7 @@ class LUDiagnoseOS(NoHooksLU):
     @param rlist: a map with node names as keys and OS objects as values
 
     @rtype: dict
-    @returns: a dictionary with osnames as keys and as value another map, with
+    @return: a dictionary with osnames as keys and as value another map, with
         nodes as keys and list of OS objects as values, eg::
 
           {"debian-etch": {"node1": [<object>,...],
@@ -1626,6 +1752,11 @@ class LUDiagnoseOS(NoHooksLU):
 
     """
     all_os = {}
+    # we build here the list of nodes that didn't fail the RPC (at RPC
+    # level), so that nodes with a non-responding node daemon don't
+    # make all OSes invalid
+    good_nodes = [node_name for node_name in rlist
+                  if not rlist[node_name].failed]
     for node_name, nr in rlist.iteritems():
       if nr.failed or not nr.data:
         continue
@@ -1634,7 +1765,7 @@ class LUDiagnoseOS(NoHooksLU):
           # build a list of nodes for this os containing empty lists
           # for each node in node_list
           all_os[os_obj.name] = {}
-          for nname in node_list:
+          for nname in good_nodes:
             all_os[os_obj.name][nname] = []
         all_os[os_obj.name][node_name].append(os_obj)
     return all_os
@@ -1643,9 +1774,7 @@ class LUDiagnoseOS(NoHooksLU):
     """Compute the list of OSes.
 
     """
-    node_list = self.acquired_locks[locking.LEVEL_NODE]
-    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
-                   if node in node_list]
+    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
     node_data = self.rpc.call_os_diagnose(valid_nodes)
     if node_data == False:
       raise errors.OpExecError("Can't gather the list of OSes")
@@ -1743,13 +1872,13 @@ class LUQueryNodes(NoHooksLU):
   """Logical unit for querying nodes.
 
   """
-  _OP_REQP = ["output_fields", "names"]
+  _OP_REQP = ["output_fields", "names", "use_locking"]
   REQ_BGL = False
   _FIELDS_DYNAMIC = utils.FieldSet(
     "dtotal", "dfree",
     "mtotal", "mnode", "mfree",
     "bootid",
-    "ctotal",
+    "ctotal", "cnodes", "csockets",
     )
 
   _FIELDS_STATIC = utils.FieldSet(
@@ -1760,6 +1889,7 @@ class LUQueryNodes(NoHooksLU):
     "master_candidate",
     "master",
     "offline",
+    "drained",
     )
 
   def ExpandNames(self):
@@ -1775,7 +1905,8 @@ class LUQueryNodes(NoHooksLU):
     else:
       self.wanted = locking.ALL_SET
 
-    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+    self.do_locking = self.do_node_query and self.op.use_locking
     if self.do_locking:
       # if we don't request only static fields, we need to lock the nodes
       self.needed_locks[locking.LEVEL_NODE] = self.wanted
@@ -1810,7 +1941,7 @@ class LUQueryNodes(NoHooksLU):
 
     # begin data gathering
 
-    if self.do_locking:
+    if self.do_node_query:
       live_data = {}
       node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                           self.cfg.GetHypervisorType())
@@ -1827,6 +1958,8 @@ class LUQueryNodes(NoHooksLU):
             "dfree": fn(int, nodeinfo.get('vg_free', None)),
             "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
             "bootid": nodeinfo.get('bootid', None),
+            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
+            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
             }
         else:
           live_data[name] = {}
@@ -1881,6 +2014,8 @@ class LUQueryNodes(NoHooksLU):
           val = node.name == master_node
         elif field == "offline":
           val = node.offline
+        elif field == "drained":
+          val = node.drained
         elif self._FIELDS_DYNAMIC.Matches(field):
           val = live_data[node.name].get(field, None)
         else:
@@ -2077,7 +2212,7 @@ class LUAddNode(LogicalUnit):
                                  primary_ip=primary_ip,
                                  secondary_ip=secondary_ip,
                                  master_candidate=master_candidate,
-                                 offline=False)
+                                 offline=False, drained=False)
 
   def Exec(self, feedback_fn):
     """Adds the new node to the cluster.
@@ -2119,8 +2254,10 @@ class LUAddNode(LogicalUnit):
                                     keyarray[2],
                                     keyarray[3], keyarray[4], keyarray[5])
 
-    if result.failed or not result.data:
-      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
+    msg = result.RemoteFailMsg()
+    if msg:
+      raise errors.OpExecError("Cannot transfer ssh keys to the"
+                               " new node: %s" % msg)
 
     # Add node to our /etc/hosts, and add key to known_hosts
     utils.AddHostToEtcHosts(new_node.name)
@@ -2148,36 +2285,14 @@ class LUAddNode(LogicalUnit):
       if result[verifier].data['nodelist']:
         for failed in result[verifier].data['nodelist']:
           feedback_fn("ssh/hostname verification failed %s -> %s" %
-                      (verifier, result[verifier]['nodelist'][failed]))
+                      (verifier, result[verifier].data['nodelist'][failed]))
         raise errors.OpExecError("ssh/hostname verification failed.")
 
-    # Distribute updated /etc/hosts and known_hosts to all nodes,
-    # including the node just added
-    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
-    dist_nodes = self.cfg.GetNodeList()
-    if not self.op.readd:
-      dist_nodes.append(node)
-    if myself.name in dist_nodes:
-      dist_nodes.remove(myself.name)
-
-    logging.debug("Copying hosts and known_hosts to all nodes")
-    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
-      result = self.rpc.call_upload_file(dist_nodes, fname)
-      for to_node, to_result in result.iteritems():
-        if to_result.failed or not to_result.data:
-          logging.error("Copy of file %s to node %s failed", fname, to_node)
-
-    to_copy = []
-    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
-      to_copy.append(constants.VNC_PASSWORD_FILE)
-    for fname in to_copy:
-      result = self.rpc.call_upload_file([node], fname)
-      if result[node].failed or not result[node]:
-        logging.error("Could not copy file %s to node %s", fname, node)
-
     if self.op.readd:
+      _RedistributeAncillaryFiles(self)
       self.context.ReaddNode(new_node)
     else:
+      _RedistributeAncillaryFiles(self, additional_nodes=[node])
       self.context.AddNode(new_node)
 
 
@@ -2197,11 +2312,13 @@ class LUSetNodeParams(LogicalUnit):
     self.op.node_name = node_name
     _CheckBooleanOpField(self.op, 'master_candidate')
     _CheckBooleanOpField(self.op, 'offline')
-    if self.op.master_candidate is None and self.op.offline is None:
+    _CheckBooleanOpField(self.op, 'drained')
+    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
+    if all_mods.count(None) == 3:
       raise errors.OpPrereqError("Please pass at least one modification")
-    if self.op.offline == True and self.op.master_candidate == True:
-      raise errors.OpPrereqError("Can't set the node into offline and"
-                                 " master_candidate at the same time")
+    if all_mods.count(True) > 1:
+      raise errors.OpPrereqError("Can't set the node into more than one"
+                                 " state at the same time")
 
   def ExpandNames(self):
     self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
@@ -2216,6 +2333,7 @@ class LUSetNodeParams(LogicalUnit):
       "OP_TARGET": self.op.node_name,
       "MASTER_CANDIDATE": str(self.op.master_candidate),
       "OFFLINE": str(self.op.offline),
+      "DRAINED": str(self.op.drained),
       }
     nl = [self.cfg.GetMasterNode(),
           self.op.node_name]
@@ -2229,12 +2347,12 @@ class LUSetNodeParams(LogicalUnit):
     """
     node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
 
-    if ((self.op.master_candidate == False or self.op.offline == True)
-        and node.master_candidate):
+    if ((self.op.master_candidate == False or self.op.offline == True or
+         self.op.drained == True) and node.master_candidate):
       # we will demote the node from master_candidate
       if self.op.node_name == self.cfg.GetMasterNode():
         raise errors.OpPrereqError("The master node has to be a"
-                                   " master candidate and online")
+                                   " master candidate, online and not drained")
       cp_size = self.cfg.GetClusterInfo().candidate_pool_size
       num_candidates, _ = self.cfg.GetMasterCandidateStats()
       if num_candidates <= cp_size:
@@ -2245,10 +2363,11 @@ class LUSetNodeParams(LogicalUnit):
         else:
           raise errors.OpPrereqError(msg)
 
-    if (self.op.master_candidate == True and node.offline and
-        not self.op.offline == False):
-      raise errors.OpPrereqError("Can't set an offline node to"
-                                 " master_candidate")
+    if (self.op.master_candidate == True and
+        ((node.offline and not self.op.offline == False) or
+         (node.drained and not self.op.drained == False))):
+      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
+                                 " to master_candidate" % node.name)
 
     return
 
@@ -2259,29 +2378,46 @@ class LUSetNodeParams(LogicalUnit):
     node = self.node
 
     result = []
+    changed_mc = False
 
     if self.op.offline is not None:
       node.offline = self.op.offline
       result.append(("offline", str(self.op.offline)))
-      if self.op.offline == True and node.master_candidate:
-        node.master_candidate = False
-        result.append(("master_candidate", "auto-demotion due to offline"))
+      if self.op.offline == True:
+        if node.master_candidate:
+          node.master_candidate = False
+          changed_mc = True
+          result.append(("master_candidate", "auto-demotion due to offline"))
+        if node.drained:
+          node.drained = False
+          result.append(("drained", "clear drained status due to offline"))
 
     if self.op.master_candidate is not None:
       node.master_candidate = self.op.master_candidate
+      changed_mc = True
       result.append(("master_candidate", str(self.op.master_candidate)))
       if self.op.master_candidate == False:
         rrc = self.rpc.call_node_demote_from_mc(node.name)
-        if (rrc.failed or not isinstance(rrc.data, (tuple, list))
-            or len(rrc.data) != 2):
-          self.LogWarning("Node rpc error: %s" % rrc.error)
-        elif not rrc.data[0]:
-          self.LogWarning("Node failed to demote itself: %s" % rrc.data[1])
+        msg = rrc.RemoteFailMsg()
+        if msg:
+          self.LogWarning("Node failed to demote itself: %s" % msg)
+
+    if self.op.drained is not None:
+      node.drained = self.op.drained
+      result.append(("drained", str(self.op.drained)))
+      if self.op.drained == True:
+        if node.master_candidate:
+          node.master_candidate = False
+          changed_mc = True
+          result.append(("master_candidate", "auto-demotion due to drain"))
+        if node.offline:
+          node.offline = False
+          result.append(("offline", "clear offline status due to drain"))
 
     # this will trigger configuration file update, if needed
     self.cfg.Update(node)
     # this will trigger job queue propagation or cleanup
-    if self.op.node_name != self.cfg.GetMasterNode():
+    if changed_mc:
       self.context.ReaddNode(node)
 
     return result
@@ -2319,9 +2455,14 @@ class LUQueryClusterInfo(NoHooksLU):
       "master": cluster.master_node,
       "default_hypervisor": cluster.default_hypervisor,
       "enabled_hypervisors": cluster.enabled_hypervisors,
-      "hvparams": cluster.hvparams,
+      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
+                        for hypervisor in cluster.enabled_hypervisors]),
       "beparams": cluster.beparams,
       "candidate_pool_size": cluster.candidate_pool_size,
+      "default_bridge": cluster.default_bridge,
+      "master_netdev": cluster.master_netdev,
+      "volume_group_name": cluster.volume_group_name,
+      "file_storage_dir": cluster.file_storage_dir,
       }
 
     return result
@@ -2439,10 +2580,11 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
-      if result.failed or not result:
+      msg = result.RemoteFailMsg()
+      if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
-                           " (is_primary=False, pass=1)",
-                           inst_disk.iv_name, node)
+                           " (is_primary=False, pass=1): %s",
+                           inst_disk.iv_name, node, msg)
         if not ignore_secondaries:
           disks_ok = False
 
@@ -2455,12 +2597,14 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
         continue
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
-      if result.failed or not result:
+      msg = result.RemoteFailMsg()
+      if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
-                           " (is_primary=True, pass=2)",
-                           inst_disk.iv_name, node)
+                           " (is_primary=True, pass=2): %s",
+                           inst_disk.iv_name, node, msg)
         disks_ok = False
-    device_info.append((instance.primary_node, inst_disk.iv_name, result.data))
+    device_info.append((instance.primary_node, inst_disk.iv_name,
+                        result.payload))
 
   # leave the disks configured for the primary node
   # this is a workaround that would be fixed better by
@@ -2550,17 +2694,18 @@ def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
   ignored.
 
   """
-  result = True
+  all_result = True
   for disk in instance.disks:
     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(top_disk, node)
       result = lu.rpc.call_blockdev_shutdown(node, top_disk)
-      if result.failed or not result.data:
-        logging.error("Could not shutdown block device %s on node %s",
-                      disk.iv_name, node)
+      msg = result.RemoteFailMsg()
+      if msg:
+        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
+                      disk.iv_name, node, msg)
         if not ignore_primary or node != instance.primary_node:
-          result = False
-  return result
+          all_result = False
+  return all_result
 
 
 def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
@@ -2632,15 +2777,48 @@ class LUStartupInstance(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
+    # extra beparams
+    self.beparams = getattr(self.op, "beparams", {})
+    if self.beparams:
+      if not isinstance(self.beparams, dict):
+        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
+                                   " dict" % (type(self.beparams), ))
+      # fill the beparams dict
+      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
+      self.op.beparams = self.beparams
+
+    # extra hvparams
+    self.hvparams = getattr(self.op, "hvparams", {})
+    if self.hvparams:
+      if not isinstance(self.hvparams, dict):
+        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
+                                   " dict" % (type(self.hvparams), ))
+
+      # check hypervisor parameter syntax (locally)
+      cluster = self.cfg.GetClusterInfo()
+      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
+      filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
+                                    instance.hvparams)
+      filled_hvp.update(self.hvparams)
+      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
+      hv_type.CheckParameterSyntax(filled_hvp)
+      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+      self.op.hvparams = self.hvparams
+
     _CheckNodeOnline(self, instance.primary_node)
 
     bep = self.cfg.GetClusterInfo().FillBE(instance)
     # check bridges existence
     _CheckInstanceBridgesExist(self, instance)
 
-    _CheckNodeFreeMemory(self, instance.primary_node,
-                         "starting instance %s" % instance.name,
-                         bep[constants.BE_MEMORY], instance.hypervisor)
+    remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                              instance.name,
+                                              instance.hypervisor)
+    remote_info.Raise()
+    if not remote_info.data:
+      _CheckNodeFreeMemory(self, instance.primary_node,
+                           "starting instance %s" % instance.name,
+                           bep[constants.BE_MEMORY], instance.hypervisor)
 
   def Exec(self, feedback_fn):
     """Start the instance.
@@ -2648,7 +2826,6 @@ class LUStartupInstance(LogicalUnit):
     """
     instance = self.instance
     force = self.op.force
-    extra_args = getattr(self.op, "extra_args", "")
 
     self.cfg.MarkInstanceUp(instance.name)
 
@@ -2656,7 +2833,8 @@ class LUStartupInstance(LogicalUnit):
 
     _StartInstanceDisks(self, instance, force)
 
-    result = self.rpc.call_instance_start(node_current, instance, extra_args)
+    result = self.rpc.call_instance_start(node_current, instance,
+                                          self.hvparams, self.beparams)
     msg = result.RemoteFailMsg()
     if msg:
       _ShutdownInstanceDisks(self, instance)
@@ -2690,6 +2868,7 @@ class LURebootInstance(LogicalUnit):
     """
     env = {
       "IGNORE_SECONDARIES": self.op.ignore_secondaries,
+      "REBOOT_TYPE": self.op.reboot_type,
       }
     env.update(_BuildInstanceHookEnvByObject(self, self.instance))
     nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
@@ -2717,22 +2896,27 @@ class LURebootInstance(LogicalUnit):
     instance = self.instance
     ignore_secondaries = self.op.ignore_secondaries
     reboot_type = self.op.reboot_type
-    extra_args = getattr(self.op, "extra_args", "")
 
     node_current = instance.primary_node
 
     if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                        constants.INSTANCE_REBOOT_HARD]:
+      for disk in instance.disks:
+        self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
-                                             reboot_type, extra_args)
-      if result.failed or not result.data:
-        raise errors.OpExecError("Could not reboot instance")
+                                             reboot_type)
+      msg = result.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Could not reboot instance: %s" % msg)
     else:
-      if not self.rpc.call_instance_shutdown(node_current, instance):
-        raise errors.OpExecError("could not shutdown instance for full reboot")
+      result = self.rpc.call_instance_shutdown(node_current, instance)
+      msg = result.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Could not shutdown instance for"
+                                 " full reboot: %s" % msg)
       _ShutdownInstanceDisks(self, instance)
       _StartInstanceDisks(self, instance, ignore_secondaries)
-      result = self.rpc.call_instance_start(node_current, instance, extra_args)
+      result = self.rpc.call_instance_start(node_current, instance, None, None)
       msg = result.RemoteFailMsg()
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -2783,8 +2967,9 @@ class LUShutdownInstance(LogicalUnit):
     node_current = instance.primary_node
     self.cfg.MarkInstanceDown(instance.name)
     result = self.rpc.call_instance_shutdown(node_current, instance)
-    if result.failed or not result.data:
-      self.proc.LogWarning("Could not shutdown instance")
+    msg = result.RemoteFailMsg()
+    if msg:
+      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
 
     _ShutdownInstanceDisks(self, instance)
 
@@ -2831,7 +3016,8 @@ class LUReinstallInstance(LogicalUnit):
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    if remote_info.failed or remote_info.data:
+    remote_info.Raise()
+    if remote_info.data:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
                                   instance.primary_node))
@@ -2866,7 +3052,7 @@ class LUReinstallInstance(LogicalUnit):
     _StartInstanceDisks(self, inst, None)
     try:
       feedback_fn("Running the instance OS create scripts...")
-      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
+      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
       msg = result.RemoteFailMsg()
       if msg:
         raise errors.OpExecError("Could not install OS for instance %s"
@@ -3034,12 +3220,14 @@ class LURemoveInstance(LogicalUnit):
                  instance.name, instance.primary_node)
 
     result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
-    if result.failed or not result.data:
+    msg = result.RemoteFailMsg()
+    if msg:
       if self.op.ignore_failures:
-        feedback_fn("Warning: can't shutdown instance")
+        feedback_fn("Warning: can't shutdown instance: %s" % msg)
       else:
-        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
-                                 (instance.name, instance.primary_node))
+        raise errors.OpExecError("Could not shutdown instance %s on"
+                                 " node %s: %s" %
+                                 (instance.name, instance.primary_node, msg))
 
     logging.info("Removing block devices for instance %s", instance.name)
 
@@ -3059,18 +3247,18 @@ class LUQueryInstances(NoHooksLU):
   """Logical unit for querying instances.
 
   """
-  _OP_REQP = ["output_fields", "names"]
+  _OP_REQP = ["output_fields", "names", "use_locking"]
   REQ_BGL = False
   _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
-                                    "admin_state", "admin_ram",
+                                    "admin_state",
                                     "disk_template", "ip", "mac", "bridge",
                                     "sda_size", "sdb_size", "vcpus", "tags",
                                     "network_port", "beparams",
-                                    "(disk).(size)/([0-9]+)",
-                                    "(disk).(sizes)",
-                                    "(nic).(mac|ip|bridge)/([0-9]+)",
-                                    "(nic).(macs|ips|bridges)",
-                                    "(disk|nic).(count)",
+                                    r"(disk)\.(size)/([0-9]+)",
+                                    r"(disk)\.(sizes)", "disk_usage",
+                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
+                                    r"(nic)\.(macs|ips|bridges)",
+                                    r"(disk|nic)\.(count)",
                                     "serial_no", "hypervisor", "hvparams",] +
                                   ["hv/%s" % name
                                    for name in constants.HVS_PARAMETERS] +
@@ -3093,7 +3281,8 @@ class LUQueryInstances(NoHooksLU):
     else:
       self.wanted = locking.ALL_SET
 
-    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
+    self.do_locking = self.do_node_query and self.op.use_locking
     if self.do_locking:
       self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
       self.needed_locks[locking.LEVEL_NODE] = []
@@ -3114,19 +3303,25 @@ class LUQueryInstances(NoHooksLU):
 
     """
     all_info = self.cfg.GetAllInstancesInfo()
-    if self.do_locking:
-      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
-    elif self.wanted != locking.ALL_SET:
-      instance_names = self.wanted
-      missing = set(instance_names).difference(all_info.keys())
-      if missing:
-        raise errors.OpExecError(
-          "Some instances were removed before retrieving their data: %s"
-          % missing)
+    if self.wanted == locking.ALL_SET:
+      # caller didn't specify instance names, so ordering is not important
+      if self.do_locking:
+        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+      else:
+        instance_names = all_info.keys()
+      instance_names = utils.NiceSort(instance_names)
     else:
-      instance_names = all_info.keys()
+      # caller did specify names, so we must keep the ordering
+      if self.do_locking:
+        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
+      else:
+        tgt_set = all_info.keys()
+      missing = set(self.wanted).difference(tgt_set)
+      if missing:
+        raise errors.OpExecError("Some instances were removed before"
+                                 " retrieving their data: %s" % missing)
+      instance_names = self.wanted
 
-    instance_names = utils.NiceSort(instance_names)
     instance_list = [all_info[iname] for iname in instance_names]
 
     # begin data gathering
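
The net effect of the reworked selection logic, with hypothetical instance
names:

all_info = {"b1.example.com": None, "a10.example.com": None, "a2.example.com": None}
# No names requested  -> nice-sorted output:
#   ["a2.example.com", "a10.example.com", "b1.example.com"]
# Names requested as ["b1.example.com", "a10.example.com"]
#   -> returned in exactly that order; names that vanished meanwhile raise
#      OpExecError instead of being silently dropped.
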
@@ -3136,7 +3331,7 @@ class LUQueryInstances(NoHooksLU):
 
     bad_nodes = []
     off_nodes = []
-    if self.do_locking:
+    if self.do_node_query:
       live_data = {}
       node_data = self.rpc.call_all_instances_info(nodes, hv_list)
       for name in nodes:
@@ -3217,6 +3412,9 @@ class LUQueryInstances(NoHooksLU):
             val = instance.FindDisk(idx).size
           except errors.OpPrereqError:
             val = None
+        elif field == "disk_usage": # total disk usage per node
+          disk_sizes = [{'size': disk.size} for disk in instance.disks]
+          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
         elif field == "tags":
           val = list(instance.GetTags())
         elif field == "serial_no":
@@ -3336,6 +3534,7 @@ class LUFailoverInstance(LogicalUnit):
 
     target_node = secondary_nodes[0]
     _CheckNodeOnline(self, target_node)
+    _CheckNodeNotDrained(self, target_node)
     # check memory requirements on the secondary node
     _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                          instance.name, bep[constants.BE_MEMORY],
@@ -3375,15 +3574,17 @@ class LUFailoverInstance(LogicalUnit):
                  instance.name, source_node)
 
     result = self.rpc.call_instance_shutdown(source_node, instance)
-    if result.failed or not result.data:
+    msg = result.RemoteFailMsg()
+    if msg:
       if self.op.ignore_consistency:
         self.proc.LogWarning("Could not shutdown instance %s on node %s."
-                             " Proceeding"
-                             " anyway. Please make sure node %s is down",
-                             instance.name, source_node, source_node)
+                             " Proceeding anyway. Please make sure node"
+                             " %s is down. Error details: %s",
+                             instance.name, source_node, source_node, msg)
       else:
-        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
-                                 (instance.name, source_node))
+        raise errors.OpExecError("Could not shutdown instance %s on"
+                                 " node %s: %s" %
+                                 (instance.name, source_node, msg))
 
     feedback_fn("* deactivating the instance's disks on source node")
     if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
@@ -3406,7 +3607,7 @@ class LUFailoverInstance(LogicalUnit):
         raise errors.OpExecError("Can't activate the instance's disks")
 
       feedback_fn("* starting the instance on the target node")
-      result = self.rpc.call_instance_start(target_node, instance, None)
+      result = self.rpc.call_instance_start(target_node, instance, None, None)
       msg = result.RemoteFailMsg()
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -3443,6 +3644,8 @@ class LUMigrateInstance(LogicalUnit):
 
     """
     env = _BuildInstanceHookEnvByObject(self, self.instance)
+    env["MIGRATE_LIVE"] = self.op.live
+    env["MIGRATE_CLEANUP"] = self.op.cleanup
     nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
     return env, nl, nl
 
@@ -3464,8 +3667,8 @@ class LUMigrateInstance(LogicalUnit):
 
     secondary_nodes = instance.secondary_nodes
     if not secondary_nodes:
-      raise errors.ProgrammerError("no secondary node but using "
-                                   "drbd8 disk template")
+      raise errors.ConfigurationError("No secondary node but using"
+                                      " drbd8 disk template")
 
     i_be = self.cfg.GetClusterInfo().FillBE(instance)
 
@@ -3484,6 +3687,7 @@ class LUMigrateInstance(LogicalUnit):
                                  (brlist, target_node))
 
     if not self.op.cleanup:
+      _CheckNodeNotDrained(self, target_node)
       result = self.rpc.call_instance_migratable(instance.primary_node,
                                                  instance)
       msg = result.RemoteFailMsg()
@@ -3512,7 +3716,7 @@ class LUMigrateInstance(LogicalUnit):
         if msg:
           raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                    (node, msg))
-        node_done, node_percent = nres.data[1]
+        node_done, node_percent = nres.payload
         all_done = all_done and node_done
         if node_percent is not None:
           min_percent = min(min_percent, node_percent)
@@ -3698,11 +3902,11 @@ class LUMigrateInstance(LogicalUnit):
     msg = result.RemoteFailMsg()
     if msg:
       log_err = ("Failed fetching source migration information from %s: %s" %
-                  (source_node, msg))
+                 (source_node, msg))
       logging.error(log_err)
       raise errors.OpExecError(log_err)
 
-    self.migration_info = migration_info = result.data[1]
+    self.migration_info = migration_info = result.payload
 
     # Then switch the disks to master/master mode
     self._EnsureSecondary(target_node)
@@ -3805,7 +4009,7 @@ def _CreateBlockDev(lu, node, instance, device, force_create,
       (this will be represented as a LVM tag)
   @type force_open: boolean
   @param force_open: this parameter will be passed to the
-      L{backend.CreateBlockDevice} function where it specifies
+      L{backend.BlockdevCreate} function where it specifies
       whether we run on primary or not, and it affects both
       the child assembly and the device own Open() execution
 
@@ -3840,7 +4044,7 @@ def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
       (this will be represented as a LVM tag)
   @type force_open: boolean
   @param force_open: this parameter will be passed to the
-      L{backend.CreateBlockDevice} function where it specifies
+      L{backend.BlockdevCreate} function where it specifies
       whether we run on primary or not, and it affects both
       the child assembly and the device own Open() execution
 
@@ -3854,7 +4058,7 @@ def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
                              " node %s for instance %s: %s" %
                              (device, node, instance.name, msg))
   if device.physical_id is None:
-    device.physical_id = result.data[1]
+    device.physical_id = result.payload
 
 
 def _GenerateUniqueNames(lu, exts):
@@ -3949,7 +4153,7 @@ def _GenerateDiskTemplate(lu, template_name,
                               iv_name="disk/%d" % disk_index,
                               logical_id=(file_driver,
                                           "%s/disk%d" % (file_storage_dir,
-                                                         idx)),
+                                                         disk_index)),
                               mode=disk["mode"])
       disks.append(disk_dev)
   else:
@@ -4020,15 +4224,15 @@ def _RemoveDisks(lu, instance):
   """
   logging.info("Removing block devices for instance %s", instance.name)
 
-  result = True
+  all_result = True
   for device in instance.disks:
     for node, disk in device.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(disk, node)
-      result = lu.rpc.call_blockdev_remove(node, disk)
-      if result.failed or not result.data:
-        lu.proc.LogWarning("Could not remove block device %s on node %s,"
-                           " continuing anyway", device.iv_name, node)
-        result = False
+      msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+      if msg:
+        lu.LogWarning("Could not remove block device %s on node %s,"
+                      " continuing anyway: %s", device.iv_name, node, msg)
+        all_result = False
 
   if instance.disk_template == constants.DT_FILE:
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
@@ -4036,9 +4240,9 @@ def _RemoveDisks(lu, instance):
                                                  file_storage_dir)
     if result.failed or not result.data:
       logging.error("Could not remove directory '%s'", file_storage_dir)
-      result = False
+      all_result = False
 
-  return result
+  return all_result
 
 
 def _ComputeDiskSize(disk_template, disks):
@@ -4083,13 +4287,12 @@ def _CheckHVParams(lu, nodenames, hvname, hvparams):
                                                   hvparams)
   for node in nodenames:
     info = hvinfo[node]
-    info.Raise()
-    if not info.data or not isinstance(info.data, (tuple, list)):
-      raise errors.OpPrereqError("Cannot get current information"
-                                 " from node '%s' (%s)" % (node, info.data))
-    if not info.data[0]:
-      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
-                                 " %s" % info.data[1])
+    if info.offline:
+      continue
+    msg = info.RemoteFailMsg()
+    if msg:
+      raise errors.OpPrereqError("Hypervisor parameter validation"
+                                 " failed on node %s: %s" % (node, msg))
 
 
 class LUCreateInstance(LogicalUnit):
@@ -4149,14 +4352,14 @@ class LUCreateInstance(LogicalUnit):
                                   ",".join(enabled_hvs)))
 
     # check hypervisor parameter syntax (locally)
-
+    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
     filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                   self.op.hvparams)
     hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
     hv_type.CheckParameterSyntax(filled_hvp)
 
     # fill and remember the beparams dict
-    utils.CheckBEParams(self.op.beparams)
+    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
     self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                     self.op.beparams)
 
@@ -4313,23 +4516,24 @@ class LUCreateInstance(LogicalUnit):
 
     """
     env = {
-      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
-      "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
-      "INSTANCE_ADD_MODE": self.op.mode,
+      "ADD_MODE": self.op.mode,
       }
     if self.op.mode == constants.INSTANCE_IMPORT:
-      env["INSTANCE_SRC_NODE"] = self.op.src_node
-      env["INSTANCE_SRC_PATH"] = self.op.src_path
-      env["INSTANCE_SRC_IMAGES"] = self.src_images
+      env["SRC_NODE"] = self.op.src_node
+      env["SRC_PATH"] = self.op.src_path
+      env["SRC_IMAGES"] = self.src_images
 
-    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
+    env.update(_BuildInstanceHookEnv(
+      name=self.op.instance_name,
       primary_node=self.op.pnode,
       secondary_nodes=self.secondaries,
-      status=self.instance_status,
+      status=self.op.start,
       os_type=self.op.os_type,
       memory=self.be_full[constants.BE_MEMORY],
       vcpus=self.be_full[constants.BE_VCPUS],
       nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
+      disk_template=self.op.disk_template,
+      disks=[(d["size"], d["mode"]) for d in self.disks],
     ))
 
     nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
@@ -4346,7 +4550,6 @@ class LUCreateInstance(LogicalUnit):
       raise errors.OpPrereqError("Cluster does not support lvm-based"
                                  " instances")
 
-
     if self.op.mode == constants.INSTANCE_IMPORT:
       src_node = self.op.src_node
       src_path = self.op.src_path
@@ -4412,6 +4615,7 @@ class LUCreateInstance(LogicalUnit):
             nic_mac_ini = 'nic%d_mac' % idx
             nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
 
+    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
     # ip ping checks (we use the same ip that was resolved in ExpandNames)
     if self.op.start and not self.op.ip_check:
       raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
@@ -4422,6 +4626,18 @@ class LUCreateInstance(LogicalUnit):
         raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                    (self.check_ip, self.op.instance_name))
 
+    #### mac address generation
+    # By generating the MAC address here, both the allocator and the hooks
+    # get the real, final MAC address rather than the 'auto' or 'generate'
+    # value. There is a race condition between this generation and the
+    # instance object creation, so we know the MAC is valid now but cannot
+    # be sure it still will be when the instance is actually added. If
+    # things go wrong, adding the instance will abort because of a
+    # duplicate MAC, and the creation job will fail.
+    for nic in self.nics:
+      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+        nic.mac = self.cfg.GenerateMAC()
+
     #### allocator run
 
     if self.op.iallocator is not None:
@@ -4436,6 +4652,9 @@ class LUCreateInstance(LogicalUnit):
     if pnode.offline:
       raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                  pnode.name)
+    if pnode.drained:
+      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
+                                 pnode.name)
 
     self.secondaries = []
 
@@ -4447,8 +4666,9 @@ class LUCreateInstance(LogicalUnit):
       if self.op.snode == pnode.name:
         raise errors.OpPrereqError("The secondary node cannot be"
                                    " the primary node.")
-      self.secondaries.append(self.op.snode)
       _CheckNodeOnline(self, self.op.snode)
+      _CheckNodeNotDrained(self, self.op.snode)
+      self.secondaries.append(self.op.snode)
 
     nodenames = [pnode.name] + self.secondaries
 
@@ -4500,8 +4720,6 @@ class LUCreateInstance(LogicalUnit):
                            self.be_full[constants.BE_MEMORY],
                            self.op.hypervisor)
 
-    self.instance_status = self.op.start
-
   def Exec(self, feedback_fn):
     """Create and add the instance to the cluster.
 
@@ -4509,10 +4727,6 @@ class LUCreateInstance(LogicalUnit):
     instance = self.op.instance_name
     pnode_name = self.pnode.name
 
-    for nic in self.nics:
-      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-        nic.mac = self.cfg.GenerateMAC()
-
     ht_kind = self.op.hypervisor
     if ht_kind in constants.HTS_REQ_PORT:
       network_port = self.cfg.AllocatePort()
@@ -4547,7 +4761,7 @@ class LUCreateInstance(LogicalUnit):
                             primary_node=pnode_name,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
-                            admin_up=self.instance_status,
+                            admin_up=False,
                             network_port=network_port,
                             beparams=self.op.beparams,
                             hvparams=self.op.hvparams,
@@ -4606,7 +4820,7 @@ class LUCreateInstance(LogicalUnit):
     if iobj.disk_template != constants.DT_DISKLESS:
       if self.op.mode == constants.INSTANCE_CREATE:
         feedback_fn("* running the instance OS create scripts...")
-        result = self.rpc.call_instance_os_add(pnode_name, iobj)
+        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
         msg = result.RemoteFailMsg()
         if msg:
           raise errors.OpExecError("Could not add os for instance %s"
@@ -4633,9 +4847,11 @@ class LUCreateInstance(LogicalUnit):
                                      % self.op.mode)
 
     if self.op.start:
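+      # the instance was created down (admin_up=False); mark it up now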
+      iobj.admin_up = True
+      self.cfg.Update(iobj)
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
-      result = self.rpc.call_instance_start(pnode_name, iobj, None)
+      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
       msg = result.RemoteFailMsg()
       if msg:
         raise errors.OpExecError("Could not start instance: %s" % msg)
@@ -4683,7 +4899,12 @@ class LUConnectConsole(NoHooksLU):
     logging.debug("Connecting to console of %s on %s", instance.name, node)
 
     hyper = hypervisor.GetHypervisor(instance.hypervisor)
-    console_cmd = hyper.GetShellCommandForConsole(instance)
+    cluster = self.cfg.GetClusterInfo()
+    # beparams and hvparams are passed separately, to avoid editing the
+    # instance object and accidentally saving the defaults back into it.
+    hvparams = cluster.FillHV(instance)
+    beparams = cluster.FillBE(instance)
+    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
 
     # build ssh cmdline
     return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
@@ -4840,6 +5061,7 @@ class LUReplaceDisks(LogicalUnit):
       n1 = self.new_node = remote_node
       n2 = self.oth_node = instance.primary_node
       self.tgt_node = self.sec_node
+      _CheckNodeNotDrained(self, remote_node)
     else:
       raise errors.ProgrammerError("Unhandled disk replace mode")
 
@@ -4902,9 +5124,13 @@ class LUReplaceDisks(LogicalUnit):
       for node in tgt_node, oth_node:
         info("checking disk/%d on %s" % (idx, node))
         cfg.SetDiskID(dev, node)
-        if not self.rpc.call_blockdev_find(node, dev):
-          raise errors.OpExecError("Can't find disk/%d on node %s" %
-                                   (idx, node))
+        result = self.rpc.call_blockdev_find(node, dev)
+        msg = result.RemoteFailMsg()
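+        # no error but an empty payload means the device was not found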
+        if not msg and not result.payload:
+          msg = "disk not found"
+        if msg:
+          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
+                                   (idx, node, msg))
 
     # Step: check other node consistency
     self.proc.LogStep(2, steps_total, "check peer consistency")
@@ -4967,8 +5193,9 @@ class LUReplaceDisks(LogicalUnit):
       # build the rename list based on what LVs exist on the node
       rlist = []
       for to_ren in old_lvs:
-        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
-        if not find_res.failed and find_res.data is not None: # device exists
+        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
+        if not result.RemoteFailMsg() and result.payload:
+          # device exists
           rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
 
       info("renaming the old LVs on the target node")
@@ -4997,10 +5224,10 @@ class LUReplaceDisks(LogicalUnit):
       result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
       if result.failed or not result.data:
         for new_lv in new_lvs:
-          result = self.rpc.call_blockdev_remove(tgt_node, new_lv)
-          if result.failed or not result.data:
-            warning("Can't rollback device %s", hint="manually cleanup unused"
-                    " logical volumes")
+          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
+          if msg:
+            warning("Can't rollback device %s: %s", dev, msg,
+                    hint="manually clean up the unused logical volumes")
         raise errors.OpExecError("Can't add local storage to drbd")
 
       dev.children = new_lvs
@@ -5018,7 +5245,13 @@ class LUReplaceDisks(LogicalUnit):
     for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
       cfg.SetDiskID(dev, instance.primary_node)
       result = self.rpc.call_blockdev_find(instance.primary_node, dev)
-      if result.failed or result.data[5]:
+      msg = result.RemoteFailMsg()
+      if not msg and not result.payload:
+        msg = "disk not found"
+      if msg:
+        raise errors.OpExecError("Can't find DRBD device %s: %s" %
+                                 (name, msg))
+      if result.payload[5]:
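+        # status field 5 of the blockdev_find result is the degraded flag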
         raise errors.OpExecError("DRBD device %s is degraded!" % name)
 
     # Step: remove old storage
@@ -5027,9 +5260,10 @@ class LUReplaceDisks(LogicalUnit):
       info("remove logical volumes for %s" % name)
       for lv in old_lvs:
         cfg.SetDiskID(lv, tgt_node)
-        result = self.rpc.call_blockdev_remove(tgt_node, lv)
-        if result.failed or not result.data:
-          warning("Can't remove old LV", hint="manually remove unused LVs")
+        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
+        if msg:
+          warning("Can't remove old LV: %s" % msg,
+                  hint="manually remove unused LVs")
           continue
 
   def _ExecD8Secondary(self, feedback_fn):
@@ -5082,10 +5316,12 @@ class LUReplaceDisks(LogicalUnit):
       info("checking disk/%d on %s" % (idx, pri_node))
       cfg.SetDiskID(dev, pri_node)
       result = self.rpc.call_blockdev_find(pri_node, dev)
-      result.Raise()
-      if not result.data:
-        raise errors.OpExecError("Can't find disk/%d on node %s" %
-                                 (idx, pri_node))
+      msg = result.RemoteFailMsg()
+      if not msg and not result.payload:
+        msg = "disk not found"
+      if msg:
+        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
+                                 (idx, pri_node, msg))
 
     # Step: check other node consistency
     self.proc.LogStep(2, steps_total, "check peer consistency")
@@ -5140,7 +5376,7 @@ class LUReplaceDisks(LogicalUnit):
       try:
         _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                               _GetInstanceInfoText(instance), False)
-      except errors.BlockDeviceError:
+      except errors.GenericError:
         self.cfg.ReleaseDRBDMinors(instance.name)
         raise
 
@@ -5148,9 +5384,10 @@ class LUReplaceDisks(LogicalUnit):
       # we have new devices, shutdown the drbd on the old secondary
       info("shutting down drbd for disk/%d on old node" % idx)
       cfg.SetDiskID(dev, old_node)
-      result = self.rpc.call_blockdev_shutdown(old_node, dev)
-      if result.failed or not result.data:
-        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
+      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
+      if msg:
+        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
+                (idx, msg),
                 hint="Please cleanup this device manually as soon as possible")
 
     info("detaching primary drbds from the network (=> standalone)")
@@ -5194,8 +5431,13 @@ class LUReplaceDisks(LogicalUnit):
     for idx, (dev, old_lvs, _) in iv_names.iteritems():
       cfg.SetDiskID(dev, pri_node)
       result = self.rpc.call_blockdev_find(pri_node, dev)
-      result.Raise()
-      if result.data[5]:
+      msg = result.RemoteFailMsg()
+      if not msg and not result.payload:
+        msg = "disk not found"
+      if msg:
+        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
+                                 (idx, msg))
+      if result.payload[5]:
         raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
 
     self.proc.LogStep(6, steps_total, "removing old storage")
@@ -5203,9 +5445,9 @@ class LUReplaceDisks(LogicalUnit):
       info("remove logical volumes for disk/%d" % idx)
       for lv in old_lvs:
         cfg.SetDiskID(lv, old_node)
-        result = self.rpc.call_blockdev_remove(old_node, lv)
-        if result.failed or not result.data:
-          warning("Can't remove LV on old secondary",
+        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
+        if msg:
+          warning("Can't remove LV on old secondary: %s", msg,
                   hint="Cleanup stale volumes by hand")
 
   def Exec(self, feedback_fn):
@@ -5316,13 +5558,10 @@ class LUGrowDisk(LogicalUnit):
     for node in instance.all_nodes:
       self.cfg.SetDiskID(disk, node)
       result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
-      result.Raise()
-      if (not result.data or not isinstance(result.data, (list, tuple)) or
-          len(result.data) != 2):
-        raise errors.OpExecError("Grow request failed to node %s" % node)
-      elif not result.data[0]:
+      msg = result.RemoteFailMsg()
+      if msg:
         raise errors.OpExecError("Grow request failed to node %s: %s" %
-                                 (node, result.data[1]))
+                                 (node, msg))
     disk.RecordGrow(self.op.amount)
     self.cfg.Update(instance)
     if self.op.wait_for_sync:
@@ -5386,8 +5625,14 @@ class LUQueryInstanceData(NoHooksLU):
     if not static:
       self.cfg.SetDiskID(dev, instance.primary_node)
       dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
-      dev_pstatus.Raise()
-      dev_pstatus = dev_pstatus.data
+      if dev_pstatus.offline:
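+        # the node is offline, so no device status could be retrieved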
+        dev_pstatus = None
+      else:
+        msg = dev_pstatus.RemoteFailMsg()
+        if msg:
+          raise errors.OpExecError("Can't compute disk status for %s: %s" %
+                                   (instance.name, msg))
+        dev_pstatus = dev_pstatus.payload
     else:
       dev_pstatus = None
 
@@ -5401,8 +5646,14 @@ class LUQueryInstanceData(NoHooksLU):
     if snode and not static:
       self.cfg.SetDiskID(dev, snode)
       dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
-      dev_sstatus.Raise()
-      dev_sstatus = dev_sstatus.data
+      if dev_sstatus.offline:
+        dev_sstatus = None
+      else:
+        msg = dev_sstatus.RemoteFailMsg()
+        if msg:
+          raise errors.OpExecError("Can't compute disk status for %s: %s" %
+                                   (instance.name, msg))
+        dev_sstatus = dev_sstatus.payload
     else:
       dev_sstatus = None
 
@@ -5497,8 +5748,6 @@ class LUSetInstanceParams(LogicalUnit):
             self.op.hvparams or self.op.beparams):
       raise errors.OpPrereqError("No changes submitted")
 
-    utils.CheckBEParams(self.op.beparams)
-
     # Disk validation
     disk_addremove = 0
     for disk_op, disk_dict in self.op.disks:
@@ -5548,24 +5797,29 @@ class LUSetInstanceParams(LogicalUnit):
       # nic_dict should be a dict
       nic_ip = nic_dict.get('ip', None)
       if nic_ip is not None:
-        if nic_ip.lower() == "none":
+        if nic_ip.lower() == constants.VALUE_NONE:
           nic_dict['ip'] = None
         else:
           if not utils.IsValidIP(nic_ip):
             raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
-      # we can only check None bridges and assign the default one
-      nic_bridge = nic_dict.get('bridge', None)
-      if nic_bridge is None:
-        nic_dict['bridge'] = self.cfg.GetDefBridge()
-      # but we can validate MACs
-      nic_mac = nic_dict.get('mac', None)
-      if nic_mac is not None:
-        if self.cfg.IsMacInUse(nic_mac):
-          raise errors.OpPrereqError("MAC address %s already in use"
-                                     " in cluster" % nic_mac)
+
+      if nic_op == constants.DDM_ADD:
+        nic_bridge = nic_dict.get('bridge', None)
+        if nic_bridge is None:
+          nic_dict['bridge'] = self.cfg.GetDefBridge()
+        nic_mac = nic_dict.get('mac', None)
+        if nic_mac is None:
+          nic_dict['mac'] = constants.VALUE_AUTO
+
+      if 'mac' in nic_dict:
+        nic_mac = nic_dict['mac']
         if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
           if not utils.IsValidMac(nic_mac):
             raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
+        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
+          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
+                                     " modifying an existing nic")
+
     if nic_addremove > 1:
       raise errors.OpPrereqError("Only one NIC add or remove operation"
                                  " supported at a time")
@@ -5590,7 +5844,37 @@ class LUSetInstanceParams(LogicalUnit):
       args['memory'] = self.be_new[constants.BE_MEMORY]
     if constants.BE_VCPUS in self.be_new:
       args['vcpus'] = self.be_new[constants.BE_VCPUS]
-    # FIXME: readd disk/nic changes
+    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
+    # information at all.
+    if self.op.nics:
+      args['nics'] = []
+      nic_override = dict(self.op.nics)
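+      # build the NIC list as it will look after the changes, for the hooks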
+      for idx, nic in enumerate(self.instance.nics):
+        if idx in nic_override:
+          this_nic_override = nic_override[idx]
+        else:
+          this_nic_override = {}
+        if 'ip' in this_nic_override:
+          ip = this_nic_override['ip']
+        else:
+          ip = nic.ip
+        if 'bridge' in this_nic_override:
+          bridge = this_nic_override['bridge']
+        else:
+          bridge = nic.bridge
+        if 'mac' in this_nic_override:
+          mac = this_nic_override['mac']
+        else:
+          mac = nic.mac
+        args['nics'].append((ip, bridge, mac))
+      if constants.DDM_ADD in nic_override:
+        ip = nic_override[constants.DDM_ADD].get('ip', None)
+        bridge = nic_override[constants.DDM_ADD]['bridge']
+        mac = nic_override[constants.DDM_ADD]['mac']
+        args['nics'].append((ip, bridge, mac))
+      elif constants.DDM_REMOVE in nic_override:
+        del args['nics'][-1]
+
     env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
     nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
     return env, nl, nl
@@ -5620,11 +5904,10 @@ class LUSetInstanceParams(LogicalUnit):
             del i_hvdict[key]
           except KeyError:
             pass
-        elif val == constants.VALUE_NONE:
-          i_hvdict[key] = None
         else:
           i_hvdict[key] = val
       cluster = self.cfg.GetClusterInfo()
+      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
       hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                 i_hvdict)
       # local check
@@ -5648,6 +5931,7 @@ class LUSetInstanceParams(LogicalUnit):
         else:
           i_bedict[key] = val
       cluster = self.cfg.GetClusterInfo()
+      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
       be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                 i_bedict)
       self.be_new = be_new # the new actual values
@@ -5671,7 +5955,7 @@ class LUSetInstanceParams(LogicalUnit):
         self.warn.append("Can't get info from primary node %s" % pnode)
       else:
         if not instance_info.failed and instance_info.data:
-          current_mem = instance_info.data['memory']
+          current_mem = int(instance_info.data['memory'])
         else:
           # Assume instance not running
           # (there is a slight race condition here, but it's not very probable,
@@ -5706,8 +5990,10 @@ class LUSetInstanceParams(LogicalUnit):
           raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                      " are 0 to %d" %
                                      (nic_op, len(instance.nics)))
-      nic_bridge = nic_dict.get('bridge', None)
-      if nic_bridge is not None:
+      if 'bridge' in nic_dict:
+        nic_bridge = nic_dict['bridge']
+        if nic_bridge is None:
+          raise errors.OpPrereqError('Cannot set the nic bridge to None')
         if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
           msg = ("Bridge '%s' doesn't exist on one of"
                  " the instance nodes" % nic_bridge)
@@ -5715,6 +6001,18 @@ class LUSetInstanceParams(LogicalUnit):
             self.warn.append(msg)
           else:
             raise errors.OpPrereqError(msg)
+      if 'mac' in nic_dict:
+        nic_mac = nic_dict['mac']
+        if nic_mac is None:
+          raise errors.OpPrereqError('Cannot set the nic mac to None')
+        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+          # otherwise generate the mac
+          nic_dict['mac'] = self.cfg.GenerateMAC()
+        else:
+          # or validate/reserve the current one
+          if self.cfg.IsMacInUse(nic_mac):
+            raise errors.OpPrereqError("MAC address %s already in use"
+                                       " in cluster" % nic_mac)
 
     # DISK processing
     if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
@@ -5767,10 +6065,10 @@ class LUSetInstanceParams(LogicalUnit):
         device_idx = len(instance.disks)
         for node, disk in device.ComputeNodeTree(instance.primary_node):
           self.cfg.SetDiskID(disk, node)
-          rpc_result = self.rpc.call_blockdev_remove(node, disk)
-          if rpc_result.failed or not rpc_result.data:
-            self.proc.LogWarning("Could not remove disk/%d on node %s,"
-                                 " continuing anyway", device_idx, node)
+          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+          if msg:
+            self.LogWarning("Could not remove disk/%d on node %s: %s,"
+                            " continuing anyway", device_idx, node, msg)
         result.append(("disk/%d" % device_idx, "remove"))
       elif disk_op == constants.DDM_ADD:
         # add a new disk
@@ -5817,15 +6115,11 @@ class LUSetInstanceParams(LogicalUnit):
         del instance.nics[-1]
         result.append(("nic.%d" % len(instance.nics), "remove"))
       elif nic_op == constants.DDM_ADD:
-        # add a new nic
-        if 'mac' not in nic_dict:
-          mac = constants.VALUE_GENERATE
-        else:
-          mac = nic_dict['mac']
-        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-          mac = self.cfg.GenerateMAC()
+        # mac and bridge should be set by now
+        mac = nic_dict['mac']
+        bridge = nic_dict['bridge']
         new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
-                              bridge=nic_dict.get('bridge', None))
+                              bridge=bridge)
         instance.nics.append(new_nic)
         result.append(("nic.%d" % (len(instance.nics) - 1),
                        "add:mac=%s,ip=%s,bridge=%s" %
@@ -5839,7 +6133,7 @@ class LUSetInstanceParams(LogicalUnit):
 
     # hvparams changes
     if self.op.hvparams:
-      instance.hvparams = self.hv_new
+      instance.hvparams = self.hv_inst
       for key, val in self.op.hvparams.iteritems():
         result.append(("hv/%s" % key, val))
 
@@ -5955,6 +6249,7 @@ class LUExportInstance(LogicalUnit):
       # This is wrong node name, not a non-locked node
       raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
     _CheckNodeOnline(self, self.dst_node.name)
+    _CheckNodeNotDrained(self, self.dst_node.name)
 
     # instance disk type verification
     for disk in self.instance.disks:
@@ -5972,10 +6267,11 @@ class LUExportInstance(LogicalUnit):
     if self.op.shutdown:
       # shutdown the instance, but not the disks
       result = self.rpc.call_instance_shutdown(src_node, instance)
-      result.Raise()
-      if not result.data:
-        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
-                                 (instance.name, src_node))
+      msg = result.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Could not shutdown instance %s on"
+                                 " node %s: %s" %
+                                 (instance.name, src_node, msg))
 
     vgname = self.cfg.GetVGName()
 
@@ -6003,7 +6299,7 @@ class LUExportInstance(LogicalUnit):
 
     finally:
       if self.op.shutdown and instance.admin_up:
-        result = self.rpc.call_instance_start(src_node, instance, None)
+        result = self.rpc.call_instance_start(src_node, instance, None, None)
         msg = result.RemoteFailMsg()
         if msg:
           _ShutdownInstanceDisks(self, instance)
@@ -6020,10 +6316,10 @@ class LUExportInstance(LogicalUnit):
           self.LogWarning("Could not export block device %s from node %s to"
                           " node %s", dev.logical_id[1], src_node,
                           dst_node.name)
-        result = self.rpc.call_blockdev_remove(src_node, dev)
-        if result.failed or not result.data:
+        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
+        if msg:
           self.LogWarning("Could not remove snapshot block device %s from node"
-                          " %s", dev.logical_id[1], src_node)
+                          " %s: %s", dev.logical_id[1], src_node, msg)
 
     result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
     if result.failed or not result.data:
@@ -6375,7 +6671,7 @@ class IAllocator(object):
     cluster_info = cfg.GetClusterInfo()
     # cluster data
     data = {
-      "version": 1,
+      "version": constants.IALLOCATOR_VERSION,
       "cluster_name": cfg.GetClusterName(),
       "cluster_tags": list(cluster_info.GetTags()),
       "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
@@ -6405,6 +6701,7 @@ class IAllocator(object):
         "primary_ip": ninfo.primary_ip,
         "secondary_ip": ninfo.secondary_ip,
         "offline": ninfo.offline,
+        "drained": ninfo.drained,
         "master_candidate": ninfo.master_candidate,
         }
 
@@ -6471,6 +6768,8 @@ class IAllocator(object):
         "disk_template": iinfo.disk_template,
         "hypervisor": iinfo.hypervisor,
         }
+      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
+                                                 pir["disks"])
       instance_data[iinfo.name] = pir
 
     data["instances"] = instance_data