LUCreateInstance: the node list as return value
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 2d51ad2..03660c1 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
 import os
 import os.path
 import time
-import tempfile
 import re
 import platform
 import logging
 import copy
-import random
 
 from ganeti import ssh
 from ganeti import utils
@@ -40,7 +38,6 @@ from ganeti import hypervisor
 from ganeti import locking
 from ganeti import constants
 from ganeti import objects
-from ganeti import opcodes
 from ganeti import serializer
 from ganeti import ssconf
 
@@ -59,6 +56,9 @@ class LogicalUnit(object):
 
   Note that all commands require root permissions.
 
+  @ivar dry_run_result: the value (if any) that will be returned to the caller
+      in dry-run mode (signalled by opcode dry_run parameter)
+
   """
   HPATH = None
   HTYPE = None
@@ -89,6 +89,8 @@ class LogicalUnit(object):
     # logging
     self.LogWarning = processor.LogWarning
     self.LogInfo = processor.LogInfo
+    # support for dry-run
+    self.dry_run_result = None
 
     for attr_name in self._OP_REQP:
       attr_val = getattr(op, attr_name, None)
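The dry_run_result attribute documented above is set by a logical unit during CheckPrereq and is meant to be handed back to the caller when the opcode carries dry_run=True (the processor-side handling is not part of this hunk). A minimal sketch of the intended usage, with a made-up logical unit and opcode field:

    from ganeti import cmdlib

    class LUExampleDryRun(cmdlib.NoHooksLU):   # hypothetical LU, for illustration only
      """Toy logical unit showing the intended use of dry_run_result."""
      _OP_REQP = ["instance_name"]             # assumed opcode slot

      def CheckPrereq(self):
        # Validation happens here as usual; in dry-run mode execution is
        # expected to stop after this method, so store the value the caller
        # should see.
        self.dry_run_result = self.op.instance_name

      def Exec(self, feedback_fn):
        # Only reached on a real (non-dry-run) submission.
        return self.op.instance_name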
@@ -453,7 +455,8 @@ def _CheckNodeNotDrained(lu, node):
 
 
 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
-                          memory, vcpus, nics, disk_template, disks):
+                          memory, vcpus, nics, disk_template, disks,
+                          bep, hvp, hypervisor):
   """Builds instance related env variables for hooks
 
   This builds the hook environment from individual variables.
@@ -473,12 +476,18 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   @type vcpus: string
   @param vcpus: the count of VCPUs the instance has
   @type nics: list
-  @param nics: list of tuples (ip, bridge, mac) representing
-      the NICs the instance  has
+  @param nics: list of tuples (ip, mac, mode, link) representing
+      the NICs the instance has
   @type disk_template: string
   @param disk_template: the disk template of the instance
   @type disks: list
   @param disks: the list of (size, mode) pairs
+  @type bep: dict
+  @param bep: the backend parameters for the instance
+  @type hvp: dict
+  @param hvp: the hypervisor parameters for the instance
+  @type hypervisor: string
+  @param hypervisor: the hypervisor for the instance
   @rtype: dict
   @return: the hook environment for this instance
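For reference, a call of the extended helper might now look roughly like this; all values are made up and only illustrate where the three new arguments slot in:

    from ganeti import constants

    env = _BuildInstanceHookEnv(
      "inst1.example.com", "node1.example.com", ["node2.example.com"],
      "debian-etch", True, 512, 1,
      [("192.0.2.10", "aa:00:00:00:00:01",
        constants.NIC_MODE_BRIDGED, "xen-br0")],          # (ip, mac, mode, link)
      constants.DT_DRBD8, [(10240, "rw")],
      {constants.BE_MEMORY: 512, constants.BE_VCPUS: 1},  # bep
      {},                                                 # hvp
      constants.HT_XEN_PVM)                               # hypervisor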
 
@@ -497,6 +506,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     "INSTANCE_MEMORY": memory,
     "INSTANCE_VCPUS": vcpus,
     "INSTANCE_DISK_TEMPLATE": disk_template,
+    "INSTANCE_HYPERVISOR": hypervisor,
   }
 
   if nics:
@@ -525,12 +535,17 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   env["INSTANCE_DISK_COUNT"] = disk_count
 
+  for source, kind in [(bep, "BE"), (hvp, "HV")]:
+    for key, value in source.items():
+      env["INSTANCE_%s_%s" % (kind, key)] = value
+
   return env
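The new BE/HV loop above simply flattens both parameter dictionaries into additional hook variables. A rough idea of the result, with made-up parameter values:

    bep = {"memory": 512, "vcpus": 1}                # example backend parameters
    hvp = {"kernel_path": "/boot/vmlinuz-2.6-xenU"}  # example hypervisor parameters
    env = {}
    for source, kind in [(bep, "BE"), (hvp, "HV")]:
      for key, value in source.items():
        env["INSTANCE_%s_%s" % (kind, key)] = value
    # env == {"INSTANCE_BE_memory": 512, "INSTANCE_BE_vcpus": 1,
    #         "INSTANCE_HV_kernel_path": "/boot/vmlinuz-2.6-xenU"},
    # alongside INSTANCE_HYPERVISOR set earlier in the function.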
 
-def _PreBuildNICHooksList(lu, nics):
+def _NICListToTuple(lu, nics):
   """Build a list of nic information tuples.
 
-  This list is suitable to be passed to _BuildInstanceHookEnv.
+  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
+  value in LUQueryInstanceData.
 
   @type lu:  L{LogicalUnit}
   @param lu: the logical unit on whose behalf we execute
@@ -564,7 +579,9 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
   @return: the hook environment dictionary
 
   """
-  bep = lu.cfg.GetClusterInfo().FillBE(instance)
+  cluster = lu.cfg.GetClusterInfo()
+  bep = cluster.FillBE(instance)
+  hvp = cluster.FillHV(instance)
   args = {
     'name': instance.name,
     'primary_node': instance.primary_node,
@@ -573,9 +590,12 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
     'status': instance.admin_up,
     'memory': bep[constants.BE_MEMORY],
     'vcpus': bep[constants.BE_VCPUS],
-    'nics': _PreBuildNICHooksList(lu, instance.nics),
+    'nics': _NICListToTuple(lu, instance.nics),
     'disk_template': instance.disk_template,
     'disks': [(disk.size, disk.mode) for disk in instance.disks],
+    'bep': bep,
+    'hvp': hvp,
+    'hypervisor': instance.hypervisor,
   }
   if override:
     args.update(override)
@@ -610,10 +630,8 @@ def _CheckNicsBridgesExist(lu, target_nics, target_node,
             if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
   if brlist:
     result = lu.rpc.call_bridges_exist(target_node, brlist)
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking bridges on destination node"
-                                 " '%s': %s" % (target_node, msg))
+    result.Raise("Error checking bridges on destination node '%s'" %
+                 target_node, prereq=True)
 
 
 def _CheckInstanceBridgesExist(lu, instance, node=None):
@@ -621,7 +639,7 @@ def _CheckInstanceBridgesExist(lu, instance, node=None):
 
   """
   if node is None:
-    node=instance.primary_node
+    node = instance.primary_node
   _CheckNicsBridgesExist(lu, instance.nics, node)
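A recurring change in this patch is replacing the old RemoteFailMsg()/manual-raise pattern with result.fail_msg and result.Raise(message, prereq=...). As a reading aid, a much-simplified sketch of what such a helper on the RPC result object amounts to (the real implementation lives in the rpc module and does more):

    from ganeti import errors

    class RpcResultSketch(object):            # illustrative stand-in only
      def __init__(self, payload=None, fail_msg=None):
        self.payload = payload                # remote return value on success
        self.fail_msg = fail_msg              # error text, or None on success

      def Raise(self, msg, prereq=False):
        """Raise OpExecError (or OpPrereqError when prereq=True) on failure."""
        if not self.fail_msg:
          return
        if prereq:
          raise errors.OpPrereqError("%s: %s" % (msg, self.fail_msg))
        raise errors.OpExecError("%s: %s" % (msg, self.fail_msg))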
 
 
@@ -656,9 +674,7 @@ class LUDestroyCluster(NoHooksLU):
     """
     master = self.cfg.GetMasterNode()
     result = self.rpc.call_node_stop_master(master, False)
-    result.Raise()
-    if not result.data:
-      raise errors.OpExecError("Could not disable the master role")
+    result.Raise("Could not disable the master role")
     priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
     utils.CreateBackup(priv_key)
     utils.CreateBackup(pub_key)
@@ -1022,7 +1038,6 @@ class LUVerifyCluster(LogicalUnit):
 
     for node_i in nodeinfo:
       node = node_i.name
-      nresult = all_nvinfo[node].data
 
       if node_i.offline:
         feedback_fn("* Skipping offline node %s" % (node,))
@@ -1040,11 +1055,13 @@ class LUVerifyCluster(LogicalUnit):
         ntype = "regular"
       feedback_fn("* Verifying node %s (%s)" % (node, ntype))
 
-      if all_nvinfo[node].failed or not isinstance(nresult, dict):
-        feedback_fn("  - ERROR: connection to %s failed" % (node,))
+      msg = all_nvinfo[node].fail_msg
+      if msg:
+        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
         bad = True
         continue
 
+      nresult = all_nvinfo[node].payload
       node_drbd = {}
       for minor, instance in all_drbd_map[node].items():
         if instance not in instanceinfo:
@@ -1241,14 +1258,16 @@ class LUVerifyCluster(LogicalUnit):
         for node_name in hooks_results:
           show_node_header = True
           res = hooks_results[node_name]
-          if res.failed or res.data is False or not isinstance(res.data, list):
+          msg = res.fail_msg
+          if msg:
             if res.offline:
               # no need to warn or set fail return value
               continue
-            feedback_fn("    Communication failure in hooks execution")
+            feedback_fn("    Communication failure in hooks execution: %s" %
+                        msg)
             lu_result = 1
             continue
-          for script, hkr, output in res.data:
+          for script, hkr, output in res.payload:
             if hkr == constants.HKR_FAIL:
               # The node header is only shown once, if there are
               # failing hooks on that node
@@ -1324,7 +1343,7 @@ class LUVerifyDisks(NoHooksLU):
       node_res = node_lvs[node]
       if node_res.offline:
         continue
-      msg = node_res.RemoteFailMsg()
+      msg = node_res.fail_msg
       if msg:
         logging.warning("Error enumerating LVs on node %s: %s", node, msg)
         res_nodes[node] = msg
@@ -1397,8 +1416,7 @@ class LURenameCluster(LogicalUnit):
     # shutdown the master IP
     master = self.cfg.GetMasterNode()
     result = self.rpc.call_node_stop_master(master, False)
-    if result.failed or not result.data:
-      raise errors.OpExecError("Could not disable the master role")
+    result.Raise("Could not disable the master role")
 
     try:
       cluster = self.cfg.GetClusterInfo()
@@ -1416,17 +1434,18 @@ class LURenameCluster(LogicalUnit):
       result = self.rpc.call_upload_file(node_list,
                                          constants.SSH_KNOWN_HOSTS_FILE)
       for to_node, to_result in result.iteritems():
-         msg = to_result.RemoteFailMsg()
-         if msg:
-           msg = ("Copy of file %s to node %s failed: %s" %
-                   (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
-           self.proc.LogWarning(msg)
+        msg = to_result.fail_msg
+        if msg:
+          msg = ("Copy of file %s to node %s failed: %s" %
+                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
+          self.proc.LogWarning(msg)
 
     finally:
       result = self.rpc.call_node_start_master(master, False)
-      if result.failed or not result.data:
+      msg = result.fail_msg
+      if msg:
         self.LogWarning("Could not re-enable the master role on"
-                        " the master, please restart manually.")
+                        " the master, please restart manually: %s", msg)
 
 
 def _RecursiveCheckIfLVMBased(disk):
@@ -1509,7 +1528,7 @@ class LUSetClusterParams(LogicalUnit):
     if self.op.vg_name:
       vglist = self.rpc.call_vg_list(node_list)
       for node in node_list:
-        msg = vglist[node].RemoteFailMsg()
+        msg = vglist[node].fail_msg
         if msg:
           # ignoring down node
           self.LogWarning("Error while gathering data on node %s"
@@ -1631,11 +1650,11 @@ def _RedistributeAncillaryFiles(lu, additional_nodes=None):
     if os.path.exists(fname):
       result = lu.rpc.call_upload_file(dist_nodes, fname)
       for to_node, to_result in result.items():
-         msg = to_result.RemoteFailMsg()
-         if msg:
-           msg = ("Copy of file %s to node %s failed: %s" %
-                   (fname, to_node, msg))
-           lu.proc.LogWarning(msg)
+        msg = to_result.fail_msg
+        if msg:
+          msg = ("Copy of file %s to node %s failed: %s" %
+                 (fname, to_node, msg))
+          lu.proc.LogWarning(msg)
 
 
 class LURedistributeConfig(NoHooksLU):
@@ -1682,12 +1701,13 @@ def _WaitForSync(lu, instance, oneshot=False, unlock=False):
     lu.cfg.SetDiskID(dev, node)
 
   retries = 0
+  degr_retries = 10 # in seconds, as we sleep 1 second each time
   while True:
     max_time = 0
     done = True
     cumul_degraded = False
     rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
-    msg = rstats.RemoteFailMsg()
+    msg = rstats.fail_msg
     if msg:
       lu.LogWarning("Can't get any data from node %s: %s", node, msg)
       retries += 1
@@ -1715,6 +1735,16 @@ def _WaitForSync(lu, instance, oneshot=False, unlock=False):
           rem_time = "no time estimate"
         lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                         (instance.disks[i].iv_name, perc_done, rem_time))
+
+    # if we're done but degraded, let's do a few small retries, to
+    # make sure we see a stable and not transient situation; therefore
+    # we force restart of the loop
+    if (done or oneshot) and cumul_degraded and degr_retries > 0:
+      logging.info("Degraded disks found, %d retries left", degr_retries)
+      degr_retries -= 1
+      time.sleep(1)
+      continue
+
     if done or oneshot:
       break
 
@@ -1742,7 +1772,7 @@ def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
   result = True
   if on_primary or dev.AssembleOnSecondary():
     rstats = lu.rpc.call_blockdev_find(node, dev)
-    msg = rstats.RemoteFailMsg()
+    msg = rstats.fail_msg
     if msg:
       lu.LogWarning("Can't find disk on node %s: %s", node, msg)
       result = False
@@ -1796,10 +1826,11 @@ class LUDiagnoseOS(NoHooksLU):
 
     @rtype: dict
     @return: a dictionary with osnames as keys and as value another map, with
-        nodes as keys and list of OS objects as values, eg::
+        nodes as keys and tuples of (path, status, diagnose) as values, eg::
 
-          {"debian-etch": {"node1": [<object>,...],
-                           "node2": [<object>,]}
+          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
+                                     (/srv/..., False, "invalid api")],
+                           "node2": [(/srv/..., True, "")]}
           }
 
     """
@@ -1808,18 +1839,18 @@ class LUDiagnoseOS(NoHooksLU):
     # level), so that nodes with a non-responding node daemon don't
     # make all OSes invalid
     good_nodes = [node_name for node_name in rlist
-                  if not rlist[node_name].failed]
-    for node_name, nr in rlist.iteritems():
-      if nr.failed or not nr.data:
+                  if not rlist[node_name].fail_msg]
+    for node_name, nr in rlist.items():
+      if nr.fail_msg or not nr.payload:
         continue
-      for os_obj in nr.data:
-        if os_obj.name not in all_os:
+      for name, path, status, diagnose in nr.payload:
+        if name not in all_os:
           # build a list of nodes for this os containing empty lists
           # for each node in node_list
-          all_os[os_obj.name] = {}
+          all_os[name] = {}
           for nname in good_nodes:
-            all_os[os_obj.name][nname] = []
-        all_os[os_obj.name][node_name].append(os_obj)
+            all_os[name][nname] = []
+        all_os[name][node_name].append((path, status, diagnose))
     return all_os
 
   def Exec(self, feedback_fn):
@@ -1828,21 +1859,20 @@ class LUDiagnoseOS(NoHooksLU):
     """
     valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
     node_data = self.rpc.call_os_diagnose(valid_nodes)
-    if node_data == False:
-      raise errors.OpExecError("Can't gather the list of OSes")
     pol = self._DiagnoseByOS(valid_nodes, node_data)
     output = []
-    for os_name, os_data in pol.iteritems():
+    for os_name, os_data in pol.items():
       row = []
       for field in self.op.output_fields:
         if field == "name":
           val = os_name
         elif field == "valid":
-          val = utils.all([osl and osl[0] for osl in os_data.values()])
+          val = utils.all([osl and osl[0][1] for osl in os_data.values()])
         elif field == "node_status":
+          # this is just a copy of the dict
           val = {}
-          for node_name, nos_list in os_data.iteritems():
-            val[node_name] = [(v.status, v.path) for v in nos_list]
+          for node_name, nos_list in os_data.items():
+            val[node_name] = nos_list
         else:
           raise errors.ParameterError(field)
         row.append(val)
@@ -1914,7 +1944,11 @@ class LURemoveNode(LogicalUnit):
 
     self.context.RemoveNode(node.name)
 
-    self.rpc.call_node_leave_cluster(node.name)
+    result = self.rpc.call_node_leave_cluster(node.name)
+    msg = result.fail_msg
+    if msg:
+      self.LogWarning("Errors encountered on the remote node while leaving"
+                      " the cluster: %s", msg)
 
     # Promote nodes to master candidate as needed
     _AdjustCandidatePool(self)
@@ -1999,8 +2033,8 @@ class LUQueryNodes(NoHooksLU):
                                           self.cfg.GetHypervisorType())
       for name in nodenames:
         nodeinfo = node_data[name]
-        if not nodeinfo.failed and nodeinfo.data:
-          nodeinfo = nodeinfo.data
+        if not nodeinfo.fail_msg and nodeinfo.payload:
+          nodeinfo = nodeinfo.payload
           fn = utils.TryConvert
           live_data[name] = {
             "mtotal": fn(int, nodeinfo.get('memory_total', None)),
@@ -2122,10 +2156,15 @@ class LUQueryNodeVolumes(NoHooksLU):
 
     output = []
     for node in nodenames:
-      if node not in volumes or volumes[node].failed or not volumes[node].data:
+      nresult = volumes[node]
+      if nresult.offline:
+        continue
+      msg = nresult.fail_msg
+      if msg:
+        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
         continue
 
-      node_vols = volumes[node].data[:]
+      node_vols = nresult.payload[:]
       node_vols.sort(key=lambda vol: vol['dev'])
 
       for vol in node_vols:
@@ -2275,17 +2314,14 @@ class LUAddNode(LogicalUnit):
 
     # check connectivity
     result = self.rpc.call_version([node])[node]
-    result.Raise()
-    if result.data:
-      if constants.PROTOCOL_VERSION == result.data:
-        logging.info("Communication to node %s fine, sw version %s match",
-                     node, result.data)
-      else:
-        raise errors.OpExecError("Version mismatch master version %s,"
-                                 " node version %s" %
-                                 (constants.PROTOCOL_VERSION, result.data))
+    result.Raise("Can't get version information from node %s" % node)
+    if constants.PROTOCOL_VERSION == result.payload:
+      logging.info("Communication to node %s fine, sw version %s match",
+                   node, result.payload)
     else:
-      raise errors.OpExecError("Cannot get version from the new node")
+      raise errors.OpExecError("Version mismatch master version %s,"
+                               " node version %s" %
+                               (constants.PROTOCOL_VERSION, result.payload))
 
     # setup ssh on node
     logging.info("Copy ssh key to node %s", node)
@@ -2305,11 +2341,7 @@ class LUAddNode(LogicalUnit):
     result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                     keyarray[2],
                                     keyarray[3], keyarray[4], keyarray[5])
-
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Cannot transfer ssh keys to the"
-                               " new node: %s" % msg)
+    result.Raise("Cannot transfer ssh keys to the new node")
 
     # Add node to our /etc/hosts, and add key to known_hosts
     if self.cfg.GetClusterInfo().modify_etc_hosts:
@@ -2318,10 +2350,8 @@ class LUAddNode(LogicalUnit):
     if new_node.secondary_ip != new_node.primary_ip:
       result = self.rpc.call_node_has_ip_address(new_node.name,
                                                  new_node.secondary_ip)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("Failure checking secondary ip"
-                                   " on node %s: %s" % (new_node.name, msg))
+      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
+                   prereq=True)
       if not result.payload:
         raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                  " you gave (%s). Please fix and re-run this"
@@ -2336,13 +2366,12 @@ class LUAddNode(LogicalUnit):
     result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                        self.cfg.GetClusterName())
     for verifier in node_verify_list:
-      if result[verifier].failed or not result[verifier].data:
-        raise errors.OpExecError("Cannot communicate with %s's node daemon"
-                                 " for remote verification" % verifier)
-      if result[verifier].data['nodelist']:
-        for failed in result[verifier].data['nodelist']:
+      result[verifier].Raise("Cannot communicate with node %s" % verifier)
+      nl_payload = result[verifier].payload['nodelist']
+      if nl_payload:
+        for failed in nl_payload:
           feedback_fn("ssh/hostname verification failed %s -> %s" %
-                      (verifier, result[verifier].data['nodelist'][failed]))
+                      (verifier, nl_payload[failed]))
         raise errors.OpExecError("ssh/hostname verification failed.")
 
     if self.op.readd:
@@ -2455,7 +2484,7 @@ class LUSetNodeParams(LogicalUnit):
       result.append(("master_candidate", str(self.op.master_candidate)))
       if self.op.master_candidate == False:
         rrc = self.rpc.call_node_demote_from_mc(node.name)
-        msg = rrc.RemoteFailMsg()
+        msg = rrc.fail_msg
         if msg:
           self.LogWarning("Node failed to demote itself: %s" % msg)
 
@@ -2519,9 +2548,7 @@ class LUPowercycleNode(NoHooksLU):
     """
     result = self.rpc.call_node_powercycle(self.op.node_name,
                                            self.cfg.GetHypervisorType())
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Failed to schedule the reboot: %s" % msg)
+    result.Raise("Failed to schedule the reboot")
     return result.payload
 
 
@@ -2557,8 +2584,8 @@ class LUQueryClusterInfo(NoHooksLU):
       "master": cluster.master_node,
       "default_hypervisor": cluster.default_hypervisor,
       "enabled_hypervisors": cluster.enabled_hypervisors,
-      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
-                        for hypervisor in cluster.enabled_hypervisors]),
+      "hvparams": dict([(hvname, cluster.hvparams[hvname])
+                        for hvname in cluster.enabled_hypervisors]),
       "beparams": cluster.beparams,
       "nicparams": cluster.nicparams,
       "candidate_pool_size": cluster.candidate_pool_size,
@@ -2682,7 +2709,7 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
                            " (is_primary=False, pass=1): %s",
@@ -2699,7 +2726,7 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
         continue
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
                            " (is_primary=True, pass=2): %s",
@@ -2774,11 +2801,8 @@ def _SafeShutdownInstanceDisks(lu, instance):
 
   """
   pnode = instance.primary_node
-  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])
-  ins_l = ins_l[pnode]
-  msg = ins_l.RemoteFailMsg()
-  if msg:
-    raise errors.OpExecError("Can't contact node %s: %s" % (pnode, msg))
+  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
+  ins_l.Raise("Can't contact node %s" % pnode)
 
   if instance.name in ins_l.payload:
     raise errors.OpExecError("Instance is running, can't shutdown"
@@ -2801,7 +2825,7 @@ def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(top_disk, node)
       result = lu.rpc.call_blockdev_shutdown(node, top_disk)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                       disk.iv_name, node, msg)
@@ -2833,15 +2857,15 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
 
   """
   nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
-  nodeinfo[node].Raise()
-  free_mem = nodeinfo[node].data.get('memory_free')
+  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
+  free_mem = nodeinfo[node].payload.get('memory_free', None)
   if not isinstance(free_mem, int):
     raise errors.OpPrereqError("Can't compute free memory on node %s, result"
-                             " was '%s'" % (node, free_mem))
+                               " was '%s'" % (node, free_mem))
   if requested > free_mem:
     raise errors.OpPrereqError("Not enough memory on node %s for %s:"
-                             " needed %s MiB, available %s MiB" %
-                             (node, reason, requested, free_mem))
+                               " needed %s MiB, available %s MiB" %
+                               (node, reason, requested, free_mem))
 
 
 class LUStartupInstance(LogicalUnit):
@@ -2916,10 +2940,8 @@ class LUStartupInstance(LogicalUnit):
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    msg = remote_info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking node %s: %s" %
-                                 (instance.primary_node, msg))
+    remote_info.Raise("Error checking node %s" % instance.primary_node,
+                      prereq=True)
     if not remote_info.payload: # not running already
       _CheckNodeFreeMemory(self, instance.primary_node,
                            "starting instance %s" % instance.name,
@@ -2940,7 +2962,7 @@ class LUStartupInstance(LogicalUnit):
 
     result = self.rpc.call_instance_start(node_current, instance,
                                           self.hvparams, self.beparams)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       _ShutdownInstanceDisks(self, instance)
       raise errors.OpExecError("Could not start instance: %s" % msg)
@@ -3010,19 +3032,14 @@ class LURebootInstance(LogicalUnit):
         self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
                                              reboot_type)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not reboot instance: %s" % msg)
+      result.Raise("Could not reboot instance")
     else:
       result = self.rpc.call_instance_shutdown(node_current, instance)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not shutdown instance for"
-                                 " full reboot: %s" % msg)
+      result.Raise("Could not shutdown instance for full reboot")
       _ShutdownInstanceDisks(self, instance)
       _StartInstanceDisks(self, instance, ignore_secondaries)
       result = self.rpc.call_instance_start(node_current, instance, None, None)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance for"
@@ -3072,7 +3089,7 @@ class LUShutdownInstance(LogicalUnit):
     node_current = instance.primary_node
     self.cfg.MarkInstanceDown(instance.name)
     result = self.rpc.call_instance_shutdown(node_current, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       self.proc.LogWarning("Could not shutdown instance: %s" % msg)
 
@@ -3121,10 +3138,8 @@ class LUReinstallInstance(LogicalUnit):
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    msg = remote_info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking node %s: %s" %
-                                 (instance.primary_node, msg))
+    remote_info.Raise("Error checking node %s" % instance.primary_node,
+                      prereq=True)
     if remote_info.payload:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
@@ -3139,10 +3154,8 @@ class LUReinstallInstance(LogicalUnit):
         raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                    self.op.pnode)
       result = self.rpc.call_os_get(pnode.name, self.op.os_type)
-      result.Raise()
-      if not isinstance(result.data, objects.OS):
-        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
-                                   " primary node"  % self.op.os_type)
+      result.Raise("OS '%s' not in supported OS list for primary node %s" %
+                   (self.op.os_type, pnode.name), prereq=True)
 
     self.instance = instance
 
@@ -3161,11 +3174,8 @@ class LUReinstallInstance(LogicalUnit):
     try:
       feedback_fn("Running the instance OS create scripts...")
       result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not install OS for instance %s"
-                                 " on node %s: %s" %
-                                 (inst.name, inst.primary_node, msg))
+      result.Raise("Could not install OS for instance %s on node %s" %
+                   (inst.name, inst.primary_node))
     finally:
       _ShutdownInstanceDisks(self, inst)
 
@@ -3208,10 +3218,8 @@ class LURenameInstance(LogicalUnit):
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    msg = remote_info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking node %s: %s" %
-                                 (instance.primary_node, msg))
+    remote_info.Raise("Error checking node %s" % instance.primary_node,
+                      prereq=True)
     if remote_info.payload:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
@@ -3256,25 +3264,16 @@ class LURenameInstance(LogicalUnit):
       result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                      old_file_storage_dir,
                                                      new_file_storage_dir)
-      result.Raise()
-      if not result.data:
-        raise errors.OpExecError("Could not connect to node '%s' to rename"
-                                 " directory '%s' to '%s' (but the instance"
-                                 " has been renamed in Ganeti)" % (
-                                 inst.primary_node, old_file_storage_dir,
-                                 new_file_storage_dir))
-
-      if not result.data[0]:
-        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
-                                 " (but the instance has been renamed in"
-                                 " Ganeti)" % (old_file_storage_dir,
-                                               new_file_storage_dir))
+      result.Raise("Could not rename on node %s directory '%s' to '%s'"
+                   " (but the instance has been renamed in Ganeti)" %
+                   (inst.primary_node, old_file_storage_dir,
+                    new_file_storage_dir))
 
     _StartInstanceDisks(self, inst, None)
     try:
       result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                  old_name)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         msg = ("Could not run OS rename script for instance %s on node %s"
                " (but the instance has been renamed in Ganeti): %s" %
@@ -3331,7 +3330,7 @@ class LURemoveInstance(LogicalUnit):
                  instance.name, instance.primary_node)
 
     result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       if self.op.ignore_failures:
         feedback_fn("Warning: can't shutdown instance: %s" % msg)
@@ -3363,12 +3362,14 @@ class LUQueryInstances(NoHooksLU):
   _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                     "admin_state",
                                     "disk_template", "ip", "mac", "bridge",
+                                    "nic_mode", "nic_link",
                                     "sda_size", "sdb_size", "vcpus", "tags",
                                     "network_port", "beparams",
                                     r"(disk)\.(size)/([0-9]+)",
                                     r"(disk)\.(sizes)", "disk_usage",
-                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
-                                    r"(nic)\.(macs|ips|bridges)",
+                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
+                                    r"(nic)\.(bridge)/([0-9]+)",
+                                    r"(nic)\.(macs|ips|modes|links|bridges)",
                                     r"(disk|nic)\.(count)",
                                     "serial_no", "hypervisor", "hvparams",] +
                                   ["hv/%s" % name
@@ -3450,7 +3451,7 @@ class LUQueryInstances(NoHooksLU):
         if result.offline:
           # offline nodes will be in both lists
           off_nodes.append(name)
-        if result.failed or result.RemoteFailMsg():
+        if result.failed or result.fail_msg:
           bad_nodes.append(name)
         else:
           if result.payload:
@@ -3464,10 +3465,13 @@ class LUQueryInstances(NoHooksLU):
     HVPREFIX = "hv/"
     BEPREFIX = "be/"
     output = []
+    cluster = self.cfg.GetClusterInfo()
     for instance in instance_list:
       iout = []
-      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
-      i_be = self.cfg.GetClusterInfo().FillBE(instance)
+      i_hv = cluster.FillHV(instance)
+      i_be = cluster.FillBE(instance)
+      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
+                                 nic.nicparams) for nic in instance.nics]
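Here the per-NIC parameters are computed once per instance: the cluster's default NIC parameters overlaid with each NIC's own overrides. A small illustration, assuming FillDict is a plain defaults-plus-overrides merge and using made-up link names:

    from ganeti import constants, objects

    defaults = {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
                constants.NIC_LINK: "xen-br0"}
    overrides = {constants.NIC_LINK: "xen-br1"}   # set on one particular NIC
    filled = objects.FillDict(defaults, overrides)
    # filled keeps the default mode but the NIC-specific link; the new
    # "bridge"/"nic.bridges" fields below report the link only when the mode
    # is NIC_MODE_BRIDGED, and None otherwise.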
       for field in self.op.output_fields:
         st_match = self._FIELDS_STATIC.Matches(field)
         if field == "name":
@@ -3512,11 +3516,31 @@ class LUQueryInstances(NoHooksLU):
         elif field == "disk_template":
           val = instance.disk_template
         elif field == "ip":
-          val = instance.nics[0].ip
+          if instance.nics:
+            val = instance.nics[0].ip
+          else:
+            val = None
+        elif field == "nic_mode":
+          if instance.nics:
+            val = i_nicp[0][constants.NIC_MODE]
+          else:
+            val = None
+        elif field == "nic_link":
+          if instance.nics:
+            val = i_nicp[0][constants.NIC_LINK]
+          else:
+            val = None
         elif field == "bridge":
-          val = instance.nics[0].bridge
+          if (instance.nics and
+              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
+            val = i_nicp[0][constants.NIC_LINK]
+          else:
+            val = None
         elif field == "mac":
-          val = instance.nics[0].mac
+          if instance.nics:
+            val = instance.nics[0].mac
+          else:
+            val = None
         elif field == "sda_size" or field == "sdb_size":
           idx = ord(field[2]) - ord('a')
           try:
@@ -3566,8 +3590,17 @@ class LUQueryInstances(NoHooksLU):
               val = [nic.mac for nic in instance.nics]
             elif st_groups[1] == "ips":
               val = [nic.ip for nic in instance.nics]
+            elif st_groups[1] == "modes":
+              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
+            elif st_groups[1] == "links":
+              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
             elif st_groups[1] == "bridges":
-              val = [nic.bridge for nic in instance.nics]
+              val = []
+              for nicp in i_nicp:
+                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
+                  val.append(nicp[constants.NIC_LINK])
+                else:
+                  val.append(None)
             else:
               # index-based item
               nic_idx = int(st_groups[2])
@@ -3578,8 +3611,16 @@ class LUQueryInstances(NoHooksLU):
                   val = instance.nics[nic_idx].mac
                 elif st_groups[1] == "ip":
                   val = instance.nics[nic_idx].ip
+                elif st_groups[1] == "mode":
+                  val = i_nicp[nic_idx][constants.NIC_MODE]
+                elif st_groups[1] == "link":
+                  val = i_nicp[nic_idx][constants.NIC_LINK]
                 elif st_groups[1] == "bridge":
-                  val = instance.nics[nic_idx].bridge
+                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
+                  if nic_mode == constants.NIC_MODE_BRIDGED:
+                    val = i_nicp[nic_idx][constants.NIC_LINK]
+                  else:
+                    val = None
                 else:
                   assert False, "Unhandled NIC parameter"
           else:
@@ -3646,10 +3687,15 @@ class LUFailoverInstance(LogicalUnit):
     target_node = secondary_nodes[0]
     _CheckNodeOnline(self, target_node)
     _CheckNodeNotDrained(self, target_node)
-    # check memory requirements on the secondary node
-    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
-                         instance.name, bep[constants.BE_MEMORY],
-                         instance.hypervisor)
+    if instance.admin_up:
+      # check memory requirements on the secondary node
+      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
+                           instance.name, bep[constants.BE_MEMORY],
+                           instance.hypervisor)
+    else:
+      self.LogInfo("Not checking memory on the secondary node as"
+                   " instance will not be started")
+
     # check bridge existence
     _CheckInstanceBridgesExist(self, instance, node=target_node)
 
@@ -3678,7 +3724,7 @@ class LUFailoverInstance(LogicalUnit):
                  instance.name, source_node)
 
     result = self.rpc.call_instance_shutdown(source_node, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       if self.op.ignore_consistency:
         self.proc.LogWarning("Could not shutdown instance %s on node %s."
@@ -3712,7 +3758,7 @@ class LUFailoverInstance(LogicalUnit):
 
       feedback_fn("* starting the instance on the target node")
       result = self.rpc.call_instance_start(target_node, instance, None, None)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
@@ -3789,10 +3835,7 @@ class LUMigrateInstance(LogicalUnit):
       _CheckNodeNotDrained(self, target_node)
       result = self.rpc.call_instance_migratable(instance.primary_node,
                                                  instance)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
-                                   msg)
+      result.Raise("Can't migrate, please use failover", prereq=True)
 
     self.instance = instance
 
@@ -3811,10 +3854,7 @@ class LUMigrateInstance(LogicalUnit):
                                             self.instance.disks)
       min_percent = 100
       for node, nres in result.items():
-        msg = nres.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
-                                   (node, msg))
+        nres.Raise("Cannot resync disks on node %s" % node)
         node_done, node_percent = nres.payload
         all_done = all_done and node_done
         if node_percent is not None:
@@ -3835,10 +3875,7 @@ class LUMigrateInstance(LogicalUnit):
 
     result = self.rpc.call_blockdev_close(node, self.instance.name,
                                           self.instance.disks)
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
-                               " error %s" % (node, msg))
+    result.Raise("Cannot change disk to secondary on node %s" % node)
 
   def _GoStandalone(self):
     """Disconnect from the network.
@@ -3848,10 +3885,7 @@ class LUMigrateInstance(LogicalUnit):
     result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                                self.instance.disks)
     for node, nres in result.items():
-      msg = nres.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Cannot disconnect disks node %s,"
-                                 " error %s" % (node, msg))
+      nres.Raise("Cannot disconnect disks node %s" % node)
 
   def _GoReconnect(self, multimaster):
     """Reconnect to the network.
@@ -3866,10 +3900,7 @@ class LUMigrateInstance(LogicalUnit):
                                            self.instance.disks,
                                            self.instance.name, multimaster)
     for node, nres in result.items():
-      msg = nres.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Cannot change disks config on node %s,"
-                                 " error: %s" % (node, msg))
+      nres.Raise("Cannot change disks config on node %s" % node)
 
   def _ExecCleanup(self):
     """Try to cleanup after a failed migration.
@@ -3894,9 +3925,7 @@ class LUMigrateInstance(LogicalUnit):
                      " a bad state)")
     ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
     for node, result in ins_l.items():
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't contact node %s: %s" % (node, msg))
+      result.Raise("Can't contact node %s" % node)
 
     runningon_source = instance.name in ins_l[source_node].payload
     runningon_target = instance.name in ins_l[target_node].payload
@@ -3966,7 +3995,7 @@ class LUMigrateInstance(LogicalUnit):
                                                     instance,
                                                     migration_info,
                                                     False)
-    abort_msg = abort_result.RemoteFailMsg()
+    abort_msg = abort_result.fail_msg
     if abort_msg:
       logging.error("Aborting migration failed on target node %s: %s" %
                     (target_node, abort_msg))
@@ -3998,7 +4027,7 @@ class LUMigrateInstance(LogicalUnit):
 
     # First get the migration information from the remote node
     result = self.rpc.call_migration_info(source_node, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       log_err = ("Failed fetching source migration information from %s: %s" %
                  (source_node, msg))
@@ -4019,7 +4048,7 @@ class LUMigrateInstance(LogicalUnit):
                                            migration_info,
                                            self.nodes_ip[target_node])
 
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       logging.error("Instance pre-migration failed, trying to revert"
                     " disk status: %s", msg)
@@ -4033,7 +4062,7 @@ class LUMigrateInstance(LogicalUnit):
     result = self.rpc.call_instance_migrate(source_node, instance,
                                             self.nodes_ip[target_node],
                                             self.op.live)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       logging.error("Instance migration failed, trying to revert"
                     " disk status: %s", msg)
@@ -4051,7 +4080,7 @@ class LUMigrateInstance(LogicalUnit):
                                               instance,
                                               migration_info,
                                               True)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       logging.error("Instance migration succeeded, but finalization failed:"
                     " %s" % msg)
@@ -4151,11 +4180,8 @@ def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
   lu.cfg.SetDiskID(device, node)
   result = lu.rpc.call_blockdev_create(node, device, device.size,
                                        instance.name, force_open, info)
-  msg = result.RemoteFailMsg()
-  if msg:
-    raise errors.OpExecError("Can't create block device %s on"
-                             " node %s for instance %s: %s" %
-                             (device, node, instance.name, msg))
+  result.Raise("Can't create block device %s on"
+               " node %s for instance %s" % (device, node, instance.name))
   if device.physical_id is None:
     device.physical_id = result.payload
 
@@ -4287,12 +4313,8 @@ def _CreateDisks(lu, instance):
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
 
-    if result.failed or not result.data:
-      raise errors.OpExecError("Could not connect to node '%s'" % pnode)
-
-    if not result.data[0]:
-      raise errors.OpExecError("Failed to create directory '%s'" %
-                               file_storage_dir)
+    result.Raise("Failed to create directory '%s' on"
+                 " node %s" % (file_storage_dir, pnode))
 
   # Note: this needs to be kept in sync with adding of disks in
   # LUSetInstanceParams
@@ -4327,7 +4349,7 @@ def _RemoveDisks(lu, instance):
   for device in instance.disks:
     for node, disk in device.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(disk, node)
-      msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
       if msg:
         lu.LogWarning("Could not remove block device %s on node %s,"
                       " continuing anyway: %s", device.iv_name, node, msg)
@@ -4337,8 +4359,10 @@ def _RemoveDisks(lu, instance):
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                  file_storage_dir)
-    if result.failed or not result.data:
-      logging.error("Could not remove directory '%s'", file_storage_dir)
+    msg = result.fail_msg
+    if msg:
+      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
+                    file_storage_dir, instance.primary_node, msg)
       all_result = False
 
   return all_result
@@ -4388,10 +4412,7 @@ def _CheckHVParams(lu, nodenames, hvname, hvparams):
     info = hvinfo[node]
     if info.offline:
       continue
-    msg = info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Hypervisor parameter validation"
-                                 " failed on node %s: %s" % (node, msg))
+    info.Raise("Hypervisor parameter validation failed on node %s" % node)
 
 
 class LUCreateInstance(LogicalUnit):
@@ -4456,6 +4477,7 @@ class LUCreateInstance(LogicalUnit):
                                   self.op.hvparams)
     hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
     hv_type.CheckParameterSyntax(filled_hvp)
+    self.hv_full = filled_hvp
 
     # fill and remember the beparams dict
     utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
@@ -4516,7 +4538,8 @@ class LUCreateInstance(LogicalUnit):
       bridge = nic.get("bridge", None)
       link = nic.get("link", None)
       if bridge and link:
-        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link' at the same time")
+        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
+                                   " at the same time")
       elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
         raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
       elif bridge:
@@ -4660,9 +4683,12 @@ class LUCreateInstance(LogicalUnit):
       os_type=self.op.os_type,
       memory=self.be_full[constants.BE_MEMORY],
       vcpus=self.be_full[constants.BE_VCPUS],
-      nics=_PreBuildNICHooksList(self, self.nics),
+      nics=_NICListToTuple(self, self.nics),
       disk_template=self.op.disk_template,
       disks=[(d["size"], d["mode"]) for d in self.disks],
+      bep=self.be_full,
+      hvp=self.hv_full,
+      hypervisor=self.op.hypervisor,
     ))
 
     nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
@@ -4688,7 +4714,7 @@ class LUCreateInstance(LogicalUnit):
         exp_list = self.rpc.call_export_list(locked_nodes)
         found = False
         for node in exp_list:
-          if exp_list[node].RemoteFailMsg():
+          if exp_list[node].fail_msg:
             continue
           if src_path in exp_list[node].payload:
             found = True
@@ -4702,10 +4728,7 @@ class LUCreateInstance(LogicalUnit):
 
       _CheckNodeOnline(self, src_node)
       result = self.rpc.call_export_info(src_node, src_path)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("No export or invalid export found in"
-                                   " dir %s: %s" % (src_path, msg))
+      result.Raise("No export or invalid export found in dir %s" % src_path)
 
       export_info = objects.SerializableConfigParser.Loads(str(result.payload))
       if not export_info.has_section(constants.INISECT_EXP):
@@ -4813,28 +4836,23 @@ class LUCreateInstance(LogicalUnit):
                                          self.op.hypervisor)
       for node in nodenames:
         info = nodeinfo[node]
-        info.Raise()
-        info = info.data
-        if not info:
-          raise errors.OpPrereqError("Cannot get current information"
-                                     " from node '%s'" % node)
+        info.Raise("Cannot get current information from node %s" % node)
+        info = info.payload
         vg_free = info.get('vg_free', None)
         if not isinstance(vg_free, int):
           raise errors.OpPrereqError("Can't compute free disk space on"
                                      " node %s" % node)
-        if req_size > info['vg_free']:
+        if req_size > vg_free:
           raise errors.OpPrereqError("Not enough disk space on target node %s."
                                      " %d MB available, %d MB required" %
-                                     (node, info['vg_free'], req_size))
+                                     (node, vg_free, req_size))
 
     _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
 
     # os verification
     result = self.rpc.call_os_get(pnode.name, self.op.os_type)
-    result.Raise()
-    if not isinstance(result.data, objects.OS):
-      raise errors.OpPrereqError("OS '%s' not in supported os list for"
-                                 " primary node"  % self.op.os_type)
+    result.Raise("OS '%s' not in supported os list for primary node %s" %
+                 (self.op.os_type, pnode.name), prereq=True)
 
     _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 
@@ -4845,6 +4863,8 @@ class LUCreateInstance(LogicalUnit):
                            self.be_full[constants.BE_MEMORY],
                            self.op.hypervisor)
 
+    self.dry_run_result = list(nodenames)
+
   def Exec(self, feedback_fn):
     """Create and add the instance to the cluster.
 
@@ -4946,11 +4966,8 @@ class LUCreateInstance(LogicalUnit):
       if self.op.mode == constants.INSTANCE_CREATE:
         feedback_fn("* running the instance OS create scripts...")
         result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
-        msg = result.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Could not add os for instance %s"
-                                   " on node %s: %s" %
-                                   (instance, pnode_name, msg))
+        result.Raise("Could not add os for instance %s"
+                     " on node %s" % (instance, pnode_name))
 
       elif self.op.mode == constants.INSTANCE_IMPORT:
         feedback_fn("* running the instance OS import scripts...")
@@ -4960,7 +4977,7 @@ class LUCreateInstance(LogicalUnit):
         import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                          src_node, src_images,
                                                          cluster_name)
-        msg = import_result.RemoteFailMsg()
+        msg = import_result.fail_msg
         if msg:
           self.LogWarning("Error while importing the disk images for instance"
                           " %s on node %s: %s" % (instance, pnode_name, msg))
@@ -4975,9 +4992,9 @@ class LUCreateInstance(LogicalUnit):
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
       result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not start instance: %s" % msg)
+      result.Raise("Could not start instance")
+
+    return list(iobj.all_nodes)
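This return value is what the commit title refers to: a successful instance creation now reports the instance's node list, and CheckPrereq stores the same kind of list in dry_run_result so a dry-run submission yields it as well. Roughly, and assuming all_nodes lists the primary node followed by the secondaries:

    class InstanceSketch(object):             # stand-in for objects.Instance
      def __init__(self, primary_node, secondary_nodes):
        self.primary_node = primary_node
        self.secondary_nodes = secondary_nodes

      @property
      def all_nodes(self):
        # assumed ordering: primary node first, then the secondaries
        return [self.primary_node] + list(self.secondary_nodes)

    iobj = InstanceSketch("node1.example.com", ["node2.example.com"])
    assert list(iobj.all_nodes) == ["node1.example.com", "node2.example.com"]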
 
 
 class LUConnectConsole(NoHooksLU):
@@ -5014,10 +5031,7 @@ class LUConnectConsole(NoHooksLU):
 
     node_insts = self.rpc.call_instance_list([node],
                                              [instance.hypervisor])[node]
-    msg = node_insts.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Can't get node information from %s: %s" %
-                               (node, msg))
+    node_insts.Raise("Can't get node information from %s" % node)
 
     if instance.name not in node_insts.payload:
       raise errors.OpExecError("Instance %s is not running." % instance.name)
@@ -5241,9 +5255,7 @@ class LUReplaceDisks(LogicalUnit):
       raise errors.OpExecError("Can't list volume groups on the nodes")
     for node in oth_node, tgt_node:
       res = results[node]
-      msg = res.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Error checking node %s: %s" % (node, msg))
+      res.Raise("Error checking node %s" % node)
       if my_vg not in res.payload:
         raise errors.OpExecError("Volume group '%s' not found on %s" %
                                  (my_vg, node))
@@ -5254,7 +5266,7 @@ class LUReplaceDisks(LogicalUnit):
         info("checking disk/%d on %s" % (idx, node))
         cfg.SetDiskID(dev, node)
         result = self.rpc.call_blockdev_find(node, dev)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if not msg and not result.payload:
           msg = "disk not found"
         if msg:
@@ -5302,11 +5314,8 @@ class LUReplaceDisks(LogicalUnit):
     for dev, old_lvs, new_lvs in iv_names.itervalues():
       info("detaching %s drbd from local storage" % dev.iv_name)
       result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't detach drbd from local storage on node"
-                                 " %s for device %s: %s" %
-                                 (tgt_node, dev.iv_name, msg))
+      result.Raise("Can't detach drbd from local storage on node"
+                   " %s for device %s" % (tgt_node, dev.iv_name))
       #dev.children = []
       #cfg.Update(instance)
 
@@ -5324,24 +5333,18 @@ class LUReplaceDisks(LogicalUnit):
       rlist = []
       for to_ren in old_lvs:
         result = self.rpc.call_blockdev_find(tgt_node, to_ren)
-        if not result.RemoteFailMsg() and result.payload:
+        if not result.fail_msg and result.payload:
           # device exists
           rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
 
       info("renaming the old LVs on the target node")
       result = self.rpc.call_blockdev_rename(tgt_node, rlist)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't rename old LVs on node %s: %s" %
-                                 (tgt_node, msg))
+      result.Raise("Can't rename old LVs on node %s" % tgt_node)
       # now we rename the new LVs to the old LVs
       info("renaming the new LVs on the target node")
       rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
       result = self.rpc.call_blockdev_rename(tgt_node, rlist)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't rename new LVs on node %s: %s" %
-                                 (tgt_node, msg))
+      result.Raise("Can't rename new LVs on node %s" % tgt_node)
 
       for old, new in zip(old_lvs, new_lvs):
         new.logical_id = old.logical_id
@@ -5354,12 +5357,12 @@ class LUReplaceDisks(LogicalUnit):
       # now that the new lvs have the old name, we can add them to the device
       info("adding new mirror component on %s" % tgt_node)
       result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         for new_lv in new_lvs:
-          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
-          if msg:
-            warning("Can't rollback device %s: %s", dev, msg,
+          msg2 = self.rpc.call_blockdev_remove(tgt_node, new_lv).fail_msg
+          if msg2:
+            warning("Can't rollback device %s: %s", dev, msg2,
                     hint="cleanup manually the unused logical volumes")
         raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
 
@@ -5378,7 +5381,7 @@ class LUReplaceDisks(LogicalUnit):
     for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
       cfg.SetDiskID(dev, instance.primary_node)
       result = self.rpc.call_blockdev_find(instance.primary_node, dev)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if not msg and not result.payload:
         msg = "disk not found"
       if msg:
@@ -5393,7 +5396,7 @@ class LUReplaceDisks(LogicalUnit):
       info("remove logical volumes for %s" % name)
       for lv in old_lvs:
         cfg.SetDiskID(lv, tgt_node)
-        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
+        msg = self.rpc.call_blockdev_remove(tgt_node, lv).fail_msg
         if msg:
           warning("Can't remove old LV: %s" % msg,
                   hint="manually remove unused LVs")
@@ -5440,9 +5443,7 @@ class LUReplaceDisks(LogicalUnit):
     results = self.rpc.call_vg_list([pri_node, new_node])
     for node in pri_node, new_node:
       res = results[node]
-      msg = res.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Error checking node %s: %s" % (node, msg))
+      res.Raise("Error checking node %s" % node)
       if my_vg not in res.payload:
         raise errors.OpExecError("Volume group '%s' not found on %s" %
                                  (my_vg, node))
@@ -5452,7 +5453,7 @@ class LUReplaceDisks(LogicalUnit):
       info("checking disk/%d on %s" % (idx, pri_node))
       cfg.SetDiskID(dev, pri_node)
       result = self.rpc.call_blockdev_find(pri_node, dev)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if not msg and not result.payload:
         msg = "disk not found"
       if msg:
@@ -5508,7 +5509,8 @@ class LUReplaceDisks(LogicalUnit):
                     new_net_id)
       new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                               logical_id=new_alone_id,
-                              children=dev.children)
+                              children=dev.children,
+                              size=dev.size)
       try:
         _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                               _GetInstanceInfoText(instance), False)
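
The standalone DRBD device above is now created with an explicit size copied from the device it replaces, presumably so the replacement disk carries the same size metadata as the original rather than leaving it unset. A minimal sketch with an illustrative Disk container (not the real objects.Disk):

    class FakeDisk(object):
      """Illustrative container mirroring the fields used above."""

      def __init__(self, dev_type, logical_id, children=None, size=None):
        self.dev_type = dev_type
        self.logical_id = logical_id
        self.children = children or []
        self.size = size

    data_lv = FakeDisk("lvm", ("xenvg", "data-lv"), size=10240)    # size in MiB
    new_drbd = FakeDisk("drbd8", ("nodeA", "nodeB", 11000, 0, 1, "secret"),
                        children=[data_lv], size=data_lv.size)     # size carried over
    assert new_drbd.size == 10240
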
@@ -5520,7 +5522,7 @@ class LUReplaceDisks(LogicalUnit):
       # we have new devices, shutdown the drbd on the old secondary
       info("shutting down drbd for disk/%d on old node" % idx)
       cfg.SetDiskID(dev, old_node)
-      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
+      msg = self.rpc.call_blockdev_shutdown(old_node, dev).fail_msg
       if msg:
         warning("Failed to shutdown drbd for disk/%d on old node: %s" %
                 (idx, msg),
@@ -5530,7 +5532,7 @@ class LUReplaceDisks(LogicalUnit):
     result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                                instance.disks)[pri_node]
 
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       # detaches didn't succeed (unlikely)
       self.cfg.ReleaseDRBDMinors(instance.name)
@@ -5551,7 +5553,7 @@ class LUReplaceDisks(LogicalUnit):
                                            instance.disks, instance.name,
                                            False)
     for to_node, to_result in result.items():
-      msg = to_result.RemoteFailMsg()
+      msg = to_result.fail_msg
       if msg:
         warning("can't attach drbd disks on node %s: %s", to_node, msg,
                 hint="please do a gnt-instance info to see the"
@@ -5567,7 +5569,7 @@ class LUReplaceDisks(LogicalUnit):
     for idx, (dev, old_lvs, _) in iv_names.iteritems():
       cfg.SetDiskID(dev, pri_node)
       result = self.rpc.call_blockdev_find(pri_node, dev)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if not msg and not result.payload:
         msg = "disk not found"
       if msg:
@@ -5581,7 +5583,7 @@ class LUReplaceDisks(LogicalUnit):
       info("remove logical volumes for disk/%d" % idx)
       for lv in old_lvs:
         cfg.SetDiskID(lv, old_node)
-        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
+        msg = self.rpc.call_blockdev_remove(old_node, lv).fail_msg
         if msg:
           warning("Can't remove LV on old secondary: %s", msg,
                   hint="Cleanup stale volumes by hand")
@@ -5673,10 +5675,8 @@ class LUGrowDisk(LogicalUnit):
                                        instance.hypervisor)
     for node in nodenames:
       info = nodeinfo[node]
-      if info.failed or not info.data:
-        raise errors.OpPrereqError("Cannot get current information"
-                                   " from node '%s'" % node)
-      vg_free = info.data.get('vg_free', None)
+      info.Raise("Cannot get current information from node %s" % node)
+      vg_free = info.payload.get('vg_free', None)
       if not isinstance(vg_free, int):
         raise errors.OpPrereqError("Can't compute free disk space on"
                                    " node %s" % node)
@@ -5694,10 +5694,7 @@ class LUGrowDisk(LogicalUnit):
     for node in instance.all_nodes:
       self.cfg.SetDiskID(disk, node)
       result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Grow request failed to node %s: %s" %
-                                 (node, msg))
+      result.Raise("Grow request failed to node %s" % node)
     disk.RecordGrow(self.op.amount)
     self.cfg.Update(instance)
     if self.op.wait_for_sync:
@@ -5764,10 +5761,7 @@ class LUQueryInstanceData(NoHooksLU):
       if dev_pstatus.offline:
         dev_pstatus = None
       else:
-        msg = dev_pstatus.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't compute disk status for %s: %s" %
-                                   (instance.name, msg))
+        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
         dev_pstatus = dev_pstatus.payload
     else:
       dev_pstatus = None
@@ -5785,10 +5779,7 @@ class LUQueryInstanceData(NoHooksLU):
       if dev_sstatus.offline:
         dev_sstatus = None
       else:
-        msg = dev_sstatus.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't compute disk status for %s: %s" %
-                                   (instance.name, msg))
+        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
         dev_sstatus = dev_sstatus.payload
     else:
       dev_sstatus = None
@@ -5823,10 +5814,7 @@ class LUQueryInstanceData(NoHooksLU):
         remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                   instance.name,
                                                   instance.hypervisor)
-        msg = remote_info.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Error checking node %s: %s" %
-                                   (instance.primary_node, msg))
+        remote_info.Raise("Error checking node %s" % instance.primary_node)
         remote_info = remote_info.payload
         if remote_info and "state" in remote_info:
           remote_state = "up"
@@ -5849,7 +5837,8 @@ class LUQueryInstanceData(NoHooksLU):
         "pnode": instance.primary_node,
         "snodes": instance.secondary_nodes,
         "os": instance.os,
-        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
+        # this happens to be the same format used for hooks
+        "nics": _NICListToTuple(self, instance.nics),
         "disks": disks,
         "hypervisor": instance.hypervisor,
         "network_port": instance.network_port,
@@ -5945,7 +5934,8 @@ class LUSetInstanceParams(LogicalUnit):
       nic_bridge = nic_dict.get('bridge', None)
       nic_link = nic_dict.get('link', None)
       if nic_bridge and nic_link:
-        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link' at the same time")
+        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
+                                   " at the same time")
       elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
         nic_dict['bridge'] = None
       elif nic_link and nic_link.lower() == constants.VALUE_NONE:
@@ -6034,11 +6024,11 @@ class LUSetInstanceParams(LogicalUnit):
     """Return the new params dict for the given params.
 
     @type old_params: dict
-    @type old_params: old parameters
+    @param old_params: old parameters
     @type update_dict: dict
-    @type update_dict: dict containing new parameter values,
-                       or constants.VALUE_DEFAULT to reset the
-                       parameter to its default value
+    @param update_dict: dict containing new parameter values,
+                        or constants.VALUE_DEFAULT to reset the
+                        parameter to its default value
     @type default_values: dict
     @param default_values: default values for the filled parameters
     @type parameter_types: dict
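
A minimal sketch of the update semantics the corrected docstring describes (this is not the real helper; VALUE_DEFAULT and the fill step are simplified, and type checking is omitted): keys set to the special default marker are dropped so they fall back to the defaults, anything else overrides the old value.

    VALUE_DEFAULT = "default"    # stand-in for constants.VALUE_DEFAULT

    def get_updated_params(old_params, update_dict, default_values):
      """Return (explicit, filled) parameter dicts per the docstring above."""
      explicit = dict(old_params)
      for key, val in update_dict.items():
        if val == VALUE_DEFAULT:
          explicit.pop(key, None)        # reset: let the default win again
        else:
          explicit[key] = val            # explicit override
      filled = dict(default_values)
      filled.update(explicit)            # defaults overlaid with explicit values
      return explicit, filled

    # Illustrative values:
    explicit, filled = get_updated_params(
        {"kernel_path": "/boot/vmlinuz-custom"},
        {"kernel_path": VALUE_DEFAULT},
        {"kernel_path": "/boot/vmlinuz-2.6-xenU"})
    assert "kernel_path" not in explicit
    assert filled["kernel_path"] == "/boot/vmlinuz-2.6-xenU"
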
@@ -6115,12 +6105,18 @@ class LUSetInstanceParams(LogicalUnit):
                                                   instance.hypervisor)
       nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                          instance.hypervisor)
-      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
+      pninfo = nodeinfo[pnode]
+      msg = pninfo.fail_msg
+      if msg:
         # Assume the primary node is unreachable and go ahead
-        self.warn.append("Can't get info from primary node %s" % pnode)
-      elif instance_info.RemoteFailMsg():
+        self.warn.append("Can't get info from primary node %s: %s" %
+                         (pnode, msg))
+      elif not isinstance(pninfo.payload.get('memory_free', None), int):
+        self.warn.append("Node data from primary node %s doesn't contain"
+                         " free memory information" % pnode)
+      elif instance_info.fail_msg:
         self.warn.append("Can't get instance runtime information: %s" %
-                        instance_info.RemoteFailMsg())
+                        instance_info.fail_msg)
       else:
         if instance_info.payload:
           current_mem = int(instance_info.payload['memory'])
@@ -6130,19 +6126,24 @@ class LUSetInstanceParams(LogicalUnit):
           # and we have no other way to check)
           current_mem = 0
         miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
-                    nodeinfo[pnode].data['memory_free'])
+                    pninfo.payload['memory_free'])
         if miss_mem > 0:
           raise errors.OpPrereqError("This change will prevent the instance"
                                      " from starting, due to %d MB of memory"
                                      " missing on its primary node" % miss_mem)
 
       if be_new[constants.BE_AUTO_BALANCE]:
-        for node, nres in nodeinfo.iteritems():
+        for node, nres in nodeinfo.items():
           if node not in instance.secondary_nodes:
             continue
-          if nres.failed or not isinstance(nres.data, dict):
-            self.warn.append("Can't get info from secondary node %s" % node)
-          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
+          msg = nres.fail_msg
+          if msg:
+            self.warn.append("Can't get info from secondary node %s: %s" %
+                             (node, msg))
+          elif not isinstance(nres.payload.get('memory_free', None), int):
+            self.warn.append("Secondary node %s didn't return free"
+                             " memory information" % node)
+          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
             self.warn.append("Not enough memory to failover instance to"
                              " secondary node %s" % node)
 
@@ -6184,8 +6185,7 @@ class LUSetInstanceParams(LogicalUnit):
 
       if new_nic_mode == constants.NIC_MODE_BRIDGED:
         nic_bridge = new_filled_nic_params[constants.NIC_LINK]
-        result = self.rpc.call_bridges_exist(pnode, [nic_bridge])
-        msg = result.RemoteFailMsg()
+        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
         if msg:
           msg = "Error checking bridges on node %s: %s" % (pnode, msg)
           if self.force:
@@ -6224,7 +6224,7 @@ class LUSetInstanceParams(LogicalUnit):
                                      " an instance")
         ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
         ins_l = ins_l[pnode]
-        msg = ins_l.RemoteFailMsg()
+        msg = ins_l.fail_msg
         if msg:
           raise errors.OpPrereqError("Can't contact node %s: %s" %
                                      (pnode, msg))
@@ -6267,7 +6267,7 @@ class LUSetInstanceParams(LogicalUnit):
         device_idx = len(instance.disks)
         for node, disk in device.ComputeNodeTree(instance.primary_node):
           self.cfg.SetDiskID(disk, node)
-          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
           if msg:
             self.LogWarning("Could not remove disk/%d on node %s: %s,"
                             " continuing anyway", device_idx, node, msg)
@@ -6389,7 +6389,7 @@ class LUQueryExports(NoHooksLU):
     rpcresult = self.rpc.call_export_list(self.nodes)
     result = {}
     for node in rpcresult:
-      if rpcresult[node].RemoteFailMsg():
+      if rpcresult[node].fail_msg:
         result[node] = False
       else:
         result[node] = rpcresult[node].payload
@@ -6474,11 +6474,8 @@ class LUExportInstance(LogicalUnit):
     if self.op.shutdown:
       # shutdown the instance, but not the disks
       result = self.rpc.call_instance_shutdown(src_node, instance)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not shutdown instance %s on"
-                                 " node %s: %s" %
-                                 (instance.name, src_node, msg))
+      result.Raise("Could not shutdown instance %s on"
+                   " node %s" % (instance.name, src_node))
 
     vgname = self.cfg.GetVGName()
 
@@ -6490,13 +6487,13 @@ class LUExportInstance(LogicalUnit):
       self.cfg.SetDiskID(disk, src_node)
 
     try:
-      for disk in instance.disks:
+      for idx, disk in enumerate(instance.disks):
         # result.payload will be a snapshot of an lvm leaf of the one we passed
         result = self.rpc.call_blockdev_snapshot(src_node, disk)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
-          self.LogWarning("Could not snapshot block device %s on node %s: %s",
-                          disk.logical_id[1], src_node, msg)
+          self.LogWarning("Could not snapshot disk/%d on node %s: %s",
+                          idx, src_node, msg)
           snap_disks.append(False)
         else:
           disk_id = (vgname, result.payload)
@@ -6508,7 +6505,7 @@ class LUExportInstance(LogicalUnit):
     finally:
       if self.op.shutdown and instance.admin_up:
         result = self.rpc.call_instance_start(src_node, instance, None, None)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
           _ShutdownInstanceDisks(self, instance)
           raise errors.OpExecError("Could not start instance: %s" % msg)
@@ -6520,18 +6517,17 @@ class LUExportInstance(LogicalUnit):
       if dev:
         result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                                instance, cluster_name, idx)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
-          self.LogWarning("Could not export block device %s from node %s to"
-                          " node %s: %s", dev.logical_id[1], src_node,
-                          dst_node.name, msg)
-        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
+          self.LogWarning("Could not export disk/%d from node %s to"
+                          " node %s: %s", idx, src_node, dst_node.name, msg)
+        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
         if msg:
-          self.LogWarning("Could not remove snapshot block device %s from node"
-                          " %s: %s", dev.logical_id[1], src_node, msg)
+          self.LogWarning("Could not remove snapshot for disk/%d from node"
+                          " %s: %s", idx, src_node, msg)
 
     result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       self.LogWarning("Could not finalize export for instance %s"
                       " on node %s: %s", instance.name, dst_node.name, msg)
@@ -6546,10 +6542,10 @@ class LUExportInstance(LogicalUnit):
     if nodelist:
       exportlist = self.rpc.call_export_list(nodelist)
       for node in exportlist:
-        if exportlist[node].RemoteFailMsg():
+        if exportlist[node].fail_msg:
           continue
         if iname in exportlist[node].payload:
-          msg = self.rpc.call_export_remove(node, iname).RemoteFailMsg()
+          msg = self.rpc.call_export_remove(node, iname).fail_msg
           if msg:
             self.LogWarning("Could not remove older export for instance %s"
                             " on node %s: %s", iname, node, msg)
@@ -6590,14 +6586,14 @@ class LURemoveExport(NoHooksLU):
     exportlist = self.rpc.call_export_list(locked_nodes)
     found = False
     for node in exportlist:
-      msg = exportlist[node].RemoteFailMsg()
+      msg = exportlist[node].fail_msg
       if msg:
         self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
         continue
       if instance_name in exportlist[node].payload:
         found = True
         result = self.rpc.call_export_remove(node, instance_name)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
           logging.error("Could not remove export for instance %s"
                         " on node %s: %s", instance_name, node, msg)
@@ -6812,13 +6808,8 @@ class LUTestDelay(NoHooksLU):
         raise errors.OpExecError("Error during master delay test")
     if self.op.on_nodes:
       result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
-      if not result:
-        raise errors.OpExecError("Complete failure from rpc call")
       for node, node_result in result.items():
-        node_result.Raise()
-        if not node_result.data:
-          raise errors.OpExecError("Failure during rpc call to node %s,"
-                                   " result: %s" % (node, node_result.data))
+        node_result.Raise("Failure during rpc call to node %s" % node)
 
 
 class IAllocator(object):
@@ -6920,24 +6911,19 @@ class IAllocator(object):
         }
 
       if not ninfo.offline:
-        nresult.Raise()
-        if not isinstance(nresult.data, dict):
-          raise errors.OpExecError("Can't get data for node %s" % nname)
-        msg = node_iinfo[nname].RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't get node instance info"
-                                   " from node %s: %s" % (nname, msg))
-        remote_info = nresult.data
+        nresult.Raise("Can't get data for node %s" % nname)
+        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
+                                nname)
+        remote_info = nresult.payload
         for attr in ['memory_total', 'memory_free', 'memory_dom0',
                      'vg_size', 'vg_free', 'cpu_total']:
           if attr not in remote_info:
             raise errors.OpExecError("Node '%s' didn't return attribute"
                                      " '%s'" % (nname, attr))
-          try:
-            remote_info[attr] = int(remote_info[attr])
-          except ValueError, err:
+          if not isinstance(remote_info[attr], int):
             raise errors.OpExecError("Node '%s' returned invalid value"
-                                     " for '%s': %s" % (nname, attr, err))
+                                     " for '%s': %s" %
+                                     (nname, attr, remote_info[attr]))
         # compute memory used by primary instances
         i_p_mem = i_p_up_mem = 0
         for iinfo, beinfo in i_list:
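
The validation above is now stricter: node attributes are expected to arrive as integers from the RPC layer, so the old int() coercion and its ValueError handling disappear. A self-contained sketch of the same check, run against a hypothetical payload:

    REQUIRED_ATTRS = ['memory_total', 'memory_free', 'memory_dom0',
                      'vg_size', 'vg_free', 'cpu_total']

    def check_node_attrs(nname, remote_info):
      """Sketch mirroring the attribute validation above."""
      for attr in REQUIRED_ATTRS:
        if attr not in remote_info:
          raise ValueError("Node '%s' didn't return attribute '%s'" % (nname, attr))
        if not isinstance(remote_info[attr], int):
          raise ValueError("Node '%s' returned invalid value for '%s': %s" %
                           (nname, attr, remote_info[attr]))

    check_node_attrs("node1.example.com",
                     {'memory_total': 4096, 'memory_free': 2048, 'memory_dom0': 512,
                      'vg_size': 102400, 'vg_free': 51200, 'cpu_total': 4})   # passes
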
@@ -7094,19 +7080,9 @@ class IAllocator(object):
     data = self.in_text
 
     result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
-    result.Raise()
-
-    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
-      raise errors.OpExecError("Invalid result from master iallocator runner")
-
-    rcode, stdout, stderr, fail = result.data
+    result.Raise("Failure while running the iallocator script")
 
-    if rcode == constants.IARUN_NOTFOUND:
-      raise errors.OpExecError("Can't find allocator '%s'" % name)
-    elif rcode == constants.IARUN_FAILURE:
-      raise errors.OpExecError("Instance allocator call failed: %s,"
-                               " output: %s" % (fail, stdout+stderr))
-    self.out_text = stdout
+    self.out_text = result.payload
     if validate:
       self._ValidateResult()
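
With the new result API the master-side iallocator runner is assumed to hand back the allocator's output directly in payload, so the per-run (rcode, stdout, stderr, fail) unpacking and the IARUN_* dispatch move out of this method, presumably being handled closer to the RPC layer. A hedged sketch with a stub call function and an illustrative result class:

    class FakeRpcResult(object):
      """Illustrative stand-in, as in the sketch near the top of this section."""

      def __init__(self, payload=None, fail_msg=None):
        self.payload = payload
        self.fail_msg = fail_msg

      def Raise(self, msg):
        if self.fail_msg:
          raise RuntimeError("%s: %s" % (msg, self.fail_msg))

    def fake_call_iallocator_runner(node, name, in_text):
      """Stub for the RPC call; the real runner executes the allocator script."""
      return FakeRpcResult(payload='{"success": true, "nodes": ["node2.example.com"]}')

    result = fake_call_iallocator_runner("master.example.com", "hail", "{}")
    result.Raise("Failure while running the iallocator script")
    out_text = result.payload        # raw allocator output, validated afterwards
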