Update instance query for NIC parameters
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 5e97749..58318e3 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -453,7 +453,8 @@ def _CheckNodeNotDrained(lu, node):
 
 
 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
-                          memory, vcpus, nics, disk_template, disks):
+                          memory, vcpus, nics, disk_template, disks,
+                          bep, hvp, hypervisor):
   """Builds instance related env variables for hooks
 
   This builds the hook environment from individual variables.
@@ -479,6 +480,12 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
  @param disk_template: the disk template of the instance
   @type disks: list
   @param disks: the list of (size, mode) pairs
+  @type bep: dict
+  @param bep: the backend parameters for the instance
+  @type hvp: dict
+  @param hvp: the hypervisor parameters for the instance
+  @type hypervisor: string
+  @param hypervisor: the hypervisor for the instance
   @rtype: dict
   @return: the hook environment for this instance
 
@@ -497,6 +504,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     "INSTANCE_MEMORY": memory,
     "INSTANCE_VCPUS": vcpus,
     "INSTANCE_DISK_TEMPLATE": disk_template,
+    "INSTANCE_HYPERVISOR": hypervisor,
   }
 
   if nics:
@@ -525,12 +533,17 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   env["INSTANCE_DISK_COUNT"] = disk_count
 
+  for source, kind in [(bep, "BE"), (hvp, "HV")]:
+    for key, value in source.items():
+      env["INSTANCE_%s_%s" % (kind, key)] = value
+
   return env
 
-def _PreBuildNICHooksList(lu, nics):
+def _NICListToTuple(lu, nics):
   """Build a list of nic information tuples.
 
-  This list is suitable to be passed to _BuildInstanceHookEnv.
+  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
+  value in LUQueryInstanceData.
 
   @type lu:  L{LogicalUnit}
   @param lu: the logical unit on whose behalf we execute
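For illustration only (not part of the patch): the new loop above flattens the backend and hypervisor parameter dictionaries into individual hook environment variables. A minimal standalone sketch, with made-up parameter values:

def _flatten_instance_params(bep, hvp):
  # Mirrors the INSTANCE_BE_*/INSTANCE_HV_* loop added to _BuildInstanceHookEnv
  env = {}
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value
  return env

# Hypothetical values, purely for the example's sake:
#   _flatten_instance_params({"memory": 128, "vcpus": 1},
#                            {"kernel_path": "/boot/vmlinuz"})
#   => {'INSTANCE_BE_memory': 128, 'INSTANCE_BE_vcpus': 1,
#       'INSTANCE_HV_kernel_path': '/boot/vmlinuz'}
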
@@ -564,7 +577,9 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
   @return: the hook environment dictionary
 
   """
-  bep = lu.cfg.GetClusterInfo().FillBE(instance)
+  cluster = lu.cfg.GetClusterInfo()
+  bep = cluster.FillBE(instance)
+  hvp = cluster.FillHV(instance)
   args = {
     'name': instance.name,
     'primary_node': instance.primary_node,
@@ -573,9 +588,12 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
     'status': instance.admin_up,
     'memory': bep[constants.BE_MEMORY],
     'vcpus': bep[constants.BE_VCPUS],
-    'nics': _PreBuildNICHooksList(lu, instance.nics),
+    'nics': _NICListToTuple(lu, instance.nics),
     'disk_template': instance.disk_template,
     'disks': [(disk.size, disk.mode) for disk in instance.disks],
+    'bep': bep,
+    'hvp': hvp,
+    'hypervisor': instance.hypervisor,
   }
   if override:
     args.update(override)
@@ -610,10 +628,8 @@ def _CheckNicsBridgesExist(lu, target_nics, target_node,
             if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
   if brlist:
     result = lu.rpc.call_bridges_exist(target_node, brlist)
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking bridges on destination node"
-                                 " '%s': %s" % (target_node, msg))
+    result.Raise("Error checking bridges on destination node '%s'" %
+                 target_node, prereq=True)
 
 
 def _CheckInstanceBridgesExist(lu, instance, node=None):
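Most hunks in this patch apply the same conversion: the old RemoteFailMsg()/result.data checks become the RpcResult attributes fail_msg, payload and offline, plus the Raise() helper. The real result class lives in lib/rpc.py; the following is only a rough sketch of the convention, with stand-in exception classes, to make the pattern explicit:

class OpPrereqError(Exception):  # stand-in for ganeti.errors.OpPrereqError
  pass

class OpExecError(Exception):    # stand-in for ganeti.errors.OpExecError
  pass

class RpcResultSketch(object):
  def __init__(self, payload=None, fail_msg=None, offline=False):
    self.payload = payload    # remote return value when the call succeeded
    self.fail_msg = fail_msg  # error description, empty on success
    self.offline = offline    # True if the node was skipped as offline

  def Raise(self, msg, prereq=False):
    # Raise an error (including the remote failure message) if the call
    # failed; prereq=True selects OpPrereqError, otherwise OpExecError.
    if not self.fail_msg:
      return
    if prereq:
      raise OpPrereqError("%s: %s" % (msg, self.fail_msg))
    raise OpExecError("%s: %s" % (msg, self.fail_msg))
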
@@ -656,9 +672,7 @@ class LUDestroyCluster(NoHooksLU):
     """
     master = self.cfg.GetMasterNode()
     result = self.rpc.call_node_stop_master(master, False)
-    result.Raise()
-    if not result.data:
-      raise errors.OpExecError("Could not disable the master role")
+    result.Raise("Could not disable the master role")
     priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
     utils.CreateBackup(priv_key)
     utils.CreateBackup(pub_key)
@@ -1022,7 +1036,6 @@ class LUVerifyCluster(LogicalUnit):
 
     for node_i in nodeinfo:
       node = node_i.name
-      nresult = all_nvinfo[node].data
 
       if node_i.offline:
         feedback_fn("* Skipping offline node %s" % (node,))
@@ -1040,11 +1053,13 @@ class LUVerifyCluster(LogicalUnit):
         ntype = "regular"
       feedback_fn("* Verifying node %s (%s)" % (node, ntype))
 
-      if all_nvinfo[node].failed or not isinstance(nresult, dict):
-        feedback_fn("  - ERROR: connection to %s failed" % (node,))
+      msg = all_nvinfo[node].fail_msg
+      if msg:
+        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
         bad = True
         continue
 
+      nresult = all_nvinfo[node].payload
       node_drbd = {}
       for minor, instance in all_drbd_map[node].items():
         if instance not in instanceinfo:
@@ -1241,14 +1256,16 @@ class LUVerifyCluster(LogicalUnit):
         for node_name in hooks_results:
           show_node_header = True
           res = hooks_results[node_name]
-          if res.failed or res.data is False or not isinstance(res.data, list):
+          msg = res.fail_msg
+          if msg:
             if res.offline:
               # no need to warn or set fail return value
               continue
-            feedback_fn("    Communication failure in hooks execution")
+            feedback_fn("    Communication failure in hooks execution: %s" %
+                        msg)
             lu_result = 1
             continue
-          for script, hkr, output in res.data:
+          for script, hkr, output in res.payload:
             if hkr == constants.HKR_FAIL:
               # The node header is only shown once, if there are
               # failing hooks on that node
@@ -1324,7 +1341,7 @@ class LUVerifyDisks(NoHooksLU):
       node_res = node_lvs[node]
       if node_res.offline:
         continue
-      msg = node_res.RemoteFailMsg()
+      msg = node_res.fail_msg
       if msg:
         logging.warning("Error enumerating LVs on node %s: %s", node, msg)
         res_nodes[node] = msg
@@ -1397,8 +1414,7 @@ class LURenameCluster(LogicalUnit):
     # shutdown the master IP
     master = self.cfg.GetMasterNode()
     result = self.rpc.call_node_stop_master(master, False)
-    if result.failed or not result.data:
-      raise errors.OpExecError("Could not disable the master role")
+    result.Raise("Could not disable the master role")
 
     try:
       cluster = self.cfg.GetClusterInfo()
@@ -1416,17 +1432,18 @@ class LURenameCluster(LogicalUnit):
       result = self.rpc.call_upload_file(node_list,
                                          constants.SSH_KNOWN_HOSTS_FILE)
       for to_node, to_result in result.iteritems():
-         msg = to_result.RemoteFailMsg()
-         if msg:
-           msg = ("Copy of file %s to node %s failed: %s" %
-                   (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
-           self.proc.LogWarning(msg)
+        msg = to_result.fail_msg
+        if msg:
+          msg = ("Copy of file %s to node %s failed: %s" %
+                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
+          self.proc.LogWarning(msg)
 
     finally:
       result = self.rpc.call_node_start_master(master, False)
-      if result.failed or not result.data:
+      msg = result.fail_msg
+      if msg:
         self.LogWarning("Could not re-enable the master role on"
-                        " the master, please restart manually.")
+                        " the master, please restart manually: %s", msg)
 
 
 def _RecursiveCheckIfLVMBased(disk):
@@ -1509,7 +1526,7 @@ class LUSetClusterParams(LogicalUnit):
     if self.op.vg_name:
       vglist = self.rpc.call_vg_list(node_list)
       for node in node_list:
-        msg = vglist[node].RemoteFailMsg()
+        msg = vglist[node].fail_msg
         if msg:
           # ignoring down node
           self.LogWarning("Error while gathering data on node %s"
@@ -1631,11 +1648,11 @@ def _RedistributeAncillaryFiles(lu, additional_nodes=None):
     if os.path.exists(fname):
       result = lu.rpc.call_upload_file(dist_nodes, fname)
       for to_node, to_result in result.items():
-         msg = to_result.RemoteFailMsg()
-         if msg:
-           msg = ("Copy of file %s to node %s failed: %s" %
-                   (fname, to_node, msg))
-           lu.proc.LogWarning(msg)
+        msg = to_result.fail_msg
+        if msg:
+          msg = ("Copy of file %s to node %s failed: %s" %
+                 (fname, to_node, msg))
+          lu.proc.LogWarning(msg)
 
 
 class LURedistributeConfig(NoHooksLU):
@@ -1682,12 +1699,13 @@ def _WaitForSync(lu, instance, oneshot=False, unlock=False):
     lu.cfg.SetDiskID(dev, node)
 
   retries = 0
+  degr_retries = 10 # in seconds, as we sleep 1 second each time
   while True:
     max_time = 0
     done = True
     cumul_degraded = False
     rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
-    msg = rstats.RemoteFailMsg()
+    msg = rstats.fail_msg
     if msg:
       lu.LogWarning("Can't get any data from node %s: %s", node, msg)
       retries += 1
@@ -1715,6 +1733,16 @@ def _WaitForSync(lu, instance, oneshot=False, unlock=False):
           rem_time = "no time estimate"
         lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                         (instance.disks[i].iv_name, perc_done, rem_time))
+
+    # if we're done but degraded, let's do a few small retries, to
+    # make sure we see a stable and not transient situation; therefore
+    # we force restart of the loop
+    if (done or oneshot) and cumul_degraded and degr_retries > 0:
+      logging.info("Degraded disks found, %d retries left", degr_retries)
+      degr_retries -= 1
+      time.sleep(1)
+      continue
+
     if done or oneshot:
       break
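The degr_retries counter added above keeps a transiently degraded mirror from being reported as broken; a stripped-down version of that control flow, using a hypothetical poll_fn callable in place of the real RPC calls:

import time

def wait_until_stable(poll_fn, degr_retries=10):
  # poll_fn is assumed to return (done, degraded); a done-but-degraded answer
  # is re-checked a few times before being trusted.
  while True:
    done, degraded = poll_fn()
    if done and degraded and degr_retries > 0:
      degr_retries -= 1
      time.sleep(1)        # give the mirror a moment to settle
      continue
    if done:
      return degraded      # stable result: synced, possibly still degraded
    time.sleep(1)          # sync still in progress, keep polling
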
 
@@ -1742,7 +1770,7 @@ def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
   result = True
   if on_primary or dev.AssembleOnSecondary():
     rstats = lu.rpc.call_blockdev_find(node, dev)
-    msg = rstats.RemoteFailMsg()
+    msg = rstats.fail_msg
     if msg:
       lu.LogWarning("Can't find disk on node %s: %s", node, msg)
       result = False
@@ -1796,10 +1824,11 @@ class LUDiagnoseOS(NoHooksLU):
 
     @rtype: dict
     @return: a dictionary with osnames as keys and as value another map, with
-        nodes as keys and list of OS objects as values, eg::
+        nodes as keys and tuples of (path, status, diagnose) as values, eg::
 
-          {"debian-etch": {"node1": [<object>,...],
-                           "node2": [<object>,]}
+          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
+                                     (/srv/..., False, "invalid api")],
+                           "node2": [(/srv/..., True, "")]}
           }
 
     """
@@ -1808,18 +1837,18 @@ class LUDiagnoseOS(NoHooksLU):
     # level), so that nodes with a non-responding node daemon don't
     # make all OSes invalid
     good_nodes = [node_name for node_name in rlist
-                  if not rlist[node_name].failed]
-    for node_name, nr in rlist.iteritems():
-      if nr.failed or not nr.data:
+                  if not rlist[node_name].fail_msg]
+    for node_name, nr in rlist.items():
+      if nr.fail_msg or not nr.payload:
         continue
-      for os_obj in nr.data:
-        if os_obj.name not in all_os:
+      for name, path, status, diagnose in nr.payload:
+        if name not in all_os:
           # build a list of nodes for this os containing empty lists
           # for each node in node_list
-          all_os[os_obj.name] = {}
+          all_os[name] = {}
           for nname in good_nodes:
-            all_os[os_obj.name][nname] = []
-        all_os[os_obj.name][node_name].append(os_obj)
+            all_os[name][nname] = []
+        all_os[name][node_name].append((path, status, diagnose))
     return all_os
 
   def Exec(self, feedback_fn):
@@ -1828,21 +1857,20 @@ class LUDiagnoseOS(NoHooksLU):
     """
     valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
     node_data = self.rpc.call_os_diagnose(valid_nodes)
-    if node_data == False:
-      raise errors.OpExecError("Can't gather the list of OSes")
     pol = self._DiagnoseByOS(valid_nodes, node_data)
     output = []
-    for os_name, os_data in pol.iteritems():
+    for os_name, os_data in pol.items():
       row = []
       for field in self.op.output_fields:
         if field == "name":
           val = os_name
         elif field == "valid":
-          val = utils.all([osl and osl[0] for osl in os_data.values()])
+          val = utils.all([osl and osl[0][1] for osl in os_data.values()])
         elif field == "node_status":
+          # this is just a copy of the dict
           val = {}
-          for node_name, nos_list in os_data.iteritems():
-            val[node_name] = [(v.status, v.path) for v in nos_list]
+          for node_name, nos_list in os_data.items():
+            val[node_name] = nos_list
         else:
           raise errors.ParameterError(field)
         row.append(val)
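With os_diagnose now returning tuples instead of OS objects, the "valid" column above only needs the status flag of the first (primary) entry per node. For illustration, with made-up paths:

# Hypothetical per-node data in the new format: [(path, status, diagnose), ...]
os_data = {
  "node1": [("/usr/lib/ganeti/os/debian-etch", True, "")],
  "node2": [("/srv/ganeti/os/debian-etch", False, "invalid api version")],
}

# Same test as the utils.all(...) expression above: every node must have at
# least one entry, and its first entry must have status True.
valid = all(osl and osl[0][1] for osl in os_data.values())
# => False here, because the first entry on node2 is marked invalid
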
@@ -1914,7 +1942,11 @@ class LURemoveNode(LogicalUnit):
 
     self.context.RemoveNode(node.name)
 
-    self.rpc.call_node_leave_cluster(node.name)
+    result = self.rpc.call_node_leave_cluster(node.name)
+    msg = result.fail_msg
+    if msg:
+      self.LogWarning("Errors encountered on the remote node while leaving"
+                      " the cluster: %s", msg)
 
     # Promote nodes to master candidate as needed
     _AdjustCandidatePool(self)
@@ -1999,7 +2031,7 @@ class LUQueryNodes(NoHooksLU):
                                           self.cfg.GetHypervisorType())
       for name in nodenames:
         nodeinfo = node_data[name]
-        if not nodeinfo.RemoteFailMsg() and nodeinfo.payload:
+        if not nodeinfo.fail_msg and nodeinfo.payload:
           nodeinfo = nodeinfo.payload
           fn = utils.TryConvert
           live_data[name] = {
@@ -2122,10 +2154,15 @@ class LUQueryNodeVolumes(NoHooksLU):
 
     output = []
     for node in nodenames:
-      if node not in volumes or volumes[node].failed or not volumes[node].data:
+      nresult = volumes[node]
+      if nresult.offline:
+        continue
+      msg = nresult.fail_msg
+      if msg:
+        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
         continue
 
-      node_vols = volumes[node].data[:]
+      node_vols = nresult.payload[:]
       node_vols.sort(key=lambda vol: vol['dev'])
 
       for vol in node_vols:
@@ -2275,17 +2312,14 @@ class LUAddNode(LogicalUnit):
 
     # check connectivity
     result = self.rpc.call_version([node])[node]
-    result.Raise()
-    if result.data:
-      if constants.PROTOCOL_VERSION == result.data:
-        logging.info("Communication to node %s fine, sw version %s match",
-                     node, result.data)
-      else:
-        raise errors.OpExecError("Version mismatch master version %s,"
-                                 " node version %s" %
-                                 (constants.PROTOCOL_VERSION, result.data))
+    result.Raise("Can't get version information from node %s" % node)
+    if constants.PROTOCOL_VERSION == result.payload:
+      logging.info("Communication to node %s fine, sw version %s match",
+                   node, result.payload)
     else:
-      raise errors.OpExecError("Cannot get version from the new node")
+      raise errors.OpExecError("Version mismatch master version %s,"
+                               " node version %s" %
+                               (constants.PROTOCOL_VERSION, result.payload))
 
     # setup ssh on node
     logging.info("Copy ssh key to node %s", node)
@@ -2305,11 +2339,7 @@ class LUAddNode(LogicalUnit):
     result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                     keyarray[2],
                                     keyarray[3], keyarray[4], keyarray[5])
-
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Cannot transfer ssh keys to the"
-                               " new node: %s" % msg)
+    result.Raise("Cannot transfer ssh keys to the new node")
 
     # Add node to our /etc/hosts, and add key to known_hosts
     if self.cfg.GetClusterInfo().modify_etc_hosts:
@@ -2318,10 +2348,8 @@ class LUAddNode(LogicalUnit):
     if new_node.secondary_ip != new_node.primary_ip:
       result = self.rpc.call_node_has_ip_address(new_node.name,
                                                  new_node.secondary_ip)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("Failure checking secondary ip"
-                                   " on node %s: %s" % (new_node.name, msg))
+      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
+                   prereq=True)
       if not result.payload:
         raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                  " you gave (%s). Please fix and re-run this"
@@ -2336,13 +2364,12 @@ class LUAddNode(LogicalUnit):
     result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                        self.cfg.GetClusterName())
     for verifier in node_verify_list:
-      if result[verifier].failed or not result[verifier].data:
-        raise errors.OpExecError("Cannot communicate with %s's node daemon"
-                                 " for remote verification" % verifier)
-      if result[verifier].data['nodelist']:
-        for failed in result[verifier].data['nodelist']:
+      result[verifier].Raise("Cannot communicate with node %s" % verifier)
+      nl_payload = result[verifier].payload['nodelist']
+      if nl_payload:
+        for failed in nl_payload:
           feedback_fn("ssh/hostname verification failed %s -> %s" %
-                      (verifier, result[verifier].data['nodelist'][failed]))
+                      (verifier, nl_payload[failed]))
         raise errors.OpExecError("ssh/hostname verification failed.")
 
     if self.op.readd:
@@ -2455,7 +2482,7 @@ class LUSetNodeParams(LogicalUnit):
       result.append(("master_candidate", str(self.op.master_candidate)))
       if self.op.master_candidate == False:
         rrc = self.rpc.call_node_demote_from_mc(node.name)
-        msg = rrc.RemoteFailMsg()
+        msg = rrc.fail_msg
         if msg:
           self.LogWarning("Node failed to demote itself: %s" % msg)
 
@@ -2519,9 +2546,7 @@ class LUPowercycleNode(NoHooksLU):
     """
     result = self.rpc.call_node_powercycle(self.op.node_name,
                                            self.cfg.GetHypervisorType())
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Failed to schedule the reboot: %s" % msg)
+    result.Raise("Failed to schedule the reboot")
     return result.payload
 
 
@@ -2682,7 +2707,7 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
                            " (is_primary=False, pass=1): %s",
@@ -2699,7 +2724,7 @@ def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
         continue
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
                            " (is_primary=True, pass=2): %s",
@@ -2774,11 +2799,8 @@ def _SafeShutdownInstanceDisks(lu, instance):
 
   """
   pnode = instance.primary_node
-  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])
-  ins_l = ins_l[pnode]
-  msg = ins_l.RemoteFailMsg()
-  if msg:
-    raise errors.OpExecError("Can't contact node %s: %s" % (pnode, msg))
+  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
+  ins_l.Raise("Can't contact node %s" % pnode)
 
   if instance.name in ins_l.payload:
     raise errors.OpExecError("Instance is running, can't shutdown"
@@ -2801,7 +2823,7 @@ def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(top_disk, node)
       result = lu.rpc.call_blockdev_shutdown(node, top_disk)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                       disk.iv_name, node, msg)
@@ -2833,9 +2855,7 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
 
   """
   nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
-  msg = nodeinfo[node].RemoteFailMsg()
-  if msg:
-    raise errors.OpPrereqError("Can't get data from node %s: %s" % (node, msg))
+  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
   free_mem = nodeinfo[node].payload.get('memory_free', None)
   if not isinstance(free_mem, int):
     raise errors.OpPrereqError("Can't compute free memory on node %s, result"
@@ -2918,10 +2938,8 @@ class LUStartupInstance(LogicalUnit):
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    msg = remote_info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking node %s: %s" %
-                                 (instance.primary_node, msg))
+    remote_info.Raise("Error checking node %s" % instance.primary_node,
+                      prereq=True)
     if not remote_info.payload: # not running already
       _CheckNodeFreeMemory(self, instance.primary_node,
                            "starting instance %s" % instance.name,
@@ -2942,7 +2960,7 @@ class LUStartupInstance(LogicalUnit):
 
     result = self.rpc.call_instance_start(node_current, instance,
                                           self.hvparams, self.beparams)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       _ShutdownInstanceDisks(self, instance)
       raise errors.OpExecError("Could not start instance: %s" % msg)
@@ -3012,19 +3030,14 @@ class LURebootInstance(LogicalUnit):
         self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
                                              reboot_type)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not reboot instance: %s" % msg)
+      result.Raise("Could not reboot instance")
     else:
       result = self.rpc.call_instance_shutdown(node_current, instance)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not shutdown instance for"
-                                 " full reboot: %s" % msg)
+      result.Raise("Could not shutdown instance for full reboot")
       _ShutdownInstanceDisks(self, instance)
       _StartInstanceDisks(self, instance, ignore_secondaries)
       result = self.rpc.call_instance_start(node_current, instance, None, None)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance for"
@@ -3074,7 +3087,7 @@ class LUShutdownInstance(LogicalUnit):
     node_current = instance.primary_node
     self.cfg.MarkInstanceDown(instance.name)
     result = self.rpc.call_instance_shutdown(node_current, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       self.proc.LogWarning("Could not shutdown instance: %s" % msg)
 
@@ -3123,10 +3136,8 @@ class LUReinstallInstance(LogicalUnit):
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    msg = remote_info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking node %s: %s" %
-                                 (instance.primary_node, msg))
+    remote_info.Raise("Error checking node %s" % instance.primary_node,
+                      prereq=True)
     if remote_info.payload:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
@@ -3141,10 +3152,8 @@ class LUReinstallInstance(LogicalUnit):
         raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                    self.op.pnode)
       result = self.rpc.call_os_get(pnode.name, self.op.os_type)
-      result.Raise()
-      if not isinstance(result.data, objects.OS):
-        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
-                                   " primary node"  % self.op.os_type)
+      result.Raise("OS '%s' not in supported OS list for primary node %s" %
+                   (self.op.os_type, pnode.name), prereq=True)
 
     self.instance = instance
 
@@ -3163,11 +3172,8 @@ class LUReinstallInstance(LogicalUnit):
     try:
       feedback_fn("Running the instance OS create scripts...")
       result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not install OS for instance %s"
-                                 " on node %s: %s" %
-                                 (inst.name, inst.primary_node, msg))
+      result.Raise("Could not install OS for instance %s on node %s" %
+                   (inst.name, inst.primary_node))
     finally:
       _ShutdownInstanceDisks(self, inst)
 
@@ -3210,10 +3216,8 @@ class LURenameInstance(LogicalUnit):
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    msg = remote_info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking node %s: %s" %
-                                 (instance.primary_node, msg))
+    remote_info.Raise("Error checking node %s" % instance.primary_node,
+                      prereq=True)
     if remote_info.payload:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
@@ -3258,25 +3262,16 @@ class LURenameInstance(LogicalUnit):
       result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                      old_file_storage_dir,
                                                      new_file_storage_dir)
-      result.Raise()
-      if not result.data:
-        raise errors.OpExecError("Could not connect to node '%s' to rename"
-                                 " directory '%s' to '%s' (but the instance"
-                                 " has been renamed in Ganeti)" % (
-                                 inst.primary_node, old_file_storage_dir,
-                                 new_file_storage_dir))
-
-      if not result.data[0]:
-        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
-                                 " (but the instance has been renamed in"
-                                 " Ganeti)" % (old_file_storage_dir,
-                                               new_file_storage_dir))
+      result.Raise("Could not rename on node %s directory '%s' to '%s'"
+                   " (but the instance has been renamed in Ganeti)" %
+                   (inst.primary_node, old_file_storage_dir,
+                    new_file_storage_dir))
 
     _StartInstanceDisks(self, inst, None)
     try:
       result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                  old_name)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         msg = ("Could not run OS rename script for instance %s on node %s"
                " (but the instance has been renamed in Ganeti): %s" %
@@ -3333,7 +3328,7 @@ class LURemoveInstance(LogicalUnit):
                  instance.name, instance.primary_node)
 
     result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       if self.op.ignore_failures:
         feedback_fn("Warning: can't shutdown instance: %s" % msg)
@@ -3365,12 +3360,14 @@ class LUQueryInstances(NoHooksLU):
   _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                     "admin_state",
                                     "disk_template", "ip", "mac", "bridge",
+                                    "nic_mode", "nic_link",
                                     "sda_size", "sdb_size", "vcpus", "tags",
                                     "network_port", "beparams",
                                     r"(disk)\.(size)/([0-9]+)",
                                     r"(disk)\.(sizes)", "disk_usage",
-                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
-                                    r"(nic)\.(macs|ips|bridges)",
+                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
+                                    r"(nic)\.(bridge)/([0-9]+)",
+                                    r"(nic)\.(macs|ips|modes|links|bridges)",
                                     r"(disk|nic)\.(count)",
                                     "serial_no", "hypervisor", "hvparams",] +
                                   ["hv/%s" % name
@@ -3452,7 +3449,7 @@ class LUQueryInstances(NoHooksLU):
         if result.offline:
           # offline nodes will be in both lists
           off_nodes.append(name)
-        if result.failed or result.RemoteFailMsg():
+        if result.failed or result.fail_msg:
           bad_nodes.append(name)
         else:
           if result.payload:
@@ -3466,10 +3463,13 @@ class LUQueryInstances(NoHooksLU):
     HVPREFIX = "hv/"
     BEPREFIX = "be/"
     output = []
+    cluster = self.cfg.GetClusterInfo()
     for instance in instance_list:
       iout = []
-      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
-      i_be = self.cfg.GetClusterInfo().FillBE(instance)
+      i_hv = cluster.FillHV(instance)
+      i_be = cluster.FillBE(instance)
+      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
+                                 nic.nicparams) for nic in instance.nics]
       for field in self.op.output_fields:
         st_match = self._FIELDS_STATIC.Matches(field)
         if field == "name":
@@ -3514,11 +3514,31 @@ class LUQueryInstances(NoHooksLU):
         elif field == "disk_template":
           val = instance.disk_template
         elif field == "ip":
-          val = instance.nics[0].ip
+          if instance.nics:
+            val = instance.nics[0].ip
+          else:
+            val = None
+        elif field == "nic_mode":
+          if instance.nics:
+            val = i_nicp[0][constants.NIC_MODE]
+          else:
+            val = None
+        elif field == "nic_link":
+          if instance.nics:
+            val = i_nicp[0][constants.NIC_LINK]
+          else:
+            val = None
         elif field == "bridge":
-          val = instance.nics[0].bridge
+          if (instance.nics and
+              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
+            val = i_nicp[0][constants.NIC_LINK]
+          else:
+            val = None
         elif field == "mac":
-          val = instance.nics[0].mac
+          if instance.nics:
+            val = instance.nics[0].mac
+          else:
+            val = None
         elif field == "sda_size" or field == "sdb_size":
           idx = ord(field[2]) - ord('a')
           try:
@@ -3568,8 +3588,17 @@ class LUQueryInstances(NoHooksLU):
               val = [nic.mac for nic in instance.nics]
             elif st_groups[1] == "ips":
               val = [nic.ip for nic in instance.nics]
+            elif st_groups[1] == "modes":
+              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
+            elif st_groups[1] == "links":
+              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
             elif st_groups[1] == "bridges":
-              val = [nic.bridge for nic in instance.nics]
+              val = []
+              for nicp in i_nicp:
+                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
+                  val.append(nicp[constants.NIC_LINK])
+                else:
+                  val.append(None)
             else:
               # index-based item
               nic_idx = int(st_groups[2])
@@ -3580,8 +3609,16 @@ class LUQueryInstances(NoHooksLU):
                   val = instance.nics[nic_idx].mac
                 elif st_groups[1] == "ip":
                   val = instance.nics[nic_idx].ip
+                elif st_groups[1] == "mode":
+                  val = i_nicp[nic_idx][constants.NIC_MODE]
+                elif st_groups[1] == "link":
+                  val = i_nicp[nic_idx][constants.NIC_LINK]
                 elif st_groups[1] == "bridge":
-                  val = instance.nics[nic_idx].bridge
+                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
+                  if nic_mode == constants.NIC_MODE_BRIDGED:
+                    val = i_nicp[nic_idx][constants.NIC_LINK]
+                  else:
+                    val = None
                 else:
                   assert False, "Unhandled NIC parameter"
           else:
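The three hunks above all rely on i_nicp, the per-NIC parameters built earlier by filling each NIC's own nicparams over the cluster-wide defaults; "bridge" is then only reported when the filled mode is bridged. A self-contained sketch of that fill and lookup, with literal stand-ins for objects.FillDict, cluster.nicparams and the NIC_* constants:

NIC_MODE, NIC_LINK, NIC_MODE_BRIDGED = "mode", "link", "bridged"

def fill_dict(defaults, custom):
  # Same idea as objects.FillDict: per-NIC values override cluster defaults
  filled = defaults.copy()
  filled.update(custom)
  return filled

cluster_defaults = {NIC_MODE: NIC_MODE_BRIDGED, NIC_LINK: "xen-br0"}
per_nic_params = [{}, {NIC_MODE: "routed", NIC_LINK: "eth0"}]  # two NICs

i_nicp = [fill_dict(cluster_defaults, nicp) for nicp in per_nic_params]

# "bridge" is the link only for NICs that are actually in bridged mode
bridges = [nicp[NIC_LINK] if nicp[NIC_MODE] == NIC_MODE_BRIDGED else None
           for nicp in i_nicp]
# => ["xen-br0", None]
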
@@ -3648,10 +3685,15 @@ class LUFailoverInstance(LogicalUnit):
     target_node = secondary_nodes[0]
     _CheckNodeOnline(self, target_node)
     _CheckNodeNotDrained(self, target_node)
-    # check memory requirements on the secondary node
-    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
-                         instance.name, bep[constants.BE_MEMORY],
-                         instance.hypervisor)
+    if instance.admin_up:
+      # check memory requirements on the secondary node
+      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
+                           instance.name, bep[constants.BE_MEMORY],
+                           instance.hypervisor)
+    else:
+      self.LogInfo("Not checking memory on the secondary node as"
+                   " instance will not be started")
+
     # check bridge existance
     _CheckInstanceBridgesExist(self, instance, node=target_node)
 
@@ -3680,7 +3722,7 @@ class LUFailoverInstance(LogicalUnit):
                  instance.name, source_node)
 
     result = self.rpc.call_instance_shutdown(source_node, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       if self.op.ignore_consistency:
         self.proc.LogWarning("Could not shutdown instance %s on node %s."
@@ -3714,7 +3756,7 @@ class LUFailoverInstance(LogicalUnit):
 
       feedback_fn("* starting the instance on the target node")
       result = self.rpc.call_instance_start(target_node, instance, None, None)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
@@ -3791,10 +3833,7 @@ class LUMigrateInstance(LogicalUnit):
       _CheckNodeNotDrained(self, target_node)
       result = self.rpc.call_instance_migratable(instance.primary_node,
                                                  instance)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
-                                   msg)
+      result.Raise("Can't migrate, please use failover", prereq=True)
 
     self.instance = instance
 
@@ -3813,10 +3852,7 @@ class LUMigrateInstance(LogicalUnit):
                                             self.instance.disks)
       min_percent = 100
       for node, nres in result.items():
-        msg = nres.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
-                                   (node, msg))
+        nres.Raise("Cannot resync disks on node %s" % node)
         node_done, node_percent = nres.payload
         all_done = all_done and node_done
         if node_percent is not None:
@@ -3837,10 +3873,7 @@ class LUMigrateInstance(LogicalUnit):
 
     result = self.rpc.call_blockdev_close(node, self.instance.name,
                                           self.instance.disks)
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
-                               " error %s" % (node, msg))
+    result.Raise("Cannot change disk to secondary on node %s" % node)
 
   def _GoStandalone(self):
     """Disconnect from the network.
@@ -3850,10 +3883,7 @@ class LUMigrateInstance(LogicalUnit):
     result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                                self.instance.disks)
     for node, nres in result.items():
-      msg = nres.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Cannot disconnect disks node %s,"
-                                 " error %s" % (node, msg))
+      nres.Raise("Cannot disconnect disks node %s" % node)
 
   def _GoReconnect(self, multimaster):
     """Reconnect to the network.
@@ -3868,10 +3898,7 @@ class LUMigrateInstance(LogicalUnit):
                                            self.instance.disks,
                                            self.instance.name, multimaster)
     for node, nres in result.items():
-      msg = nres.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Cannot change disks config on node %s,"
-                                 " error: %s" % (node, msg))
+      nres.Raise("Cannot change disks config on node %s" % node)
 
   def _ExecCleanup(self):
     """Try to cleanup after a failed migration.
@@ -3896,9 +3923,7 @@ class LUMigrateInstance(LogicalUnit):
                      " a bad state)")
     ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
     for node, result in ins_l.items():
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't contact node %s: %s" % (node, msg))
+      result.Raise("Can't contact node %s" % node)
 
     runningon_source = instance.name in ins_l[source_node].payload
     runningon_target = instance.name in ins_l[target_node].payload
@@ -3968,7 +3993,7 @@ class LUMigrateInstance(LogicalUnit):
                                                     instance,
                                                     migration_info,
                                                     False)
-    abort_msg = abort_result.RemoteFailMsg()
+    abort_msg = abort_result.fail_msg
     if abort_msg:
       logging.error("Aborting migration failed on target node %s: %s" %
                     (target_node, abort_msg))
@@ -4000,7 +4025,7 @@ class LUMigrateInstance(LogicalUnit):
 
     # First get the migration information from the remote node
     result = self.rpc.call_migration_info(source_node, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       log_err = ("Failed fetching source migration information from %s: %s" %
                  (source_node, msg))
@@ -4021,7 +4046,7 @@ class LUMigrateInstance(LogicalUnit):
                                            migration_info,
                                            self.nodes_ip[target_node])
 
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       logging.error("Instance pre-migration failed, trying to revert"
                     " disk status: %s", msg)
@@ -4035,7 +4060,7 @@ class LUMigrateInstance(LogicalUnit):
     result = self.rpc.call_instance_migrate(source_node, instance,
                                             self.nodes_ip[target_node],
                                             self.op.live)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       logging.error("Instance migration failed, trying to revert"
                     " disk status: %s", msg)
@@ -4053,7 +4078,7 @@ class LUMigrateInstance(LogicalUnit):
                                               instance,
                                               migration_info,
                                               True)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       logging.error("Instance migration succeeded, but finalization failed:"
                     " %s" % msg)
@@ -4153,11 +4178,8 @@ def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
   lu.cfg.SetDiskID(device, node)
   result = lu.rpc.call_blockdev_create(node, device, device.size,
                                        instance.name, force_open, info)
-  msg = result.RemoteFailMsg()
-  if msg:
-    raise errors.OpExecError("Can't create block device %s on"
-                             " node %s for instance %s: %s" %
-                             (device, node, instance.name, msg))
+  result.Raise("Can't create block device %s on"
+               " node %s for instance %s" % (device, node, instance.name))
   if device.physical_id is None:
     device.physical_id = result.payload
 
@@ -4289,12 +4311,8 @@ def _CreateDisks(lu, instance):
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
 
-    if result.failed or not result.data:
-      raise errors.OpExecError("Could not connect to node '%s'" % pnode)
-
-    if not result.data[0]:
-      raise errors.OpExecError("Failed to create directory '%s'" %
-                               file_storage_dir)
+    result.Raise("Failed to create directory '%s' on"
+                 " node %s: %s" % (file_storage_dir, pnode))
 
   # Note: this needs to be kept in sync with adding of disks in
   # LUSetInstanceParams
@@ -4329,7 +4347,7 @@ def _RemoveDisks(lu, instance):
   for device in instance.disks:
     for node, disk in device.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(disk, node)
-      msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
       if msg:
         lu.LogWarning("Could not remove block device %s on node %s,"
                       " continuing anyway: %s", device.iv_name, node, msg)
@@ -4339,8 +4357,10 @@ def _RemoveDisks(lu, instance):
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                  file_storage_dir)
-    if result.failed or not result.data:
-      logging.error("Could not remove directory '%s'", file_storage_dir)
+    msg = result.fail_msg
+    if msg:
+      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
+                    file_storage_dir, instance.primary_node, msg)
       all_result = False
 
   return all_result
@@ -4390,10 +4410,7 @@ def _CheckHVParams(lu, nodenames, hvname, hvparams):
     info = hvinfo[node]
     if info.offline:
       continue
-    msg = info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Hypervisor parameter validation"
-                                 " failed on node %s: %s" % (node, msg))
+    info.Raise("Hypervisor parameter validation failed on node %s" % node)
 
 
 class LUCreateInstance(LogicalUnit):
@@ -4458,6 +4475,7 @@ class LUCreateInstance(LogicalUnit):
                                   self.op.hvparams)
     hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
     hv_type.CheckParameterSyntax(filled_hvp)
+    self.hv_full = filled_hvp
 
     # fill and remember the beparams dict
     utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
@@ -4662,9 +4680,12 @@ class LUCreateInstance(LogicalUnit):
       os_type=self.op.os_type,
       memory=self.be_full[constants.BE_MEMORY],
       vcpus=self.be_full[constants.BE_VCPUS],
-      nics=_PreBuildNICHooksList(self, self.nics),
+      nics=_NICListToTuple(self, self.nics),
       disk_template=self.op.disk_template,
       disks=[(d["size"], d["mode"]) for d in self.disks],
+      bep=self.be_full,
+      hvp=self.hv_full,
+      hypervisor=self.op.hypervisor,
     ))
 
     nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
@@ -4690,7 +4711,7 @@ class LUCreateInstance(LogicalUnit):
         exp_list = self.rpc.call_export_list(locked_nodes)
         found = False
         for node in exp_list:
-          if exp_list[node].RemoteFailMsg():
+          if exp_list[node].fail_msg:
             continue
           if src_path in exp_list[node].payload:
             found = True
@@ -4704,10 +4725,7 @@ class LUCreateInstance(LogicalUnit):
 
       _CheckNodeOnline(self, src_node)
       result = self.rpc.call_export_info(src_node, src_path)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("No export or invalid export found in"
-                                   " dir %s: %s" % (src_path, msg))
+      result.Raise("No export or invalid export found in dir %s" % src_path)
 
       export_info = objects.SerializableConfigParser.Loads(str(result.payload))
       if not export_info.has_section(constants.INISECT_EXP):
@@ -4815,10 +4833,7 @@ class LUCreateInstance(LogicalUnit):
                                          self.op.hypervisor)
       for node in nodenames:
         info = nodeinfo[node]
-        msg = info.RemoteFailMsg()
-        if msg:
-          raise errors.OpPrereqError("Cannot get current information"
-                                     " from node %s: %s" % (node, msg))
+        info.Raise("Cannot get current information from node %s" % node)
         info = info.payload
         vg_free = info.get('vg_free', None)
         if not isinstance(vg_free, int):
@@ -4833,10 +4848,8 @@ class LUCreateInstance(LogicalUnit):
 
     # os verification
     result = self.rpc.call_os_get(pnode.name, self.op.os_type)
-    result.Raise()
-    if not isinstance(result.data, objects.OS):
-      raise errors.OpPrereqError("OS '%s' not in supported os list for"
-                                 " primary node"  % self.op.os_type)
+    result.Raise("OS '%s' not in supported os list for primary node %s" %
+                 (self.op.os_type, pnode.name), prereq=True)
 
     _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 
@@ -4948,11 +4961,8 @@ class LUCreateInstance(LogicalUnit):
       if self.op.mode == constants.INSTANCE_CREATE:
         feedback_fn("* running the instance OS create scripts...")
         result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
-        msg = result.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Could not add os for instance %s"
-                                   " on node %s: %s" %
-                                   (instance, pnode_name, msg))
+        result.Raise("Could not add os for instance %s"
+                     " on node %s" % (instance, pnode_name))
 
       elif self.op.mode == constants.INSTANCE_IMPORT:
         feedback_fn("* running the instance OS import scripts...")
@@ -4962,7 +4972,7 @@ class LUCreateInstance(LogicalUnit):
         import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                          src_node, src_images,
                                                          cluster_name)
-        msg = import_result.RemoteFailMsg()
+        msg = import_result.fail_msg
         if msg:
           self.LogWarning("Error while importing the disk images for instance"
                           " %s on node %s: %s" % (instance, pnode_name, msg))
@@ -4977,9 +4987,7 @@ class LUCreateInstance(LogicalUnit):
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
       result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not start instance: %s" % msg)
+      result.Raise("Could not start instance")
 
 
 class LUConnectConsole(NoHooksLU):
@@ -5016,10 +5024,7 @@ class LUConnectConsole(NoHooksLU):
 
     node_insts = self.rpc.call_instance_list([node],
                                              [instance.hypervisor])[node]
-    msg = node_insts.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Can't get node information from %s: %s" %
-                               (node, msg))
+    node_insts.Raise("Can't get node information from %s" % node)
 
     if instance.name not in node_insts.payload:
       raise errors.OpExecError("Instance %s is not running." % instance.name)
@@ -5243,9 +5248,7 @@ class LUReplaceDisks(LogicalUnit):
       raise errors.OpExecError("Can't list volume groups on the nodes")
     for node in oth_node, tgt_node:
       res = results[node]
-      msg = res.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Error checking node %s: %s" % (node, msg))
+      res.Raise("Error checking node %s" % node)
       if my_vg not in res.payload:
         raise errors.OpExecError("Volume group '%s' not found on %s" %
                                  (my_vg, node))
@@ -5256,7 +5259,7 @@ class LUReplaceDisks(LogicalUnit):
         info("checking disk/%d on %s" % (idx, node))
         cfg.SetDiskID(dev, node)
         result = self.rpc.call_blockdev_find(node, dev)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if not msg and not result.payload:
           msg = "disk not found"
         if msg:
@@ -5304,11 +5307,8 @@ class LUReplaceDisks(LogicalUnit):
     for dev, old_lvs, new_lvs in iv_names.itervalues():
       info("detaching %s drbd from local storage" % dev.iv_name)
       result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't detach drbd from local storage on node"
-                                 " %s for device %s: %s" %
-                                 (tgt_node, dev.iv_name, msg))
+      result.Raise("Can't detach drbd from local storage on node"
+                   " %s for device %s" % (tgt_node, dev.iv_name))
       #dev.children = []
       #cfg.Update(instance)
 
@@ -5326,24 +5326,18 @@ class LUReplaceDisks(LogicalUnit):
       rlist = []
       for to_ren in old_lvs:
         result = self.rpc.call_blockdev_find(tgt_node, to_ren)
-        if not result.RemoteFailMsg() and result.payload:
+        if not result.fail_msg and result.payload:
           # device exists
           rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
 
       info("renaming the old LVs on the target node")
       result = self.rpc.call_blockdev_rename(tgt_node, rlist)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't rename old LVs on node %s: %s" %
-                                 (tgt_node, msg))
+      result.Raise("Can't rename old LVs on node %s" % tgt_node)
       # now we rename the new LVs to the old LVs
       info("renaming the new LVs on the target node")
       rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
       result = self.rpc.call_blockdev_rename(tgt_node, rlist)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't rename new LVs on node %s: %s" %
-                                 (tgt_node, msg))
+      result.Raise("Can't rename new LVs on node %s" % tgt_node)
 
       for old, new in zip(old_lvs, new_lvs):
         new.logical_id = old.logical_id
@@ -5356,12 +5350,12 @@ class LUReplaceDisks(LogicalUnit):
       # now that the new lvs have the old name, we can add them to the device
       info("adding new mirror component on %s" % tgt_node)
       result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         for new_lv in new_lvs:
-          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
-          if msg:
-            warning("Can't rollback device %s: %s", dev, msg,
+          msg2 = self.rpc.call_blockdev_remove(tgt_node, new_lv).fail_msg
+          if msg2:
+            warning("Can't rollback device %s: %s", dev, msg2,
                     hint="cleanup manually the unused logical volumes")
         raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
 
@@ -5380,7 +5374,7 @@ class LUReplaceDisks(LogicalUnit):
     for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
       cfg.SetDiskID(dev, instance.primary_node)
       result = self.rpc.call_blockdev_find(instance.primary_node, dev)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if not msg and not result.payload:
         msg = "disk not found"
       if msg:
@@ -5395,7 +5389,7 @@ class LUReplaceDisks(LogicalUnit):
       info("remove logical volumes for %s" % name)
       for lv in old_lvs:
         cfg.SetDiskID(lv, tgt_node)
-        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
+        msg = self.rpc.call_blockdev_remove(tgt_node, lv).fail_msg
         if msg:
           warning("Can't remove old LV: %s" % msg,
                   hint="manually remove unused LVs")
@@ -5442,9 +5436,7 @@ class LUReplaceDisks(LogicalUnit):
     results = self.rpc.call_vg_list([pri_node, new_node])
     for node in pri_node, new_node:
       res = results[node]
-      msg = res.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Error checking node %s: %s" % (node, msg))
+      res.Raise("Error checking node %s" % node)
       if my_vg not in res.payload:
         raise errors.OpExecError("Volume group '%s' not found on %s" %
                                  (my_vg, node))
@@ -5454,7 +5446,7 @@ class LUReplaceDisks(LogicalUnit):
       info("checking disk/%d on %s" % (idx, pri_node))
       cfg.SetDiskID(dev, pri_node)
       result = self.rpc.call_blockdev_find(pri_node, dev)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if not msg and not result.payload:
         msg = "disk not found"
       if msg:
@@ -5510,7 +5502,8 @@ class LUReplaceDisks(LogicalUnit):
                     new_net_id)
       new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                               logical_id=new_alone_id,
-                              children=dev.children)
+                              children=dev.children,
+                              size=dev.size)
       try:
         _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                               _GetInstanceInfoText(instance), False)
@@ -5522,7 +5515,7 @@ class LUReplaceDisks(LogicalUnit):
       # we have new devices, shutdown the drbd on the old secondary
       info("shutting down drbd for disk/%d on old node" % idx)
       cfg.SetDiskID(dev, old_node)
-      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
+      msg = self.rpc.call_blockdev_shutdown(old_node, dev).fail_msg
       if msg:
         warning("Failed to shutdown drbd for disk/%d on old node: %s" %
                 (idx, msg),
@@ -5532,7 +5525,7 @@ class LUReplaceDisks(LogicalUnit):
     result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                                instance.disks)[pri_node]
 
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       # detaches didn't succeed (unlikely)
       self.cfg.ReleaseDRBDMinors(instance.name)
@@ -5553,7 +5546,7 @@ class LUReplaceDisks(LogicalUnit):
                                            instance.disks, instance.name,
                                            False)
     for to_node, to_result in result.items():
-      msg = to_result.RemoteFailMsg()
+      msg = to_result.fail_msg
       if msg:
         warning("can't attach drbd disks on node %s: %s", to_node, msg,
                 hint="please do a gnt-instance info to see the"
@@ -5569,7 +5562,7 @@ class LUReplaceDisks(LogicalUnit):
     for idx, (dev, old_lvs, _) in iv_names.iteritems():
       cfg.SetDiskID(dev, pri_node)
       result = self.rpc.call_blockdev_find(pri_node, dev)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if not msg and not result.payload:
         msg = "disk not found"
       if msg:
@@ -5583,7 +5576,7 @@ class LUReplaceDisks(LogicalUnit):
       info("remove logical volumes for disk/%d" % idx)
       for lv in old_lvs:
         cfg.SetDiskID(lv, old_node)
-        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
+        msg = self.rpc.call_blockdev_remove(old_node, lv).fail_msg
         if msg:
           warning("Can't remove LV on old secondary: %s", msg,
                   hint="Cleanup stale volumes by hand")
@@ -5675,10 +5668,7 @@ class LUGrowDisk(LogicalUnit):
                                        instance.hypervisor)
     for node in nodenames:
       info = nodeinfo[node]
-      msg = info.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("Cannot get current information"
-                                   " from node %s:" % (node, msg))
+      info.Raise("Cannot get current information from node %s" % node)
       vg_free = info.payload.get('vg_free', None)
       if not isinstance(vg_free, int):
         raise errors.OpPrereqError("Can't compute free disk space on"
@@ -5697,10 +5687,7 @@ class LUGrowDisk(LogicalUnit):
     for node in instance.all_nodes:
       self.cfg.SetDiskID(disk, node)
       result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Grow request failed to node %s: %s" %
-                                 (node, msg))
+      result.Raise("Grow request failed to node %s" % node)
     disk.RecordGrow(self.op.amount)
     self.cfg.Update(instance)
     if self.op.wait_for_sync:
@@ -5767,10 +5754,7 @@ class LUQueryInstanceData(NoHooksLU):
       if dev_pstatus.offline:
         dev_pstatus = None
       else:
-        msg = dev_pstatus.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't compute disk status for %s: %s" %
-                                   (instance.name, msg))
+        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
         dev_pstatus = dev_pstatus.payload
     else:
       dev_pstatus = None
@@ -5788,10 +5772,7 @@ class LUQueryInstanceData(NoHooksLU):
       if dev_sstatus.offline:
         dev_sstatus = None
       else:
-        msg = dev_sstatus.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't compute disk status for %s: %s" %
-                                   (instance.name, msg))
+        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
         dev_sstatus = dev_sstatus.payload
     else:
       dev_sstatus = None
@@ -5826,10 +5807,7 @@ class LUQueryInstanceData(NoHooksLU):
         remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                   instance.name,
                                                   instance.hypervisor)
-        msg = remote_info.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Error checking node %s: %s" %
-                                   (instance.primary_node, msg))
+        remote_info.Raise("Error checking node %s" % instance.primary_node)
         remote_info = remote_info.payload
         if remote_info and "state" in remote_info:
           remote_state = "up"
@@ -6119,7 +6097,7 @@ class LUSetInstanceParams(LogicalUnit):
       nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                          instance.hypervisor)
       pninfo = nodeinfo[pnode]
-      msg = pninfo.RemoteFailMsg()
+      msg = pninfo.fail_msg
       if msg:
         # Assume the primary node is unreachable and go ahead
         self.warn.append("Can't get info from primary node %s: %s" %
@@ -6127,9 +6105,9 @@ class LUSetInstanceParams(LogicalUnit):
       elif not isinstance(pninfo.payload.get('memory_free', None), int):
         self.warn.append("Node data from primary node %s doesn't contain"
                          " free memory information" % pnode)
-      elif instance_info.RemoteFailMsg():
+      elif instance_info.fail_msg:
         self.warn.append("Can't get instance runtime information: %s" %
-                        instance_info.RemoteFailMsg())
+                        instance_info.fail_msg)
       else:
         if instance_info.payload:
           current_mem = int(instance_info.payload['memory'])
@@ -6149,7 +6127,7 @@ class LUSetInstanceParams(LogicalUnit):
         for node, nres in nodeinfo.items():
           if node not in instance.secondary_nodes:
             continue
-          msg = nres.RemoteFailMsg()
+          msg = nres.fail_msg
           if msg:
             self.warn.append("Can't get info from secondary node %s: %s" %
                              (node, msg))
@@ -6198,8 +6176,7 @@ class LUSetInstanceParams(LogicalUnit):
 
       if new_nic_mode == constants.NIC_MODE_BRIDGED:
         nic_bridge = new_filled_nic_params[constants.NIC_LINK]
-        result = self.rpc.call_bridges_exist(pnode, [nic_bridge])
-        msg = result.RemoteFailMsg()
+        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
         if msg:
           msg = "Error checking bridges on node %s: %s" % (pnode, msg)
           if self.force:
@@ -6238,7 +6215,7 @@ class LUSetInstanceParams(LogicalUnit):
                                      " an instance")
         ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
         ins_l = ins_l[pnode]
-        msg = ins_l.RemoteFailMsg()
+        msg = ins_l.fail_msg
         if msg:
           raise errors.OpPrereqError("Can't contact node %s: %s" %
                                      (pnode, msg))
@@ -6281,7 +6258,7 @@ class LUSetInstanceParams(LogicalUnit):
         device_idx = len(instance.disks)
         for node, disk in device.ComputeNodeTree(instance.primary_node):
           self.cfg.SetDiskID(disk, node)
-          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
           if msg:
             self.LogWarning("Could not remove disk/%d on node %s: %s,"
                             " continuing anyway", device_idx, node, msg)
@@ -6403,7 +6380,7 @@ class LUQueryExports(NoHooksLU):
     rpcresult = self.rpc.call_export_list(self.nodes)
     result = {}
     for node in rpcresult:
-      if rpcresult[node].RemoteFailMsg():
+      if rpcresult[node].fail_msg:
         result[node] = False
       else:
         result[node] = rpcresult[node].payload
@@ -6488,11 +6465,8 @@ class LUExportInstance(LogicalUnit):
     if self.op.shutdown:
       # shutdown the instance, but not the disks
       result = self.rpc.call_instance_shutdown(src_node, instance)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not shutdown instance %s on"
-                                 " node %s: %s" %
-                                 (instance.name, src_node, msg))
+      result.Raise("Could not shutdown instance %s on"
+                   " node %s" % (instance.name, src_node))
 
     vgname = self.cfg.GetVGName()
 
@@ -6504,13 +6478,13 @@ class LUExportInstance(LogicalUnit):
       self.cfg.SetDiskID(disk, src_node)
 
     try:
-      for disk in instance.disks:
+      for idx, disk in enumerate(instance.disks):
         # result.payload will be a snapshot of an lvm leaf of the one we passed
         result = self.rpc.call_blockdev_snapshot(src_node, disk)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
-          self.LogWarning("Could not snapshot block device %s on node %s: %s",
-                          disk.logical_id[1], src_node, msg)
+          self.LogWarning("Could not snapshot disk/%s on node %s: %s",
+                          idx, src_node, msg)
           snap_disks.append(False)
         else:
           disk_id = (vgname, result.payload)
@@ -6522,7 +6496,7 @@ class LUExportInstance(LogicalUnit):
     finally:
       if self.op.shutdown and instance.admin_up:
         result = self.rpc.call_instance_start(src_node, instance, None, None)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
           _ShutdownInstanceDisks(self, instance)
           raise errors.OpExecError("Could not start instance: %s" % msg)
@@ -6534,18 +6508,17 @@ class LUExportInstance(LogicalUnit):
       if dev:
         result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                                instance, cluster_name, idx)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
-          self.LogWarning("Could not export block device %s from node %s to"
-                          " node %s: %s", dev.logical_id[1], src_node,
-                          dst_node.name, msg)
-        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
+          self.LogWarning("Could not export disk/%s from node %s to"
+                          " node %s: %s", idx, src_node, dst_node.name, msg)
+        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
         if msg:
-          self.LogWarning("Could not remove snapshot block device %s from node"
-                          " %s: %s", dev.logical_id[1], src_node, msg)
+          self.LogWarning("Could not remove snapshot for disk/%d from node"
+                          " %s: %s", idx, src_node, msg)
 
     result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       self.LogWarning("Could not finalize export for instance %s"
                       " on node %s: %s", instance.name, dst_node.name, msg)
@@ -6560,10 +6533,10 @@ class LUExportInstance(LogicalUnit):
     if nodelist:
       exportlist = self.rpc.call_export_list(nodelist)
       for node in exportlist:
-        if exportlist[node].RemoteFailMsg():
+        if exportlist[node].fail_msg:
           continue
         if iname in exportlist[node].payload:
-          msg = self.rpc.call_export_remove(node, iname).RemoteFailMsg()
+          msg = self.rpc.call_export_remove(node, iname).fail_msg
           if msg:
             self.LogWarning("Could not remove older export for instance %s"
                             " on node %s: %s", iname, node, msg)
@@ -6604,14 +6577,14 @@ class LURemoveExport(NoHooksLU):
     exportlist = self.rpc.call_export_list(locked_nodes)
     found = False
     for node in exportlist:
-      msg = exportlist[node].RemoteFailMsg()
+      msg = exportlist[node].fail_msg
       if msg:
         self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
         continue
       if instance_name in exportlist[node].payload:
         found = True
         result = self.rpc.call_export_remove(node, instance_name)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
           logging.error("Could not remove export for instance %s"
                         " on node %s: %s", instance_name, node, msg)
@@ -6826,13 +6799,8 @@ class LUTestDelay(NoHooksLU):
         raise errors.OpExecError("Error during master delay test")
     if self.op.on_nodes:
       result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
-      if not result:
-        raise errors.OpExecError("Complete failure from rpc call")
       for node, node_result in result.items():
-        node_result.Raise()
-        if not node_result.data:
-          raise errors.OpExecError("Failure during rpc call to node %s,"
-                                   " result: %s" % (node, node_result.data))
+        node_result.Raise("Failure during rpc call to node %s" % node)
 
 
 class IAllocator(object):
@@ -6934,14 +6902,9 @@ class IAllocator(object):
         }
 
       if not ninfo.offline:
-        msg = nresult.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't get data for node %s: %s" %
-                                   (nname, msg))
-        msg = node_iinfo[nname].RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't get node instance info"
-                                   " from node %s: %s" % (nname, msg))
+        nresult.Raise("Can't get data for node %s" % nname)
+        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
+                                nname)
         remote_info = nresult.payload
         for attr in ['memory_total', 'memory_free', 'memory_dom0',
                      'vg_size', 'vg_free', 'cpu_total']:
@@ -7108,19 +7071,9 @@ class IAllocator(object):
     data = self.in_text
 
     result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
-    result.Raise()
-
-    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
-      raise errors.OpExecError("Invalid result from master iallocator runner")
-
-    rcode, stdout, stderr, fail = result.data
+    result.Raise("Failure while running the iallocator script")
 
-    if rcode == constants.IARUN_NOTFOUND:
-      raise errors.OpExecError("Can't find allocator '%s'" % name)
-    elif rcode == constants.IARUN_FAILURE:
-      raise errors.OpExecError("Instance allocator call failed: %s,"
-                               " output: %s" % (fail, stdout+stderr))
-    self.out_text = stdout
+    self.out_text = result.payload
     if validate:
       self._ValidateResult()