Convert node_start_master to new style result
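
The change that recurs throughout this patch is the move from the old RPC
result checks (result.failed / result.data) to the new-style accessors
(result.RemoteFailMsg() / result.payload). A minimal sketch of the two
styles, based on the node_start_master call converted below (shortened here
for illustration):

    # old style: check the .failed flag and the raw .data attribute
    result = self.rpc.call_node_start_master(master, False)
    if result.failed or not result.data:
      self.LogWarning("Could not re-enable the master role on"
                      " the master, please restart manually.")

    # new style: RemoteFailMsg() is non-empty only on failure; any useful
    # return value is found in result.payload
    result = self.rpc.call_node_start_master(master, False)
    msg = result.RemoteFailMsg()
    if msg:
      self.LogWarning("Could not re-enable the master role on"
                      " the master, please restart manually: %s", msg)
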
lib/cmdlib.py
index a3150b1..6d567bb 100644
@@ -25,7 +25,6 @@
 
 import os
 import os.path
-import sha
 import time
 import tempfile
 import re
@@ -502,12 +501,15 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   if nics:
     nic_count = len(nics)
-    for idx, (ip, bridge, mac) in enumerate(nics):
+    for idx, (ip, mac, mode, link) in enumerate(nics):
       if ip is None:
         ip = ""
       env["INSTANCE_NIC%d_IP" % idx] = ip
-      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
       env["INSTANCE_NIC%d_MAC" % idx] = mac
+      env["INSTANCE_NIC%d_MODE" % idx] = mode
+      env["INSTANCE_NIC%d_LINK" % idx] = link
+      if mode == constants.NIC_MODE_BRIDGED:
+        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
   else:
     nic_count = 0
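
For a single bridged NIC, the hunk above populates the hook environment
roughly as follows (a sketch only; the MAC and link values are invented, and
constants.NIC_MODE_BRIDGED is assumed to be the string "bridged"):

    env["INSTANCE_NIC0_IP"] = ""
    env["INSTANCE_NIC0_MAC"] = "aa:00:00:35:2f:19"
    env["INSTANCE_NIC0_MODE"] = "bridged"
    env["INSTANCE_NIC0_LINK"] = "xen-br0"
    env["INSTANCE_NIC0_BRIDGE"] = "xen-br0"   # only emitted in bridged mode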
 
@@ -525,6 +527,27 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   return env
 
+def _PreBuildNICHooksList(lu, nics):
+  """Build a list of nic information tuples.
+
+  This list is suitable to be passed to _BuildInstanceHookEnv.
+
+  @type lu:  L{LogicalUnit}
+  @param lu: the logical unit on whose behalf we execute
+  @type nics: list of L{objects.NIC}
+  @param nics: list of nics to convert to hooks tuples
+
+  """
+  hooks_nics = []
+  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
+  for nic in nics:
+    ip = nic.ip
+    mac = nic.mac
+    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
+    mode = filled_params[constants.NIC_MODE]
+    link = filled_params[constants.NIC_LINK]
+    hooks_nics.append((ip, mac, mode, link))
+  return hooks_nics
 
 def _BuildInstanceHookEnvByObject(lu, instance, override=None):
   """Builds instance related env variables for hooks from an object.
@@ -550,7 +573,7 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
     'status': instance.admin_up,
     'memory': bep[constants.BE_MEMORY],
     'vcpus': bep[constants.BE_VCPUS],
-    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
+    'nics': _PreBuildNICHooksList(lu, instance.nics),
     'disk_template': instance.disk_template,
     'disks': [(disk.size, disk.mode) for disk in instance.disks],
   }
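
The 'nics' value above is now built by the new _PreBuildNICHooksList helper,
which resolves each NIC's parameters against the cluster defaults via
objects.FillDict before emitting (ip, mac, mode, link) tuples. A small
illustrative sketch with made-up values:

    c_nicparams = {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
                   constants.NIC_LINK: "xen-br0"}     # cluster defaults
    nic_params = {constants.NIC_LINK: "br-vlan42"}    # per-NIC override
    filled = objects.FillDict(c_nicparams, nic_params)
    # filled[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED
    # filled[constants.NIC_LINK] == "br-vlan42"
    # resulting hooks tuple: (nic.ip, nic.mac, "bridged", "br-vlan42")
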
@@ -575,18 +598,31 @@ def _AdjustCandidatePool(lu):
                (mc_now, mc_max))
 
 
-def _CheckInstanceBridgesExist(lu, instance):
+def _CheckNicsBridgesExist(lu, target_nics, target_node,
+                           profile=constants.PP_DEFAULT):
+  """Check that the bridges needed by a list of nics exist.
+
+  """
+  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
+  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
+                for nic in target_nics]
+  brlist = [params[constants.NIC_LINK] for params in paramslist
+            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
+  if brlist:
+    result = lu.rpc.call_bridges_exist(target_node, brlist)
+    msg = result.RemoteFailMsg()
+    if msg:
+      raise errors.OpPrereqError("Error checking bridges on destination node"
+                                 " '%s': %s" % (target_node, msg))
+
+
+def _CheckInstanceBridgesExist(lu, instance, node=None):
   """Check that the bridges needed by an instance exist.
 
   """
-  # check bridges existance
-  brlist = [nic.bridge for nic in instance.nics]
-  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
-  result.Raise()
-  if not result.data:
-    raise errors.OpPrereqError("One or more target bridges %s does not"
-                               " exist on destination node '%s'" %
-                               (brlist, instance.primary_node))
+  if node is None:
+    node = instance.primary_node
+  _CheckNicsBridgesExist(lu, instance.nics, node)
 
 
 class LUDestroyCluster(NoHooksLU):
@@ -986,7 +1022,6 @@ class LUVerifyCluster(LogicalUnit):
 
     for node_i in nodeinfo:
       node = node_i.name
-      nresult = all_nvinfo[node].data
 
       if node_i.offline:
         feedback_fn("* Skipping offline node %s" % (node,))
@@ -1004,15 +1039,25 @@ class LUVerifyCluster(LogicalUnit):
         ntype = "regular"
       feedback_fn("* Verifying node %s (%s)" % (node, ntype))
 
-      if all_nvinfo[node].failed or not isinstance(nresult, dict):
-        feedback_fn("  - ERROR: connection to %s failed" % (node,))
+      msg = all_nvinfo[node].RemoteFailMsg()
+      if msg:
+        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
         bad = True
         continue
 
+      nresult = all_nvinfo[node].payload
       node_drbd = {}
       for minor, instance in all_drbd_map[node].items():
-        instance = instanceinfo[instance]
-        node_drbd[minor] = (instance.name, instance.admin_up)
+        if instance not in instanceinfo:
+          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
+                      instance)
+          # ghost instance should not be running, but otherwise we
+          # don't give double warnings (both ghost instance and
+          # unallocated minor in use)
+          node_drbd[minor] = (instance, False)
+        else:
+          instance = instanceinfo[instance]
+          node_drbd[minor] = (instance.name, instance.admin_up)
       result = self._VerifyNode(node_i, file_names, local_checksums,
                                 nresult, feedback_fn, master_files,
                                 node_drbd, vg_name)
@@ -1065,9 +1110,17 @@ class LUVerifyCluster(LogicalUnit):
         }
         # FIXME: devise a free space model for file based instances as well
         if vg_name is not None:
+          if (constants.NV_VGLIST not in nresult or
+              vg_name not in nresult[constants.NV_VGLIST]):
+            feedback_fn("  - ERROR: node %s didn't return data for the"
+                        " volume group '%s' - it is either missing or broken" %
+                        (node, vg_name))
+            bad = True
+            continue
           node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
-      except ValueError:
-        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
+      except (ValueError, KeyError):
+        feedback_fn("  - ERROR: invalid nodeinfo value returned"
+                    " from node %s" % (node,))
         bad = True
         continue
 
@@ -1236,8 +1289,13 @@ class LUVerifyDisks(NoHooksLU):
   def Exec(self, feedback_fn):
     """Verify integrity of cluster disks.
 
+    @rtype: tuple of three items
+    @return: a tuple of (dict of node-to-node_error, list of instances
+        which need activate-disks, dict of instance: (node, volume) for
+        missing volumes)
+
     """
-    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
+    result = res_nodes, res_instances, res_missing = {}, [], {}
 
     vg_name = self.cfg.GetVGName()
     nodes = utils.NiceSort(self.cfg.GetNodeList())
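
With the old four-element result gone, Exec() now returns roughly the
following shape (all names and values below are invented, for illustration
only):

    res_nodes     = {"node2.example.com": "Error enumerating LVs: ..."}
    res_instances = ["instance3.example.com"]   # need activate-disks
    res_missing   = {"instance4.example.com":
                     [("node2.example.com", "xenvg/disk0")]}
    result = (res_nodes, res_instances, res_missing)
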
@@ -1264,23 +1322,17 @@ class LUVerifyDisks(NoHooksLU):
     to_act = set()
     for node in nodes:
       # node_volume
-      lvs = node_lvs[node]
-      if lvs.failed:
-        if not lvs.offline:
-          self.LogWarning("Connection to node %s failed: %s" %
-                          (node, lvs.data))
+      node_res = node_lvs[node]
+      if node_res.offline:
         continue
-      lvs = lvs.data
-      if isinstance(lvs, basestring):
-        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
-        res_nlvm[node] = lvs
-      elif not isinstance(lvs, dict):
-        logging.warning("Connection to node %s failed or invalid data"
-                        " returned", node)
-        res_nodes.append(node)
+      msg = node_res.RemoteFailMsg()
+      if msg:
+        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
+        res_nodes[node] = msg
         continue
 
-      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
+      lvs = node_res.payload
+      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
         inst = nv_dict.pop((node, lv_name), None)
         if (not lv_online and inst is not None
             and inst.name not in res_instances):
@@ -1365,15 +1417,18 @@ class LURenameCluster(LogicalUnit):
       result = self.rpc.call_upload_file(node_list,
                                          constants.SSH_KNOWN_HOSTS_FILE)
       for to_node, to_result in result.iteritems():
-        if to_result.failed or not to_result.data:
-          logging.error("Copy of file %s to node %s failed",
-                        constants.SSH_KNOWN_HOSTS_FILE, to_node)
+        msg = to_result.RemoteFailMsg()
+        if msg:
+          msg = ("Copy of file %s to node %s failed: %s" %
+                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
+          self.proc.LogWarning(msg)
 
     finally:
       result = self.rpc.call_node_start_master(master, False)
-      if result.failed or not result.data:
+      msg = result.RemoteFailMsg()
+      if msg:
         self.LogWarning("Could not re-enable the master role on"
-                        " the master, please restart manually.")
+                        " the master, please restart manually: %s", msg)
 
 
 def _RecursiveCheckIfLVMBased(disk):
@@ -1401,7 +1456,7 @@ class LUSetClusterParams(LogicalUnit):
   _OP_REQP = []
   REQ_BGL = False
 
-  def CheckParameters(self):
+  def CheckArguments(self):
     """Check parameters
 
     """
@@ -1410,7 +1465,7 @@ class LUSetClusterParams(LogicalUnit):
     if self.op.candidate_pool_size is not None:
       try:
         self.op.candidate_pool_size = int(self.op.candidate_pool_size)
-      except ValueError, err:
+      except (ValueError, TypeError), err:
         raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                    str(err))
       if self.op.candidate_pool_size < 1:
@@ -1456,11 +1511,13 @@ class LUSetClusterParams(LogicalUnit):
     if self.op.vg_name:
       vglist = self.rpc.call_vg_list(node_list)
       for node in node_list:
-        if vglist[node].failed:
+        msg = vglist[node].RemoteFailMsg()
+        if msg:
           # ignoring down node
-          self.LogWarning("Node %s unreachable/error, ignoring" % node)
+          self.LogWarning("Error while gathering data on node %s"
+                          " (ignoring node): %s", node, msg)
           continue
-        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
+        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                               self.op.vg_name,
                                               constants.MIN_VG_SIZE)
         if vgstatus:
@@ -1468,14 +1525,20 @@ class LUSetClusterParams(LogicalUnit):
                                      (node, vgstatus))
 
     self.cluster = cluster = self.cfg.GetClusterInfo()
-    # validate beparams changes
+    # validate params changes
     if self.op.beparams:
       utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
-      self.new_beparams = cluster.FillDict(
-        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
+      self.new_beparams = objects.FillDict(
+        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
+
+    if self.op.nicparams:
+      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
+      self.new_nicparams = objects.FillDict(
+        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
+      objects.NIC.CheckParameterSyntax(self.new_nicparams)
 
     # hypervisor list/parameters
-    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
+    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
     if self.op.hvparams:
       if not isinstance(self.op.hvparams, dict):
         raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
@@ -1507,8 +1570,11 @@ class LUSetClusterParams(LogicalUnit):
 
     """
     if self.op.vg_name is not None:
-      if self.op.vg_name != self.cfg.GetVGName():
-        self.cfg.SetVGName(self.op.vg_name)
+      new_volume = self.op.vg_name
+      if not new_volume:
+        new_volume = None
+      if new_volume != self.cfg.GetVGName():
+        self.cfg.SetVGName(new_volume)
       else:
         feedback_fn("Cluster LVM configuration already in desired"
                     " state, not changing")
@@ -1517,7 +1583,10 @@ class LUSetClusterParams(LogicalUnit):
     if self.op.enabled_hypervisors is not None:
       self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
     if self.op.beparams:
-      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
+      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
+    if self.op.nicparams:
+      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
+
     if self.op.candidate_pool_size is not None:
       self.cluster.candidate_pool_size = self.op.candidate_pool_size
 
@@ -1529,6 +1598,48 @@ class LUSetClusterParams(LogicalUnit):
       _AdjustCandidatePool(self)
 
 
+def _RedistributeAncillaryFiles(lu, additional_nodes=None):
+  """Distribute additional files which are part of the cluster configuration.
+
+  ConfigWriter takes care of distributing the config and ssconf files, but
+  there are more files which should be distributed to all nodes. This function
+  makes sure those are copied.
+
+  @param lu: calling logical unit
+  @param additional_nodes: list of nodes not in the config to distribute to
+
+  """
+  # 1. Gather target nodes
+  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
+  dist_nodes = lu.cfg.GetNodeList()
+  if additional_nodes is not None:
+    dist_nodes.extend(additional_nodes)
+  if myself.name in dist_nodes:
+    dist_nodes.remove(myself.name)
+  # 2. Gather files to distribute
+  dist_files = set([constants.ETC_HOSTS,
+                    constants.SSH_KNOWN_HOSTS_FILE,
+                    constants.RAPI_CERT_FILE,
+                    constants.RAPI_USERS_FILE,
+                   ])
+
+  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
+  for hv_name in enabled_hypervisors:
+    hv_class = hypervisor.GetHypervisor(hv_name)
+    dist_files.update(hv_class.GetAncillaryFiles())
+
+  # 3. Perform the files upload
+  for fname in dist_files:
+    if os.path.exists(fname):
+      result = lu.rpc.call_upload_file(dist_nodes, fname)
+      for to_node, to_result in result.items():
+        msg = to_result.RemoteFailMsg()
+        if msg:
+          msg = ("Copy of file %s to node %s failed: %s" %
+                 (fname, to_node, msg))
+          lu.proc.LogWarning(msg)
+
+
 class LURedistributeConfig(NoHooksLU):
   """Force the redistribution of cluster configuration.
 
@@ -1554,6 +1665,7 @@ class LURedistributeConfig(NoHooksLU):
 
     """
     self.cfg.Update(self.cfg.GetClusterInfo())
+    _RedistributeAncillaryFiles(self)
 
 
 def _WaitForSync(lu, instance, oneshot=False, unlock=False):
@@ -1577,15 +1689,16 @@ def _WaitForSync(lu, instance, oneshot=False, unlock=False):
     done = True
     cumul_degraded = False
     rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
-    if rstats.failed or not rstats.data:
-      lu.LogWarning("Can't get any data from node %s", node)
+    msg = rstats.RemoteFailMsg()
+    if msg:
+      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
       retries += 1
       if retries >= 10:
         raise errors.RemoteError("Can't contact node %s for mirror data,"
                                  " aborting." % node)
       time.sleep(6)
       continue
-    rstats = rstats.data
+    rstats = rstats.payload
     retries = 0
     for i, mstat in enumerate(rstats):
       if mstat is None:
@@ -1665,9 +1778,11 @@ class LUDiagnoseOS(NoHooksLU):
                        selected=self.op.output_fields)
 
     # Lock all nodes, in shared mode
+    # Temporary removal of locks, should be reverted later
+    # TODO: reintroduce locks when they are lighter-weight
     self.needed_locks = {}
-    self.share_locks[locking.LEVEL_NODE] = 1
-    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+    #self.share_locks[locking.LEVEL_NODE] = 1
+    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -1691,6 +1806,11 @@ class LUDiagnoseOS(NoHooksLU):
 
     """
     all_os = {}
+    # we build here the list of nodes that didn't fail the RPC (at RPC
+    # level), so that nodes with a non-responding node daemon don't
+    # make all OSes invalid
+    good_nodes = [node_name for node_name in rlist
+                  if not rlist[node_name].failed]
     for node_name, nr in rlist.iteritems():
       if nr.failed or not nr.data:
         continue
@@ -1699,7 +1819,7 @@ class LUDiagnoseOS(NoHooksLU):
           # build a list of nodes for this os containing empty lists
           # for each node in node_list
           all_os[os_obj.name] = {}
-          for nname in node_list:
+          for nname in good_nodes:
             all_os[os_obj.name][nname] = []
         all_os[os_obj.name][node_name].append(os_obj)
     return all_os
@@ -1708,9 +1828,7 @@ class LUDiagnoseOS(NoHooksLU):
     """Compute the list of OSes.
 
     """
-    node_list = self.acquired_locks[locking.LEVEL_NODE]
-    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
-                   if node in node_list]
+    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
     node_data = self.rpc.call_os_diagnose(valid_nodes)
     if node_data == False:
       raise errors.OpExecError("Can't gather the list of OSes")
@@ -1883,8 +2001,8 @@ class LUQueryNodes(NoHooksLU):
                                           self.cfg.GetHypervisorType())
       for name in nodenames:
         nodeinfo = node_data[name]
-        if not nodeinfo.failed and nodeinfo.data:
-          nodeinfo = nodeinfo.data
+        if not nodeinfo.RemoteFailMsg() and nodeinfo.payload:
+          nodeinfo = nodeinfo.payload
           fn = utils.TryConvert
           live_data[name] = {
             "mtotal": fn(int, nodeinfo.get('memory_total', None)),
@@ -2196,12 +2314,17 @@ class LUAddNode(LogicalUnit):
                                " new node: %s" % msg)
 
     # Add node to our /etc/hosts, and add key to known_hosts
-    utils.AddHostToEtcHosts(new_node.name)
+    if self.cfg.GetClusterInfo().modify_etc_hosts:
+      utils.AddHostToEtcHosts(new_node.name)
 
     if new_node.secondary_ip != new_node.primary_ip:
       result = self.rpc.call_node_has_ip_address(new_node.name,
                                                  new_node.secondary_ip)
-      if result.failed or not result.data:
+      msg = result.RemoteFailMsg()
+      if msg:
+        raise errors.OpPrereqError("Failure checking secondary ip"
+                                   " on node %s: %s" % (new_node.name, msg))
+      if not result.payload:
         raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                  " you gave (%s). Please fix and re-run this"
                                  " command." % new_node.secondary_ip)
@@ -2215,44 +2338,22 @@ class LUAddNode(LogicalUnit):
     result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                        self.cfg.GetClusterName())
     for verifier in node_verify_list:
-      if result[verifier].failed or not result[verifier].data:
-        raise errors.OpExecError("Cannot communicate with %s's node daemon"
-                                 " for remote verification" % verifier)
-      if result[verifier].data['nodelist']:
-        for failed in result[verifier].data['nodelist']:
+      msg = result[verifier].RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Cannot communicate with node %s: %s" %
+                                 (verifier, msg))
+      nl_payload = result[verifier].payload['nodelist']
+      if nl_payload:
+        for failed in nl_payload:
           feedback_fn("ssh/hostname verification failed %s -> %s" %
-                      (verifier, result[verifier].data['nodelist'][failed]))
+                      (verifier, nl_payload[failed]))
         raise errors.OpExecError("ssh/hostname verification failed.")
 
-    # Distribute updated /etc/hosts and known_hosts to all nodes,
-    # including the node just added
-    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
-    dist_nodes = self.cfg.GetNodeList()
-    if not self.op.readd:
-      dist_nodes.append(node)
-    if myself.name in dist_nodes:
-      dist_nodes.remove(myself.name)
-
-    logging.debug("Copying hosts and known_hosts to all nodes")
-    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
-      result = self.rpc.call_upload_file(dist_nodes, fname)
-      for to_node, to_result in result.iteritems():
-        if to_result.failed or not to_result.data:
-          logging.error("Copy of file %s to node %s failed", fname, to_node)
-
-    to_copy = []
-    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
-    if constants.HTS_COPY_VNC_PASSWORD.intersection(enabled_hypervisors):
-      to_copy.append(constants.VNC_PASSWORD_FILE)
-
-    for fname in to_copy:
-      result = self.rpc.call_upload_file([node], fname)
-      if result[node].failed or not result[node]:
-        logging.error("Could not copy file %s to node %s", fname, node)
-
     if self.op.readd:
+      _RedistributeAncillaryFiles(self)
       self.context.ReaddNode(new_node)
     else:
+      _RedistributeAncillaryFiles(self, additional_nodes=[node])
       self.context.AddNode(new_node)
 
 
@@ -2327,7 +2428,7 @@ class LUSetNodeParams(LogicalUnit):
         ((node.offline and not self.op.offline == False) or
          (node.drained and not self.op.drained == False))):
       raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
-                                 " to master_candidate")
+                                 " to master_candidate" % node.name)
 
     return
 
@@ -2383,6 +2484,51 @@ class LUSetNodeParams(LogicalUnit):
     return result
 
 
+class LUPowercycleNode(NoHooksLU):
+  """Powercycles a node.
+
+  """
+  _OP_REQP = ["node_name", "force"]
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    node_name = self.cfg.ExpandNodeName(self.op.node_name)
+    if node_name is None:
+      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
+    self.op.node_name = node_name
+    if node_name == self.cfg.GetMasterNode() and not self.op.force:
+      raise errors.OpPrereqError("The node is the master and the force"
+                                 " parameter was not set")
+
+  def ExpandNames(self):
+    """Locking for PowercycleNode.
+
+    This is a last-resource option and shouldn't block on other
+    jobs. Therefore, we grab no locks.
+
+    """
+    self.needed_locks = {}
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This LU has no prereqs.
+
+    """
+    pass
+
+  def Exec(self, feedback_fn):
+    """Reboots a node.
+
+    """
+    result = self.rpc.call_node_powercycle(self.op.node_name,
+                                           self.cfg.GetHypervisorType())
+    msg = result.RemoteFailMsg()
+    if msg:
+      raise errors.OpExecError("Failed to schedule the reboot: %s" % msg)
+    return result.payload
+
+
 class LUQueryClusterInfo(NoHooksLU):
   """Query cluster configuration.
 
@@ -2418,7 +2564,11 @@ class LUQueryClusterInfo(NoHooksLU):
       "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
                         for hypervisor in cluster.enabled_hypervisors]),
       "beparams": cluster.beparams,
+      "nicparams": cluster.nicparams,
       "candidate_pool_size": cluster.candidate_pool_size,
+      "master_netdev": cluster.master_netdev,
+      "volume_group_name": cluster.volume_group_name,
+      "file_storage_dir": cluster.file_storage_dir,
       }
 
     return result
@@ -2627,14 +2777,14 @@ def _SafeShutdownInstanceDisks(lu, instance):
   _ShutdownInstanceDisks.
 
   """
-  ins_l = lu.rpc.call_instance_list([instance.primary_node],
-                                      [instance.hypervisor])
-  ins_l = ins_l[instance.primary_node]
-  if ins_l.failed or not isinstance(ins_l.data, list):
-    raise errors.OpExecError("Can't contact node '%s'" %
-                             instance.primary_node)
-
-  if instance.name in ins_l.data:
+  pnode = instance.primary_node
+  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])
+  ins_l = ins_l[pnode]
+  msg = ins_l.RemoteFailMsg()
+  if msg:
+    raise errors.OpExecError("Can't contact node %s: %s" % (pnode, msg))
+
+  if instance.name in ins_l.payload:
     raise errors.OpExecError("Instance is running, can't shutdown"
                              " block devices.")
 
@@ -2687,15 +2837,17 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
 
   """
   nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
-  nodeinfo[node].Raise()
-  free_mem = nodeinfo[node].data.get('memory_free')
+  msg = nodeinfo[node].RemoteFailMsg()
+  if msg:
+    raise errors.OpPrereqError("Can't get data from node %s: %s" % (node, msg))
+  free_mem = nodeinfo[node].payload.get('memory_free', None)
   if not isinstance(free_mem, int):
     raise errors.OpPrereqError("Can't compute free memory on node %s, result"
-                             " was '%s'" % (node, free_mem))
+                               " was '%s'" % (node, free_mem))
   if requested > free_mem:
     raise errors.OpPrereqError("Not enough memory on node %s for %s:"
-                             " needed %s MiB, available %s MiB" %
-                             (node, reason, requested, free_mem))
+                               " needed %s MiB, available %s MiB" %
+                               (node, reason, requested, free_mem))
 
 
 class LUStartupInstance(LogicalUnit):
@@ -2733,15 +2885,51 @@ class LUStartupInstance(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
+    # extra beparams
+    self.beparams = getattr(self.op, "beparams", {})
+    if self.beparams:
+      if not isinstance(self.beparams, dict):
+        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
+                                   " dict" % (type(self.beparams), ))
+      # fill the beparams dict
+      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
+      self.op.beparams = self.beparams
+
+    # extra hvparams
+    self.hvparams = getattr(self.op, "hvparams", {})
+    if self.hvparams:
+      if not isinstance(self.hvparams, dict):
+        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
+                                   " dict" % (type(self.hvparams), ))
+
+      # check hypervisor parameter syntax (locally)
+      cluster = self.cfg.GetClusterInfo()
+      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
+      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
+                                    instance.hvparams)
+      filled_hvp.update(self.hvparams)
+      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
+      hv_type.CheckParameterSyntax(filled_hvp)
+      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+      self.op.hvparams = self.hvparams
+
     _CheckNodeOnline(self, instance.primary_node)
 
     bep = self.cfg.GetClusterInfo().FillBE(instance)
     # check bridges existence
     _CheckInstanceBridgesExist(self, instance)
 
-    _CheckNodeFreeMemory(self, instance.primary_node,
-                         "starting instance %s" % instance.name,
-                         bep[constants.BE_MEMORY], instance.hypervisor)
+    remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                              instance.name,
+                                              instance.hypervisor)
+    msg = remote_info.RemoteFailMsg()
+    if msg:
+      raise errors.OpPrereqError("Error checking node %s: %s" %
+                                 (instance.primary_node, msg))
+    if not remote_info.payload: # not running already
+      _CheckNodeFreeMemory(self, instance.primary_node,
+                           "starting instance %s" % instance.name,
+                           bep[constants.BE_MEMORY], instance.hypervisor)
 
   def Exec(self, feedback_fn):
     """Start the instance.
@@ -2756,7 +2944,8 @@ class LUStartupInstance(LogicalUnit):
 
     _StartInstanceDisks(self, instance, force)
 
-    result = self.rpc.call_instance_start(node_current, instance)
+    result = self.rpc.call_instance_start(node_current, instance,
+                                          self.hvparams, self.beparams)
     msg = result.RemoteFailMsg()
     if msg:
       _ShutdownInstanceDisks(self, instance)
@@ -2838,7 +3027,7 @@ class LURebootInstance(LogicalUnit):
                                  " full reboot: %s" % msg)
       _ShutdownInstanceDisks(self, instance)
       _StartInstanceDisks(self, instance, ignore_secondaries)
-      result = self.rpc.call_instance_start(node_current, instance)
+      result = self.rpc.call_instance_start(node_current, instance, None, None)
       msg = result.RemoteFailMsg()
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -2938,7 +3127,11 @@ class LUReinstallInstance(LogicalUnit):
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    if remote_info.failed or remote_info.data:
+    msg = remote_info.RemoteFailMsg()
+    if msg:
+      raise errors.OpPrereqError("Error checking node %s: %s" %
+                                 (instance.primary_node, msg))
+    if remote_info.payload:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
                                   instance.primary_node))
@@ -2973,7 +3166,7 @@ class LUReinstallInstance(LogicalUnit):
     _StartInstanceDisks(self, inst, None)
     try:
       feedback_fn("Running the instance OS create scripts...")
-      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
+      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
       msg = result.RemoteFailMsg()
       if msg:
         raise errors.OpExecError("Could not install OS for instance %s"
@@ -3021,8 +3214,11 @@ class LURenameInstance(LogicalUnit):
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    remote_info.Raise()
-    if remote_info.data:
+    msg = remote_info.RemoteFailMsg()
+    if msg:
+      raise errors.OpPrereqError("Error checking node %s: %s" %
+                                 (instance.primary_node, msg))
+    if remote_info.payload:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
                                   instance.primary_node))
@@ -3260,12 +3456,12 @@ class LUQueryInstances(NoHooksLU):
         if result.offline:
           # offline nodes will be in both lists
           off_nodes.append(name)
-        if result.failed:
+        if result.failed or result.RemoteFailMsg():
           bad_nodes.append(name)
         else:
-          if result.data:
-            live_data.update(result.data)
-            # else no instance is alive
+          if result.payload:
+            live_data.update(result.payload)
+          # else no instance is alive
     else:
       live_data = dict([(name, {}) for name in instance_names])
 
@@ -3460,15 +3656,8 @@ class LUFailoverInstance(LogicalUnit):
     _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                          instance.name, bep[constants.BE_MEMORY],
                          instance.hypervisor)
-
     # check bridge existence
-    brlist = [nic.bridge for nic in instance.nics]
-    result = self.rpc.call_bridges_exist(target_node, brlist)
-    result.Raise()
-    if not result.data:
-      raise errors.OpPrereqError("One or more target bridges %s does not"
-                                 " exist on destination node '%s'" %
-                                 (brlist, target_node))
+    _CheckInstanceBridgesExist(self, instance, node=target_node)
 
   def Exec(self, feedback_fn):
     """Failover an instance.
@@ -3528,7 +3717,7 @@ class LUFailoverInstance(LogicalUnit):
         raise errors.OpExecError("Can't activate the instance's disks")
 
       feedback_fn("* starting the instance on the target node")
-      result = self.rpc.call_instance_start(target_node, instance)
+      result = self.rpc.call_instance_start(target_node, instance, None, None)
       msg = result.RemoteFailMsg()
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -3600,12 +3789,7 @@ class LUMigrateInstance(LogicalUnit):
                          instance.hypervisor)
 
     # check bridge existance
-    brlist = [nic.bridge for nic in instance.nics]
-    result = self.rpc.call_bridges_exist(target_node, brlist)
-    if result.failed or not result.data:
-      raise errors.OpPrereqError("One or more target bridges %s does not"
-                                 " exist on destination node '%s'" %
-                                 (brlist, target_node))
+    _CheckInstanceBridgesExist(self, instance, node=target_node)
 
     if not self.op.cleanup:
       _CheckNodeNotDrained(self, target_node)
@@ -3716,12 +3900,12 @@ class LUMigrateInstance(LogicalUnit):
                      " a bad state)")
     ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
     for node, result in ins_l.items():
-      result.Raise()
-      if not isinstance(result.data, list):
-        raise errors.OpExecError("Can't contact node '%s'" % node)
+      msg = result.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Can't contact node %s: %s" % (node, msg))
 
-    runningon_source = instance.name in ins_l[source_node].data
-    runningon_target = instance.name in ins_l[target_node].data
+    runningon_source = instance.name in ins_l[source_node].payload
+    runningon_target = instance.name in ins_l[target_node].payload
 
     if runningon_source and runningon_target:
       raise errors.OpExecError("Instance seems to be running on two nodes,"
@@ -4212,8 +4396,8 @@ def _CheckHVParams(lu, nodenames, hvname, hvparams):
       continue
     msg = info.RemoteFailMsg()
     if msg:
-      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
-                                 " %s" % msg)
+      raise errors.OpPrereqError("Hypervisor parameter validation"
+                                 " failed on node %s: %s" % (node, msg))
 
 
 class LUCreateInstance(LogicalUnit):
@@ -4274,14 +4458,14 @@ class LUCreateInstance(LogicalUnit):
 
     # check hypervisor parameter syntax (locally)
     utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
-    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
+    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
                                   self.op.hvparams)
     hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
     hv_type.CheckParameterSyntax(filled_hvp)
 
     # fill and remember the beparams dict
     utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
-    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
+    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
                                     self.op.beparams)
 
     #### instance parameters check
@@ -4300,10 +4484,21 @@ class LUCreateInstance(LogicalUnit):
 
     # NIC buildup
     self.nics = []
-    for nic in self.op.nics:
+    for idx, nic in enumerate(self.op.nics):
+      nic_mode_req = nic.get("mode", None)
+      nic_mode = nic_mode_req
+      if nic_mode is None:
+        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
+
+      # in routed mode, for the first nic, the default ip is 'auto'
+      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
+        default_ip_mode = constants.VALUE_AUTO
+      else:
+        default_ip_mode = constants.VALUE_NONE
+
       # ip validity checks
-      ip = nic.get("ip", None)
-      if ip is None or ip.lower() == "none":
+      ip = nic.get("ip", default_ip_mode)
+      if ip is None or ip.lower() == constants.VALUE_NONE:
         nic_ip = None
       elif ip.lower() == constants.VALUE_AUTO:
         nic_ip = hostname1.ip
@@ -4313,6 +4508,10 @@ class LUCreateInstance(LogicalUnit):
                                      " like a valid IP" % ip)
         nic_ip = ip
 
+      # TODO: check the ip for uniqueness !!
+      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
+        raise errors.OpPrereqError("Routed nic mode requires an ip address")
+
       # MAC address verification
       mac = nic.get("mac", constants.VALUE_AUTO)
       if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
@@ -4321,9 +4520,24 @@ class LUCreateInstance(LogicalUnit):
                                      mac)
       # bridge verification
       bridge = nic.get("bridge", None)
-      if bridge is None:
-        bridge = self.cfg.GetDefBridge()
-      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
+      link = nic.get("link", None)
+      if bridge and link:
+        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
+                                   " at the same time")
+      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
+        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
+      elif bridge:
+        link = bridge
+
+      nicparams = {}
+      if nic_mode_req:
+        nicparams[constants.NIC_MODE] = nic_mode_req
+      if link:
+        nicparams[constants.NIC_LINK] = link
+
+      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
+                                      nicparams)
+      objects.NIC.CheckParameterSyntax(check_params)
+      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
 
     # disk checks/pre-build
     self.disks = []
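
To illustrate the NIC buildup above, two hypothetical op.nics entries and
what they turn into (assuming the cluster default mode is bridged; the
constants.NIC_* keys are abbreviated in the comments):

    # {"bridge": "xen-br1"}
    #   -> nic_mode_req is None, link = "xen-br1"
    #   -> nicparams == {NIC_LINK: "xen-br1"}
    # {"mode": "routed", "ip": "auto"}  (as the first nic)
    #   -> nic_ip = hostname1.ip
    #   -> nicparams == {NIC_MODE: "routed"}
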
@@ -4452,7 +4666,7 @@ class LUCreateInstance(LogicalUnit):
       os_type=self.op.os_type,
       memory=self.be_full[constants.BE_MEMORY],
       vcpus=self.be_full[constants.BE_VCPUS],
-      nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
+      nics=_PreBuildNICHooksList(self, self.nics),
       disk_template=self.op.disk_template,
       disks=[(d["size"], d["mode"]) for d in self.disks],
     ))
@@ -4476,11 +4690,13 @@ class LUCreateInstance(LogicalUnit):
       src_path = self.op.src_path
 
       if src_node is None:
-        exp_list = self.rpc.call_export_list(
-          self.acquired_locks[locking.LEVEL_NODE])
+        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
+        exp_list = self.rpc.call_export_list(locked_nodes)
         found = False
         for node in exp_list:
-          if not exp_list[node].failed and src_path in exp_list[node].data:
+          if exp_list[node].RemoteFailMsg():
+            continue
+          if src_path in exp_list[node].payload:
             found = True
             self.op.src_node = src_node = node
             self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
@@ -4492,11 +4708,12 @@ class LUCreateInstance(LogicalUnit):
 
       _CheckNodeOnline(self, src_node)
       result = self.rpc.call_export_info(src_node, src_path)
-      result.Raise()
-      if not result.data:
-        raise errors.OpPrereqError("No export found in dir %s" % src_path)
+      msg = result.RemoteFailMsg()
+      if msg:
+        raise errors.OpPrereqError("No export or invalid export found in"
+                                   " dir %s: %s" % (src_path, msg))
 
-      export_info = result.data
+      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
       if not export_info.has_section(constants.INISECT_EXP):
         raise errors.ProgrammerError("Corrupted export config")
 
@@ -4602,19 +4819,19 @@ class LUCreateInstance(LogicalUnit):
                                          self.op.hypervisor)
       for node in nodenames:
         info = nodeinfo[node]
-        info.Raise()
-        info = info.data
-        if not info:
+        msg = info.RemoteFailMsg()
+        if msg:
           raise errors.OpPrereqError("Cannot get current information"
-                                     " from node '%s'" % node)
+                                     " from node %s: %s" % (node, msg))
+        info = info.payload
         vg_free = info.get('vg_free', None)
         if not isinstance(vg_free, int):
           raise errors.OpPrereqError("Can't compute free disk space on"
                                      " node %s" % node)
-        if req_size > info['vg_free']:
+        if req_size > vg_free:
           raise errors.OpPrereqError("Not enough disk space on target node %s."
                                      " %d MB available, %d MB required" %
-                                     (node, info['vg_free'], req_size))
+                                     (node, vg_free, req_size))
 
     _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
 
@@ -4625,14 +4842,7 @@ class LUCreateInstance(LogicalUnit):
       raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                  " primary node"  % self.op.os_type)
 
-    # bridge check on primary node
-    bridges = [n.bridge for n in self.nics]
-    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
-    result.Raise()
-    if not result.data:
-      raise errors.OpPrereqError("One of the target bridges '%s' does not"
-                                 " exist on destination node '%s'" %
-                                 (",".join(bridges), pnode.name))
+    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 
     # memory check on primary node
     if self.op.start:
@@ -4741,7 +4951,7 @@ class LUCreateInstance(LogicalUnit):
     if iobj.disk_template != constants.DT_DISKLESS:
       if self.op.mode == constants.INSTANCE_CREATE:
         feedback_fn("* running the instance OS create scripts...")
-        result = self.rpc.call_instance_os_add(pnode_name, iobj)
+        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
         msg = result.RemoteFailMsg()
         if msg:
           raise errors.OpExecError("Could not add os for instance %s"
@@ -4756,12 +4966,10 @@ class LUCreateInstance(LogicalUnit):
         import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                          src_node, src_images,
                                                          cluster_name)
-        import_result.Raise()
-        for idx, result in enumerate(import_result.data):
-          if not result:
-            self.LogWarning("Could not import the image %s for instance"
-                            " %s, disk %d, on node %s" %
-                            (src_images[idx], instance, idx, pnode_name))
+        msg = import_result.RemoteFailMsg()
+        if msg:
+          self.LogWarning("Error while importing the disk images for instance"
+                          " %s on node %s: %s" % (instance, pnode_name, msg))
       else:
         # also checked in the prereq part
         raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
@@ -4772,7 +4980,7 @@ class LUCreateInstance(LogicalUnit):
       self.cfg.Update(iobj)
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
-      result = self.rpc.call_instance_start(pnode_name, iobj)
+      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
       msg = result.RemoteFailMsg()
       if msg:
         raise errors.OpExecError("Could not start instance: %s" % msg)
@@ -4812,9 +5020,12 @@ class LUConnectConsole(NoHooksLU):
 
     node_insts = self.rpc.call_instance_list([node],
                                              [instance.hypervisor])[node]
-    node_insts.Raise()
+    msg = node_insts.RemoteFailMsg()
+    if msg:
+      raise errors.OpExecError("Can't get node information from %s: %s" %
+                               (node, msg))
 
-    if instance.name not in node_insts.data:
+    if instance.name not in node_insts.payload:
       raise errors.OpExecError("Instance %s is not running." % instance.name)
 
     logging.debug("Connecting to console of %s on %s", instance.name, node)
@@ -5036,7 +5247,10 @@ class LUReplaceDisks(LogicalUnit):
       raise errors.OpExecError("Can't list volume groups on the nodes")
     for node in oth_node, tgt_node:
       res = results[node]
-      if res.failed or not res.data or my_vg not in res.data:
+      msg = res.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Error checking node %s: %s" % (node, msg))
+      if my_vg not in res.payload:
         raise errors.OpExecError("Volume group '%s' not found on %s" %
                                  (my_vg, node))
     for idx, dev in enumerate(instance.disks):
@@ -5094,10 +5308,11 @@ class LUReplaceDisks(LogicalUnit):
     for dev, old_lvs, new_lvs in iv_names.itervalues():
       info("detaching %s drbd from local storage" % dev.iv_name)
       result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
-      result.Raise()
-      if not result.data:
+      msg = result.RemoteFailMsg()
+      if msg:
         raise errors.OpExecError("Can't detach drbd from local storage on node"
-                                 " %s for device %s" % (tgt_node, dev.iv_name))
+                                 " %s for device %s: %s" %
+                                 (tgt_node, dev.iv_name, msg))
       #dev.children = []
       #cfg.Update(instance)
 
@@ -5121,16 +5336,18 @@ class LUReplaceDisks(LogicalUnit):
 
       info("renaming the old LVs on the target node")
       result = self.rpc.call_blockdev_rename(tgt_node, rlist)
-      result.Raise()
-      if not result.data:
-        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
+      msg = result.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Can't rename old LVs on node %s: %s" %
+                                 (tgt_node, msg))
       # now we rename the new LVs to the old LVs
       info("renaming the new LVs on the target node")
       rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
       result = self.rpc.call_blockdev_rename(tgt_node, rlist)
-      result.Raise()
-      if not result.data:
-        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
+      msg = result.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Can't rename new LVs on node %s: %s" %
+                                 (tgt_node, msg))
 
       for old, new in zip(old_lvs, new_lvs):
         new.logical_id = old.logical_id
@@ -5143,13 +5360,14 @@ class LUReplaceDisks(LogicalUnit):
       # now that the new lvs have the old name, we can add them to the device
       info("adding new mirror component on %s" % tgt_node)
       result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
-      if result.failed or not result.data:
+      msg = result.RemoteFailMsg()
+      if msg:
+        # keep the original error; the loop below reuses 'msg' for rollback
+        add_msg = msg
         for new_lv in new_lvs:
           msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
           if msg:
             warning("Can't rollback device %s: %s", dev, msg,
                     hint="cleanup manually the unused logical volumes")
-        raise errors.OpExecError("Can't add local storage to drbd")
+        raise errors.OpExecError("Can't add local storage to drbd: %s" %
+                                 add_msg)
 
       dev.children = new_lvs
       cfg.Update(instance)
@@ -5228,7 +5446,10 @@ class LUReplaceDisks(LogicalUnit):
     results = self.rpc.call_vg_list([pri_node, new_node])
     for node in pri_node, new_node:
       res = results[node]
-      if res.failed or not res.data or my_vg not in res.data:
+      msg = res.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Error checking node %s: %s" % (node, msg))
+      if my_vg not in res.payload:
         raise errors.OpExecError("Volume group '%s' not found on %s" %
                                  (my_vg, node))
     for idx, dev in enumerate(instance.disks):
@@ -5458,10 +5679,11 @@ class LUGrowDisk(LogicalUnit):
                                        instance.hypervisor)
     for node in nodenames:
       info = nodeinfo[node]
-      if info.failed or not info.data:
+      msg = info.RemoteFailMsg()
+      if msg:
         raise errors.OpPrereqError("Cannot get current information"
-                                   " from node '%s'" % node)
-      vg_free = info.data.get('vg_free', None)
+                                   " from node %s: %s" % (node, msg))
+      vg_free = info.payload.get('vg_free', None)
       if not isinstance(vg_free, int):
         raise errors.OpPrereqError("Can't compute free disk space on"
                                    " node %s" % node)
@@ -5608,8 +5830,11 @@ class LUQueryInstanceData(NoHooksLU):
         remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                   instance.name,
                                                   instance.hypervisor)
-        remote_info.Raise()
-        remote_info = remote_info.data
+        msg = remote_info.RemoteFailMsg()
+        if msg:
+          raise errors.OpExecError("Error checking node %s: %s" %
+                                   (instance.primary_node, msg))
+        remote_info = remote_info.payload
         if remote_info and "state" in remote_info:
           remote_state = "up"
         else:
@@ -5724,10 +5949,16 @@ class LUSetInstanceParams(LogicalUnit):
           if not utils.IsValidIP(nic_ip):
             raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
 
+      nic_bridge = nic_dict.get('bridge', None)
+      nic_link = nic_dict.get('link', None)
+      if nic_bridge and nic_link:
+        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
+                                   " at the same time")
+      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
+        nic_dict['bridge'] = None
+      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
+        nic_dict['link'] = None
+
       if nic_op == constants.DDM_ADD:
-        nic_bridge = nic_dict.get('bridge', None)
-        if nic_bridge is None:
-          nic_dict['bridge'] = self.cfg.GetDefBridge()
         nic_mac = nic_dict.get('mac', None)
         if nic_mac is None:
           nic_dict['mac'] = constants.VALUE_AUTO
@@ -5770,6 +6001,7 @@ class LUSetInstanceParams(LogicalUnit):
     if self.op.nics:
       args['nics'] = []
       nic_override = dict(self.op.nics)
+      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
       for idx, nic in enumerate(self.instance.nics):
         if idx in nic_override:
           this_nic_override = nic_override[idx]
@@ -5779,20 +6011,24 @@ class LUSetInstanceParams(LogicalUnit):
           ip = this_nic_override['ip']
         else:
           ip = nic.ip
-        if 'bridge' in this_nic_override:
-          bridge = this_nic_override['bridge']
-        else:
-          bridge = nic.bridge
         if 'mac' in this_nic_override:
           mac = this_nic_override['mac']
         else:
           mac = nic.mac
-        args['nics'].append((ip, bridge, mac))
+        if idx in self.nic_pnew:
+          nicparams = self.nic_pnew[idx]
+        else:
+          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
+        mode = nicparams[constants.NIC_MODE]
+        link = nicparams[constants.NIC_LINK]
+        args['nics'].append((ip, mac, mode, link))
       if constants.DDM_ADD in nic_override:
         ip = nic_override[constants.DDM_ADD].get('ip', None)
-        bridge = nic_override[constants.DDM_ADD]['bridge']
         mac = nic_override[constants.DDM_ADD]['mac']
-        args['nics'].append((ip, bridge, mac))
+        nicparams = self.nic_pnew[constants.DDM_ADD]
+        mode = nicparams[constants.NIC_MODE]
+        link = nicparams[constants.NIC_LINK]
+        args['nics'].append((ip, mac, mode, link))
       elif constants.DDM_REMOVE in nic_override:
         del args['nics'][-1]
 
@@ -5800,6 +6036,38 @@ class LUSetInstanceParams(LogicalUnit):
     nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
     return env, nl, nl
 
+  def _GetUpdatedParams(self, old_params, update_dict,
+                        default_values, parameter_types):
+    """Return the new params dict for the given params.
+
+    @type old_params: dict
+    @param old_params: old parameters
+    @type update_dict: dict
+    @param update_dict: dict containing new parameter values,
+                       or constants.VALUE_DEFAULT to reset the
+                       parameter to its default value
+    @type default_values: dict
+    @param default_values: default values for the filled parameters
+    @type parameter_types: dict
+    @param parameter_types: dict mapping target dict keys to types
+                            in constants.ENFORCEABLE_TYPES
+    @rtype: (dict, dict)
+    @return: (new_parameters, filled_parameters)
+
+    """
+    params_copy = copy.deepcopy(old_params)
+    for key, val in update_dict.iteritems():
+      if val == constants.VALUE_DEFAULT:
+        try:
+          del params_copy[key]
+        except KeyError:
+          pass
+      else:
+        params_copy[key] = val
+    utils.ForceDictType(params_copy, parameter_types)
+    params_filled = objects.FillDict(default_values, params_copy)
+    return (params_copy, params_filled)
+
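
A small worked example of the helper's semantics (the parameter names and
values below are purely illustrative):

    old_params     = {"kernel_path": "/boot/vmlinuz-custom"}
    update_dict    = {"kernel_path": constants.VALUE_DEFAULT,   # reset
                      "root_path": "/dev/xvda1"}
    default_values = {"kernel_path": "/boot/vmlinuz-2.6-xenU",
                      "root_path": "/dev/sda1"}
    # returned "new" dict (no defaults): {"root_path": "/dev/xvda1"}
    # returned filled dict: {"kernel_path": "/boot/vmlinuz-2.6-xenU",
    #                        "root_path": "/dev/xvda1"}
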
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -5811,6 +6079,7 @@ class LUSetInstanceParams(LogicalUnit):
     # checking the new params on the primary/secondary nodes
 
     instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    cluster = self.cluster = self.cfg.GetClusterInfo()
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
     pnode = instance.primary_node
@@ -5818,19 +6087,10 @@ class LUSetInstanceParams(LogicalUnit):
 
     # hvparams processing
     if self.op.hvparams:
-      i_hvdict = copy.deepcopy(instance.hvparams)
-      for key, val in self.op.hvparams.iteritems():
-        if val == constants.VALUE_DEFAULT:
-          try:
-            del i_hvdict[key]
-          except KeyError:
-            pass
-        else:
-          i_hvdict[key] = val
-      cluster = self.cfg.GetClusterInfo()
-      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
-      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
-                                i_hvdict)
+      i_hvdict, hv_new = self._GetUpdatedParams(
+                             instance.hvparams, self.op.hvparams,
+                             cluster.hvparams[instance.hypervisor],
+                             constants.HVS_PARAMETER_TYPES)
       # local check
       hypervisor.GetHypervisor(
         instance.hypervisor).CheckParameterSyntax(hv_new)
@@ -5842,19 +6102,10 @@ class LUSetInstanceParams(LogicalUnit):
 
     # beparams processing
     if self.op.beparams:
-      i_bedict = copy.deepcopy(instance.beparams)
-      for key, val in self.op.beparams.iteritems():
-        if val == constants.VALUE_DEFAULT:
-          try:
-            del i_bedict[key]
-          except KeyError:
-            pass
-        else:
-          i_bedict[key] = val
-      cluster = self.cfg.GetClusterInfo()
-      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
-      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
-                                i_bedict)
+      i_bedict, be_new = self._GetUpdatedParams(
+                             instance.beparams, self.op.beparams,
+                             cluster.beparams[constants.PP_DEFAULT],
+                             constants.BES_PARAMETER_TYPES)
       self.be_new = be_new # the new actual values
       self.be_inst = i_bedict # the new dict (without defaults)
     else:
@@ -5871,35 +6122,51 @@ class LUSetInstanceParams(LogicalUnit):
                                                   instance.hypervisor)
       nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                          instance.hypervisor)
-      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
+      pninfo = nodeinfo[pnode]
+      msg = pninfo.RemoteFailMsg()
+      if msg:
         # Assume the primary node is unreachable and go ahead
-        self.warn.append("Can't get info from primary node %s" % pnode)
+        self.warn.append("Can't get info from primary node %s: %s" %
+                         (pnode,  msg))
+      elif not isinstance(pninfo.payload.get('memory_free', None), int):
+        self.warn.append("Node data from primary node %s doesn't contain"
+                         " free memory information" % pnode)
+      elif instance_info.RemoteFailMsg():
+        self.warn.append("Can't get instance runtime information: %s" %
+                        instance_info.RemoteFailMsg())
       else:
-        if not instance_info.failed and instance_info.data:
-          current_mem = instance_info.data['memory']
+        if instance_info.payload:
+          current_mem = int(instance_info.payload['memory'])
         else:
           # Assume instance not running
           # (there is a slight race condition here, but it's not very probable,
           # and we have no other way to check)
           current_mem = 0
         miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
-                    nodeinfo[pnode].data['memory_free'])
+                    pninfo.payload['memory_free'])
         if miss_mem > 0:
           raise errors.OpPrereqError("This change will prevent the instance"
                                      " from starting, due to %d MB of memory"
                                      " missing on its primary node" % miss_mem)
 
       if be_new[constants.BE_AUTO_BALANCE]:
-        for node, nres in nodeinfo.iteritems():
+        for node, nres in nodeinfo.items():
           if node not in instance.secondary_nodes:
             continue
-          if nres.failed or not isinstance(nres.data, dict):
-            self.warn.append("Can't get info from secondary node %s" % node)
-          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
+          msg = nres.RemoteFailMsg()
+          if msg:
+            self.warn.append("Can't get info from secondary node %s: %s" %
+                             (node, msg))
+          elif not isinstance(nres.payload.get('memory_free', None), int):
+            self.warn.append("Secondary node %s didn't return free"
+                             " memory information" % node)
+          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
             self.warn.append("Not enough memory to failover instance to"
                              " secondary node %s" % node)
 
     # NIC processing
+    self.nic_pnew = {}
+    self.nic_pinst = {}
     for nic_op, nic_dict in self.op.nics:
       if nic_op == constants.DDM_REMOVE:
         if not instance.nics:
@@ -5911,17 +6178,46 @@ class LUSetInstanceParams(LogicalUnit):
           raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                      " are 0 to %d" %
                                      (nic_op, len(instance.nics)))
+        old_nic_params = instance.nics[nic_op].nicparams
+        old_nic_ip = instance.nics[nic_op].ip
+      else:
+        old_nic_params = {}
+        old_nic_ip = None
+
+      update_params_dict = dict([(key, nic_dict[key])
+                                 for key in constants.NICS_PARAMETERS
+                                 if key in nic_dict])
+
       if 'bridge' in nic_dict:
-        nic_bridge = nic_dict['bridge']
-        if nic_bridge is None:
-          raise errors.OpPrereqError('Cannot set the nic bridge to None')
-        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
-          msg = ("Bridge '%s' doesn't exist on one of"
-                 " the instance nodes" % nic_bridge)
+        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
+
+      new_nic_params, new_filled_nic_params = \
+          self._GetUpdatedParams(old_nic_params, update_params_dict,
+                                 cluster.nicparams[constants.PP_DEFAULT],
+                                 constants.NICS_PARAMETER_TYPES)
+      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
+      self.nic_pinst[nic_op] = new_nic_params
+      self.nic_pnew[nic_op] = new_filled_nic_params
+      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
+
+      if new_nic_mode == constants.NIC_MODE_BRIDGED:
+        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
+        result = self.rpc.call_bridges_exist(pnode, [nic_bridge])
+        msg = result.RemoteFailMsg()
+        if msg:
+          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
           if self.force:
             self.warn.append(msg)
           else:
             raise errors.OpPrereqError(msg)
+      if new_nic_mode == constants.NIC_MODE_ROUTED:
+        if 'ip' in nic_dict:
+          nic_ip = nic_dict['ip']
+        else:
+          nic_ip = old_nic_ip
+        if nic_ip is None:
+          raise errors.OpPrereqError('Cannot set the nic ip to None'
+                                     ' on a routed nic')
       if 'mac' in nic_dict:
         nic_mac = nic_dict['mac']
         if nic_mac is None:
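
In short, the prereq loop above now validates a NIC against its filled
parameters rather than a bare bridge attribute: bridged NICs must point at an
existing bridge on the primary node, routed NICs must end up with an IP. A
rough standalone sketch, where the mode strings stand in for
constants.NIC_MODE_BRIDGED/ROUTED and bridge_exists for the rpc call:

  NIC_MODE_BRIDGED = "bridged"
  NIC_MODE_ROUTED = "routed"

  def validate_nic(mode, link, ip, bridge_exists):
    if mode == NIC_MODE_BRIDGED:
      # in bridged mode the link is the bridge name and must exist on the node
      if not bridge_exists(link):
        raise ValueError("bridge '%s' missing on the primary node" % link)
    elif mode == NIC_MODE_ROUTED:
      # routed NICs need an IP so the node can set up the route
      if ip is None:
        raise ValueError("routed NICs require an IP address")

  validate_nic(NIC_MODE_BRIDGED, "xen-br0", None, lambda br: br == "xen-br0")
  validate_nic(NIC_MODE_ROUTED, "eth0", "192.0.2.10", lambda br: True)
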
@@ -5946,9 +6242,11 @@ class LUSetInstanceParams(LogicalUnit):
                                      " an instance")
         ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
         ins_l = ins_l[pnode]
-        if ins_l.failed or not isinstance(ins_l.data, list):
-          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
-        if instance.name in ins_l.data:
+        msg = ins_l.RemoteFailMsg()
+        if msg:
+          raise errors.OpPrereqError("Can't contact node %s: %s" %
+                                     (pnode, msg))
+        if instance.name in ins_l.payload:
           raise errors.OpPrereqError("Instance is running, can't remove"
                                      " disks.")
 
@@ -5978,6 +6276,7 @@ class LUSetInstanceParams(LogicalUnit):
 
     result = []
     instance = self.instance
+    cluster = self.cluster
     # disk changes
     for disk_op, disk_dict in self.op.disks:
       if disk_op == constants.DDM_REMOVE:
@@ -6038,19 +6337,24 @@ class LUSetInstanceParams(LogicalUnit):
       elif nic_op == constants.DDM_ADD:
         # mac and bridge should be set, by now
         mac = nic_dict['mac']
-        bridge = nic_dict['bridge']
-        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
-                              bridge=bridge)
+        ip = nic_dict.get('ip', None)
+        nicparams = self.nic_pinst[constants.DDM_ADD]
+        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
         instance.nics.append(new_nic)
         result.append(("nic.%d" % (len(instance.nics) - 1),
-                       "add:mac=%s,ip=%s,bridge=%s" %
-                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
+                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
+                       (new_nic.mac, new_nic.ip,
+                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
+                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
+                       )))
       else:
-        # change a given nic
-        for key in 'mac', 'ip', 'bridge':
+        for key in 'mac', 'ip':
           if key in nic_dict:
             setattr(instance.nics[nic_op], key, nic_dict[key])
-            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
+        if nic_op in self.nic_pnew:
+          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
+        for key, val in nic_dict.iteritems():
+          result.append(("nic.%s/%d" % (key, nic_op), val))
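
The Exec side applies the NIC operations in the usual DDM fashion: DDM_ADD
appends a NIC built from the mac/ip plus the pre-computed nicparams, and an
integer index updates an existing NIC in place. A compact sketch of that
dispatch with NICs as plain dicts (assuming DDM_REMOVE drops the last NIC, as
the prereq check above suggests):

  DDM_ADD = "add"       # stand-ins for constants.DDM_ADD / DDM_REMOVE
  DDM_REMOVE = "remove"

  def apply_nic_ops(nics, ops):
    for op, params in ops:
      if op == DDM_REMOVE:
        nics.pop()                    # assumed: drop the last NIC
      elif op == DDM_ADD:
        nics.append(dict(params))     # append a freshly built NIC
      else:
        nics[op].update(params)       # op is an index: modify in place

  nics = [{"mac": "aa:00:00:00:00:01", "ip": None}]
  apply_nic_ops(nics, [(DDM_ADD, {"mac": "aa:00:00:00:00:02", "ip": None}),
                       (0, {"ip": "192.0.2.5"})])
  assert len(nics) == 2 and nics[0]["ip"] == "192.0.2.5"
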
 
     # hvparams changes
     if self.op.hvparams:
@@ -6103,10 +6407,10 @@ class LUQueryExports(NoHooksLU):
     rpcresult = self.rpc.call_export_list(self.nodes)
     result = {}
     for node in rpcresult:
-      if rpcresult[node].failed:
+      if rpcresult[node].RemoteFailMsg():
         result[node] = False
       else:
-        result[node] = rpcresult[node].data
+        result[node] = rpcresult[node].payload
 
     return result
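
This hunk shows the conversion pattern used throughout the patch: instead of
checking result.failed and reading result.data, callers ask RemoteFailMsg()
for an error string and read the decoded result from result.payload. A
minimal sketch of the idiom with a stand-in result class (not ganeti's real
rpc result object):

  class FakeRpcResult(object):
    def __init__(self, payload=None, fail_msg=""):
      self.payload = payload
      self._fail_msg = fail_msg

    def RemoteFailMsg(self):
      # empty when the remote call succeeded, the error text otherwise
      return self._fail_msg

  def collect_exports(rpcresult):
    # same shape as LUQueryExports.Exec above: node -> export list, or False
    result = {}
    for node in rpcresult:
      if rpcresult[node].RemoteFailMsg():
        result[node] = False
      else:
        result[node] = rpcresult[node].payload
    return result

  results = {"node1": FakeRpcResult(payload=["inst1.example.com"]),
             "node2": FakeRpcResult(fail_msg="connection refused")}
  print(collect_exports(results))
  # -> {'node1': ['inst1.example.com'], 'node2': False}
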
 
@@ -6205,22 +6509,23 @@ class LUExportInstance(LogicalUnit):
 
     try:
       for disk in instance.disks:
-        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
-        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
-        if new_dev_name.failed or not new_dev_name.data:
-          self.LogWarning("Could not snapshot block device %s on node %s",
-                          disk.logical_id[1], src_node)
+        # result.payload will be a snapshot of an lvm leaf of the given disk
+        result = self.rpc.call_blockdev_snapshot(src_node, disk)
+        msg = result.RemoteFailMsg()
+        if msg:
+          self.LogWarning("Could not snapshot block device %s on node %s: %s",
+                          disk.logical_id[1], src_node, msg)
           snap_disks.append(False)
         else:
+          disk_id = (vgname, result.payload)
           new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
-                                 logical_id=(vgname, new_dev_name.data),
-                                 physical_id=(vgname, new_dev_name.data),
+                                 logical_id=disk_id, physical_id=disk_id,
                                  iv_name=disk.iv_name)
           snap_disks.append(new_dev)
 
     finally:
       if self.op.shutdown and instance.admin_up:
-        result = self.rpc.call_instance_start(src_node, instance)
+        result = self.rpc.call_instance_start(src_node, instance, None, None)
         msg = result.RemoteFailMsg()
         if msg:
           _ShutdownInstanceDisks(self, instance)
@@ -6233,19 +6538,21 @@ class LUExportInstance(LogicalUnit):
       if dev:
         result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                                instance, cluster_name, idx)
-        if result.failed or not result.data:
+        msg = result.RemoteFailMsg()
+        if msg:
           self.LogWarning("Could not export block device %s from node %s to"
-                          " node %s", dev.logical_id[1], src_node,
-                          dst_node.name)
+                          " node %s: %s", dev.logical_id[1], src_node,
+                          dst_node.name, msg)
         msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
         if msg:
           self.LogWarning("Could not remove snapshot block device %s from node"
                           " %s: %s", dev.logical_id[1], src_node, msg)
 
     result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
-    if result.failed or not result.data:
-      self.LogWarning("Could not finalize export for instance %s on node %s",
-                      instance.name, dst_node.name)
+    msg = result.RemoteFailMsg()
+    if msg:
+      self.LogWarning("Could not finalize export for instance %s"
+                      " on node %s: %s", instance.name, dst_node.name, msg)
 
     nodelist = self.cfg.GetNodeList()
     nodelist.remove(dst_node.name)
@@ -6253,15 +6560,17 @@ class LUExportInstance(LogicalUnit):
     # on one-node clusters nodelist will be empty after the removal
     # if we proceed the backup would be removed because OpQueryExports
     # substitutes an empty list with the full cluster node list.
+    iname = instance.name
     if nodelist:
       exportlist = self.rpc.call_export_list(nodelist)
       for node in exportlist:
-        if exportlist[node].failed:
+        if exportlist[node].RemoteFailMsg():
           continue
-        if instance.name in exportlist[node].data:
-          if not self.rpc.call_export_remove(node, instance.name):
+        if iname in exportlist[node].payload:
+          msg = self.rpc.call_export_remove(node, iname).RemoteFailMsg()
+          if msg:
             self.LogWarning("Could not remove older export for instance %s"
-                            " on node %s", instance.name, node)
+                            " on node %s: %s", iname, node, msg)
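
For orientation, the export path above is essentially: snapshot every disk
(inside a try/finally that restarts the instance if it was shut down for the
export), then copy each snapshot to the destination node and always remove it
afterwards, logging warnings rather than aborting on individual failures. A
rough schematic of that flow, not the real RPC API:

  def export_disks(disks, snapshot, copy_to_dest, remove, warn):
    # phase 1: snapshot every disk; False marks a failed snapshot
    snaps = []
    for disk in disks:
      snap = snapshot(disk)
      if snap is None:
        warn("could not snapshot %s" % disk)
        snaps.append(False)
      else:
        snaps.append(snap)
    # phase 2: copy each snapshot to the destination, then always clean it up
    for snap in snaps:
      if not snap:
        continue
      if not copy_to_dest(snap):
        warn("could not export %s" % snap)
      if not remove(snap):
        warn("could not remove snapshot %s" % snap)
    return snaps

  exported = export_disks(["sda"], lambda d: "snap-" + d,
                          lambda s: True, lambda s: True, lambda m: None)
  assert exported == ["snap-sda"]
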
 
 
 class LURemoveExport(NoHooksLU):
@@ -6295,19 +6604,21 @@ class LURemoveExport(NoHooksLU):
       fqdn_warn = True
       instance_name = self.op.instance_name
 
-    exportlist = self.rpc.call_export_list(self.acquired_locks[
-      locking.LEVEL_NODE])
+    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
+    exportlist = self.rpc.call_export_list(locked_nodes)
     found = False
     for node in exportlist:
-      if exportlist[node].failed:
-        self.LogWarning("Failed to query node %s, continuing" % node)
+      msg = exportlist[node].RemoteFailMsg()
+      if msg:
+        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
         continue
-      if instance_name in exportlist[node].data:
+      if instance_name in exportlist[node].payload:
         found = True
         result = self.rpc.call_export_remove(node, instance_name)
-        if result.failed or not result.data:
+        msg = result.RemoteFailMsg()
+        if msg:
           logging.error("Could not remove export for instance %s"
-                        " on node %s", instance_name, node)
+                        " on node %s: %s", instance_name, node, msg)
 
     if fqdn_warn and not found:
       feedback_fn("Export not found. If trying to remove an export belonging"
@@ -6627,29 +6938,33 @@ class IAllocator(object):
         }
 
       if not ninfo.offline:
-        nresult.Raise()
-        if not isinstance(nresult.data, dict):
-          raise errors.OpExecError("Can't get data for node %s" % nname)
-        remote_info = nresult.data
+        msg = nresult.RemoteFailMsg()
+        if msg:
+          raise errors.OpExecError("Can't get data for node %s: %s" %
+                                   (nname, msg))
+        msg = node_iinfo[nname].RemoteFailMsg()
+        if msg:
+          raise errors.OpExecError("Can't get node instance info"
+                                   " from node %s: %s" % (nname, msg))
+        remote_info = nresult.payload
         for attr in ['memory_total', 'memory_free', 'memory_dom0',
                      'vg_size', 'vg_free', 'cpu_total']:
           if attr not in remote_info:
             raise errors.OpExecError("Node '%s' didn't return attribute"
                                      " '%s'" % (nname, attr))
-          try:
-            remote_info[attr] = int(remote_info[attr])
-          except ValueError, err:
+          if not isinstance(remote_info[attr], int):
             raise errors.OpExecError("Node '%s' returned invalid value"
-                                     " for '%s': %s" % (nname, attr, err))
+                                     " for '%s': %s" %
+                                     (nname, attr, remote_info[attr]))
         # compute memory used by primary instances
         i_p_mem = i_p_up_mem = 0
         for iinfo, beinfo in i_list:
           if iinfo.primary_node == nname:
             i_p_mem += beinfo[constants.BE_MEMORY]
-            if iinfo.name not in node_iinfo[nname].data:
+            if iinfo.name not in node_iinfo[nname].payload:
               i_used_mem = 0
             else:
-              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
+              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
             i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
             remote_info['memory_free'] -= max(0, i_mem_diff)
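
The loop above charges each primary instance's reserved-but-unused memory
against the node's free memory, so the allocator treats memory of stopped (or
under-using) instances as already committed. The arithmetic, in isolation:

  def effective_free_memory(memory_free, instances):
    # instances: list of (configured_memory, currently_used_memory) in MB
    for configured_mem, used_mem in instances:
      memory_free -= max(0, configured_mem - used_mem)
    return memory_free

  # 4096 MB free, one running instance using its full 1024 MB and one
  # stopped instance configured for 512 MB:
  assert effective_free_memory(4096, [(1024, 1024), (512, 0)]) == 3584
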
 
@@ -6675,8 +6990,19 @@ class IAllocator(object):
     # instance data
     instance_data = {}
     for iinfo, beinfo in i_list:
-      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
-                  for n in iinfo.nics]
+      nic_data = []
+      for nic in iinfo.nics:
+        filled_params = objects.FillDict(
+            cluster_info.nicparams[constants.PP_DEFAULT],
+            nic.nicparams)
+        nic_dict = {"mac": nic.mac,
+                    "ip": nic.ip,
+                    "mode": filled_params[constants.NIC_MODE],
+                    "link": filled_params[constants.NIC_LINK],
+                   }
+        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
+          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
+        nic_data.append(nic_dict)
       pir = {
         "tags": list(iinfo.GetTags()),
         "admin_up": iinfo.admin_up,
@@ -6689,6 +7015,8 @@ class IAllocator(object):
         "disk_template": iinfo.disk_template,
         "hypervisor": iinfo.hypervisor,
         }
+      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
+                                                 pir["disks"])
       instance_data[iinfo.name] = pir
 
     data["instances"] = instance_data