Export extractExTags and updateExclTags
diff --git a/lib/rpc.py b/lib/rpc.py
index 74c924f..c934ba2 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # if they need to start using instance attributes
 # R0904: Too many public methods
 
-import os
 import logging
 import zlib
 import base64
 import pycurl
 import threading
+import copy
 
 from ganeti import utils
 from ganeti import objects
@@ -48,6 +48,8 @@ from ganeti import ssconf
 from ganeti import runtime
 from ganeti import compat
 from ganeti import rpc_defs
+from ganeti import pathutils
+from ganeti import vcluster
 
 # Special module generated at build time
 from ganeti import _generated_rpc
@@ -56,22 +58,11 @@ from ganeti import _generated_rpc
 import ganeti.http.client  # pylint: disable=W0611
 
 
-# Timeout for connecting to nodes (seconds)
-_RPC_CONNECT_TIMEOUT = 5
-
 _RPC_CLIENT_HEADERS = [
   "Content-type: %s" % http.HTTP_APP_JSON,
   "Expect:",
   ]
 
-# Various time constants for the timeout table
-_TMO_URGENT = 60 # one minute
-_TMO_FAST = 5 * 60 # five minutes
-_TMO_NORMAL = 15 * 60 # 15 minutes
-_TMO_SLOW = 3600 # one hour
-_TMO_4HRS = 4 * 3600
-_TMO_1DAY = 86400
-
 #: Special value to describe an offline host
 _OFFLINE = object()
 
@@ -105,7 +96,7 @@ def Shutdown():
 
 
 def _ConfigRpcCurl(curl):
-  noded_cert = str(constants.NODED_CERT_FILE)
+  noded_cert = str(pathutils.NODED_CERT_FILE)
 
   curl.setopt(pycurl.FOLLOWLOCATION, False)
   curl.setopt(pycurl.CAINFO, noded_cert)
@@ -115,7 +106,7 @@ def _ConfigRpcCurl(curl):
   curl.setopt(pycurl.SSLCERT, noded_cert)
   curl.setopt(pycurl.SSLKEYTYPE, "PEM")
   curl.setopt(pycurl.SSLKEY, noded_cert)
-  curl.setopt(pycurl.CONNECTTIMEOUT, _RPC_CONNECT_TIMEOUT)
+  curl.setopt(pycurl.CONNECTTIMEOUT, constants.RPC_CONNECT_TIMEOUT)
 
 
 def RunWithRPC(fn):
@@ -159,7 +150,7 @@ class RpcResult(object):
   """RPC Result class.
 
   This class holds an RPC result. It is needed since in multi-node
-  calls we can't raise an exception just because one one out of many
+  calls we can't raise an exception just because one out of many
   failed, and therefore we use this class to encapsulate the result.
 
   @ivar data: the data payload, for successful results, or None
@@ -239,12 +230,27 @@ class RpcResult(object):
       args = (msg, )
     raise ec(*args) # pylint: disable=W0142
 
+  def Warn(self, msg, feedback_fn):
+    """If the result has failed, call the feedback_fn.
+
+    This is used in cases where an LU wants to warn the
+    user about a failure, but continue anyway.
+
+    """
+    if not self.fail_msg:
+      return
+
+    msg = "%s: %s" % (msg, self.fail_msg)
+    feedback_fn(msg)
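
A minimal usage sketch from an LU's point of view; the specific RPC call and feedback function are illustrative, not prescribed by this patch:

    result = self.rpc.call_node_powercycle(node_name, hypervisor)
    result.Warn("Could not powercycle node %s" % node_name, self.LogWarning)
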
+
 
-def _SsconfResolver(node_list,
+def _SsconfResolver(ssconf_ips, node_list, _,
                     ssc=ssconf.SimpleStore,
                     nslookup_fn=netutils.Hostname.GetIP):
   """Return addresses for given node names.
 
+  @type ssconf_ips: bool
+  @param ssconf_ips: Use the ssconf IPs
   @type node_list: list
   @param node_list: List of node names
   @type ssc: class
@@ -256,16 +262,20 @@ def _SsconfResolver(node_list,
 
   """
   ss = ssc()
-  iplist = ss.GetNodePrimaryIPList()
   family = ss.GetPrimaryIPFamily()
-  ipmap = dict(entry.split() for entry in iplist)
+
+  if ssconf_ips:
+    iplist = ss.GetNodePrimaryIPList()
+    ipmap = dict(entry.split() for entry in iplist)
+  else:
+    ipmap = {}
 
   result = []
   for node in node_list:
     ip = ipmap.get(node)
     if ip is None:
       ip = nslookup_fn(node, family=family)
-    result.append((node, ip))
+    result.append((node, ip, node))
 
   return result
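
With ssconf_ips=False the resolver skips ssconf and resolves every name via DNS; a sketch with injected stubs, so neither ssconf nor DNS is actually hit:

    import socket

    class _FakeSsconf:
      def GetPrimaryIPFamily(self):
        return socket.AF_INET

    _SsconfResolver(False, ["node1.example.com"], None, ssc=_FakeSsconf,
                    nslookup_fn=lambda name, family=None: "192.0.2.1")
    # -> [("node1.example.com", "192.0.2.1", "node1.example.com")]
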
 
@@ -277,54 +287,65 @@ class _StaticResolver:
     """
     self._addresses = addresses
 
-  def __call__(self, hosts):
+  def __call__(self, hosts, _):
     """Returns static addresses for hosts.
 
     """
     assert len(hosts) == len(self._addresses)
-    return zip(hosts, self._addresses)
+    return zip(hosts, self._addresses, hosts)
 
 
-def _CheckConfigNode(name, node):
+def _CheckConfigNode(node_uuid_or_name, node, accept_offline_node):
   """Checks if a node is online.
 
-  @type name: string
-  @param name: Node name
+  @type node_uuid_or_name: string
+  @param node_uuid_or_name: Node UUID or name
   @type node: L{objects.Node} or None
   @param node: Node object
+  @type accept_offline_node: bool
+  @param accept_offline_node: whether to return the primary IP of an offline
+    node instead of reporting it as L{_OFFLINE}
 
   """
   if node is None:
-    # Depend on DNS for name resolution
-    ip = name
-  elif node.offline:
-    ip = _OFFLINE
+    # Assume that the passed parameter was actually a node name, so depend on
+    # DNS for name resolution
+    return (node_uuid_or_name, node_uuid_or_name, node_uuid_or_name)
   else:
-    ip = node.primary_ip
-  return (name, ip)
+    if node.offline and not accept_offline_node:
+      ip = _OFFLINE
+    else:
+      ip = node.primary_ip
+    return (node.name, ip, node_uuid_or_name)
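
A sketch of the three resolution cases; the node object here is constructed by hand, whereas normally it comes from the configuration:

    node = objects.Node(name="node1", primary_ip="192.0.2.1", offline=True)
    _CheckConfigNode("uuid1", None, False)  # ("uuid1", "uuid1", "uuid1")
    _CheckConfigNode("uuid1", node, False)  # ("node1", _OFFLINE, "uuid1")
    _CheckConfigNode("uuid1", node, True)   # ("node1", "192.0.2.1", "uuid1")
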
 
 
-def _NodeConfigResolver(single_node_fn, all_nodes_fn, hosts):
+def _NodeConfigResolver(single_node_fn, all_nodes_fn, node_uuids, opts):
   """Calculate node addresses using configuration.
 
+  Note that strings in node_uuids are treated as node names if the UUID is not
+  found in the configuration.
+
   """
+  accept_offline_node = (opts is rpc_defs.ACCEPT_OFFLINE_NODE)
+
+  assert accept_offline_node or opts is None, "Unknown option"
+
   # Special case for single-host lookups
-  if len(hosts) == 1:
-    (name, ) = hosts
-    return [_CheckConfigNode(name, single_node_fn(name))]
+  if len(node_uuids) == 1:
+    (uuid, ) = node_uuids
+    return [_CheckConfigNode(uuid, single_node_fn(uuid), accept_offline_node)]
   else:
     all_nodes = all_nodes_fn()
-    return [_CheckConfigNode(name, all_nodes.get(name, None))
-            for name in hosts]
+    return [_CheckConfigNode(uuid, all_nodes.get(uuid, None),
+                             accept_offline_node)
+            for uuid in node_uuids]
 
 
 class _RpcProcessor:
   def __init__(self, resolver, port, lock_monitor_cb=None):
     """Initializes this class.
 
-    @param resolver: callable accepting a list of hostnames, returning a list
-      of tuples containing name and IP address (IP address can be the name or
-      the special value L{_OFFLINE} to mark offline machines)
+    @param resolver: callable accepting a list of node UUIDs or hostnames,
+      returning a list of tuples containing name, IP address and original name
+      of the resolved node. IP address can be the name or the special value
+      L{_OFFLINE} to mark offline machines.
     @type port: int
     @param port: TCP port
     @param lock_monitor_cb: Callable for registering with lock monitor
@@ -338,20 +359,31 @@ class _RpcProcessor:
   def _PrepareRequests(hosts, port, procedure, body, read_timeout):
     """Prepares requests by sorting offline hosts into separate list.
 
+    @type body: dict
+    @param body: a dictionary with per-host body data
+
     """
     results = {}
     requests = {}
 
-    for (name, ip) in hosts:
+    assert isinstance(body, dict)
+    assert len(body) == len(hosts)
+    assert compat.all(isinstance(v, str) for v in body.values())
+    assert frozenset(map(lambda x: x[2], hosts)) == frozenset(body.keys()), \
+        "%s != %s" % (hosts, body.keys())
+
+    for (name, ip, original_name) in hosts:
       if ip is _OFFLINE:
         # Node is marked as offline
-        results[name] = RpcResult(node=name, offline=True, call=procedure)
+        results[original_name] = RpcResult(node=name,
+                                           offline=True,
+                                           call=procedure)
       else:
-        requests[name] = \
+        requests[original_name] = \
           http.client.HttpClientRequest(str(ip), port,
-                                        http.HTTP_PUT, str("/%s" % procedure),
+                                        http.HTTP_POST, str("/%s" % procedure),
                                         headers=_RPC_CLIENT_HEADERS,
-                                        post_data=body,
+                                        post_data=body[original_name],
                                         read_timeout=read_timeout,
                                         nicename="%s/%s" % (name, procedure),
                                         curl_config_fn=_ConfigRpcCurl)
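
The body is now a per-host dictionary keyed by the original (pre-resolution) name, i.e. the third element of each resolver tuple; a sketch of the contract:

    hosts = [("node1", "192.0.2.1", "uuid1"), ("node2", _OFFLINE, "uuid2")]
    body = {"uuid1": "[]", "uuid2": "[]"}
    (results, requests) = _RpcProcessor._PrepareRequests(
      hosts, netutils.GetDaemonPort(constants.NODED), "version", body, 60)
    # "uuid2" is answered locally with an offline RpcResult; only "uuid1"
    # gets an HTTP request, likewise keyed by its original name.
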
@@ -382,26 +414,31 @@ class _RpcProcessor:
 
     return results
 
-  def __call__(self, hosts, procedure, body, read_timeout=None,
-               _req_process_fn=http.client.ProcessRequests):
+  def __call__(self, nodes, procedure, body, read_timeout, resolver_opts,
+               _req_process_fn=None):
     """Makes an RPC request to a number of nodes.
 
-    @type hosts: sequence
-    @param hosts: Hostnames
+    @type nodes: sequence
+    @param nodes: node UUIDs or Hostnames
     @type procedure: string
     @param procedure: Request path
-    @type body: string
-    @param body: Request body
+    @type body: dictionary
+    @param body: dictionary with request bodies per host
     @type read_timeout: int or None
     @param read_timeout: Read timeout for request
+    @rtype: dictionary
+    @return: a dictionary mapping the names/UUIDs passed in C{nodes} to
+      rpc.RpcResult objects
 
     """
     assert read_timeout is not None, \
       "Missing RPC read timeout for procedure '%s'" % procedure
 
+    if _req_process_fn is None:
+      _req_process_fn = http.client.ProcessRequests
+
     (results, requests) = \
-      self._PrepareRequests(self._resolver(hosts), self._port, procedure,
-                            str(body), read_timeout)
+      self._PrepareRequests(self._resolver(nodes, resolver_opts), self._port,
+                            procedure, body, read_timeout)
 
     _req_process_fn(requests.values(), lock_monitor_cb=self._lock_monitor_cb)
 
@@ -411,13 +448,15 @@ class _RpcProcessor:
 
 
 class _RpcClientBase:
-  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None):
+  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None,
+               _req_process_fn=None):
     """Initializes this class.
 
     """
-    self._proc = _RpcProcessor(resolver,
-                               netutils.GetDaemonPort(constants.NODED),
-                               lock_monitor_cb=lock_monitor_cb)
+    proc = _RpcProcessor(resolver,
+                         netutils.GetDaemonPort(constants.NODED),
+                         lock_monitor_cb=lock_monitor_cb)
+    self._proc = compat.partial(proc, _req_process_fn=_req_process_fn)
     self._encoder = compat.partial(self._EncodeArg, encoder_fn)
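
The new _req_process_fn hook lets tests inject a fake request processor instead of monkey-patching http.client.ProcessRequests. A purely illustrative fake that fabricates a successful empty response (the attribute names mirror what the HTTP client sets on completed requests and are an assumption here):

    def _FakeProcessRequests(requests, lock_monitor_cb=None):
      for req in requests:
        req.success = True
        req.resp_status_code = http.HTTP_OK
        req.resp_body = serializer.DumpJson((True, None))
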
 
   @staticmethod
@@ -430,17 +469,47 @@ class _RpcClientBase:
     else:
       return encoder_fn(argkind)(value)
 
-  def _Call(self, cdef, node_list, timeout, args):
+  def _Call(self, cdef, node_list, args):
     """Entry point for automatically generated RPC wrappers.
 
     """
-    (procedure, _, _, argdefs, _, _) = cdef
+    (procedure, _, resolver_opts, timeout, argdefs,
+     prep_fn, postproc_fn, _) = cdef
+
+    if callable(timeout):
+      read_timeout = timeout(args)
+    else:
+      read_timeout = timeout
+
+    if callable(resolver_opts):
+      req_resolver_opts = resolver_opts(args)
+    else:
+      req_resolver_opts = resolver_opts
 
-    body = serializer.DumpJson(map(self._encoder,
-                                   zip(map(compat.snd, argdefs), args)),
-                               indent=False)
+    if len(args) != len(argdefs):
+      raise errors.ProgrammerError("Number of passed arguments doesn't match")
 
-    return self._proc(node_list, procedure, body, read_timeout=timeout)
+    enc_args = map(self._encoder, zip(map(compat.snd, argdefs), args))
+    if prep_fn is None:
+      # for a no-op prep_fn, we serialise the body once, and then we
+      # reuse it in the dictionary values
+      body = serializer.DumpJson(enc_args)
+      pnbody = dict((n, body) for n in node_list)
+    else:
+      # for a custom prep_fn, we pass the encoded arguments and the
+      # node name to the prep_fn, and we serialise its return value
+      assert callable(prep_fn)
+      pnbody = dict((n, serializer.DumpJson(prep_fn(n, enc_args)))
+                    for n in node_list)
+
+    result = self._proc(node_list, procedure, pnbody, read_timeout,
+                        req_resolver_opts)
+
+    if postproc_fn:
+      return dict(map(lambda (key, value): (key, postproc_fn(value)),
+                      result.items()))
+    else:
+      return result
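
For reference, _Call now unpacks eight positional fields from each generated call definition; an illustrative definition in that shape, not a real one from rpc_defs:

    cdef = ("test_call",             # procedure name
            None,                    # call kind, unused here
            None,                    # resolver options: value or callable(args)
            60,                      # read timeout: value or callable(args)
            [("data", None, None)],  # argument definitions
            None,                    # prep_fn(node, encoded_args) or None
            None,                    # postproc_fn(result) or None
            None)                    # description
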
 
 
 def _ObjectToDict(value):
@@ -467,14 +536,22 @@ def _EncodeNodeToDiskDict(value):
               for name, disks in value.items())
 
 
-def _PrepareFileUpload(filename):
+def _PrepareFileUpload(getents_fn, filename):
   """Loads a file and prepares it for an upload to nodes.
 
   """
-  data = _Compress(utils.ReadFile(filename))
-  st = os.stat(filename)
-  getents = runtime.GetEnts()
-  return [filename, data, st.st_mode, getents.LookupUid(st.st_uid),
+  statcb = utils.FileStatHelper()
+  data = _Compress(utils.ReadFile(filename, preread=statcb))
+  st = statcb.st
+
+  if getents_fn is None:
+    getents_fn = runtime.GetEnts
+
+  getents = getents_fn()
+
+  virt_filename = vcluster.MakeVirtualPath(filename)
+
+  return [virt_filename, data, st.st_mode, getents.LookupUid(st.st_uid),
           getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
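
A sketch of the new, testable upload flow; the getents stub is hypothetical and the file path is just an example:

    class _FakeGetents:
      def LookupUid(self, uid):
        return uid
      def LookupGid(self, gid):
        return gid

    (virt_name, data, mode, uid, gid, atime, mtime) = \
      _PrepareFileUpload(_FakeGetents, "/etc/hosts")
    # virt_name is the vcluster-virtualized path, data the compressed payload
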
 
 
@@ -515,12 +592,200 @@ def _EncodeBlockdevRename(value):
   return [(d.ToDict(), uid) for d, uid in value]
 
 
+def _AddSpindlesToLegacyNodeInfo(result, space_info):
+  """Extracts the spindle information from the space info and adds
+  it to the result dictionary.
+
+  @type result: dict of strings
+  @param result: dictionary holding the result of the legacy node info
+  @type space_info: list of dicts of strings
+  @param space_info: list, each row holding space information of one storage
+    unit
+  @rtype: None
+  @return: does not return anything, manipulates the C{result} variable
+
+  """
+  lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
+      space_info, constants.ST_LVM_PV)
+  if lvm_pv_info:
+    result["spindles_free"] = lvm_pv_info["storage_free"]
+    result["spindles_total"] = lvm_pv_info["storage_size"]
+  else:
+    raise errors.OpExecError("No spindle storage information available.")
+
+
+def _AddDefaultStorageInfoToLegacyNodeInfo(result, space_info):
+  """Extracts the storage space information of the default storage type from
+  the space info and adds it to the result dictionary.
+
+  @see: C{_AddSpindlesToLegacyNodeInfo} for parameter information.
+
+  """
+  # Check if there is at least one row for non-spindle storage info.
+  no_defaults = (len(space_info) < 1) or \
+      (space_info[0]["type"] == constants.ST_LVM_PV and len(space_info) == 1)
+
+  default_space_info = None
+  if no_defaults:
+    logging.warning("No storage info provided for default storage type.")
+  else:
+    default_space_info = space_info[0]
+
+  if default_space_info:
+    result["name"] = default_space_info["name"]
+    result["storage_free"] = default_space_info["storage_free"]
+    result["storage_size"] = default_space_info["storage_size"]
+
+
+def MakeLegacyNodeInfo(data, require_spindles=False):
+  """Formats the data returned by L{rpc.RpcRunner.call_node_info}.
+
+  Converts the data into a single dictionary. This is fine for most use cases,
+  but some require information from more than one volume group or hypervisor.
+
+  @param require_spindles: add spindle storage information to the legacy node
+      info
+
+  """
+  (bootid, space_info, (hv_info, )) = data
+
+  ret = utils.JoinDisjointDicts(hv_info, {"bootid": bootid})
+
+  if require_spindles:
+    _AddSpindlesToLegacyNodeInfo(ret, space_info)
+  _AddDefaultStorageInfoToLegacyNodeInfo(ret, space_info)
+
+  return ret
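
A sketch of the input shape this expects (all values made up):

    data = ("bootid-1234",
            [{"type": constants.ST_LVM_VG, "name": "xenvg",
              "storage_free": 1024, "storage_size": 4096}],
            ({"memory_total": 8192, "memory_free": 4096},))
    info = MakeLegacyNodeInfo(data)
    # info flattens "bootid", the hypervisor keys and "name",
    # "storage_free" and "storage_size" into one dictionary
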
+
+
+def _AnnotateDParamsDRBD(disk, (drbd_params, data_params, meta_params)):
+  """Annotates just DRBD disks layouts.
+
+  """
+  assert disk.dev_type == constants.DT_DRBD8
+
+  disk.params = objects.FillDict(drbd_params, disk.params)
+  (dev_data, dev_meta) = disk.children
+  dev_data.params = objects.FillDict(data_params, dev_data.params)
+  dev_meta.params = objects.FillDict(meta_params, dev_meta.params)
+
+  return disk
+
+
+def _AnnotateDParamsGeneric(disk, (params, )):
+  """Generic disk parameter annotation routine.
+
+  """
+  assert disk.dev_type != constants.DT_DRBD8
+
+  disk.params = objects.FillDict(params, disk.params)
+
+  return disk
+
+
+def AnnotateDiskParams(template, disks, disk_params):
+  """Annotates the disk objects with the disk parameters.
+
+  @param template: The disk template used
+  @param disks: The list of disk objects to annotate
+  @param disk_params: The disk parameters for annotation
+  @return: A list of annotated disk objects
+
+  """
+  ld_params = objects.Disk.ComputeLDParams(template, disk_params)
+
+  if template == constants.DT_DRBD8:
+    annotation_fn = _AnnotateDParamsDRBD
+  elif template == constants.DT_DISKLESS:
+    annotation_fn = lambda disk, _: disk
+  else:
+    annotation_fn = _AnnotateDParamsGeneric
+
+  return [annotation_fn(disk.Copy(), ld_params) for disk in disks]
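
Typical usage, matching what the new _DisksDictDP encoder further down does (instance and cfg assumed in scope):

    anno_disks = AnnotateDiskParams(instance.disk_template, instance.disks,
                                    cfg.GetInstanceDiskParams(instance))
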
+
+
+def _GetExclusiveStorageFlag(cfg, node_uuid):
+  """Returns the exclusive storage flag of the given node.
+
+  """
+  ni = cfg.GetNodeInfo(node_uuid)
+  if ni is None:
+    raise errors.OpPrereqError("Invalid node name %s" % node_uuid,
+                               errors.ECODE_NOENT)
+  return cfg.GetNdParams(ni)[constants.ND_EXCLUSIVE_STORAGE]
+
+
+def _AddExclusiveStorageFlagToLvmStorageUnits(storage_units, es_flag):
+  """Adds the exclusive storage flag to lvm units.
+
+  This function creates a copy of the storage_units list, with the
+  es_flag being added to all lvm storage units.
+
+  @type storage_units: list of pairs (string, string)
+  @param storage_units: list of 'raw' storage units, consisting only of
+    (storage_type, storage_key)
+  @type es_flag: boolean
+  @param es_flag: exclusive storage flag
+  @rtype: list of tuples (string, string, list)
+  @return: list of storage units (storage_type, storage_key, params) with
+    the params containing the es_flag for lvm-vg storage units
+
+  """
+  result = []
+  for (storage_type, storage_key) in storage_units:
+    if storage_type in [constants.ST_LVM_VG, constants.ST_LVM_PV]:
+      result.append((storage_type, storage_key, [es_flag]))
+    else:
+      result.append((storage_type, storage_key, []))
+  return result
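
For example (with the storage type constants spelled out in the result):

    units = [(constants.ST_LVM_VG, "xenvg"), (constants.ST_FILE, "/srv")]
    _AddExclusiveStorageFlagToLvmStorageUnits(units, True)
    # -> [("lvm-vg", "xenvg", [True]), ("file", "/srv", [])]
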
+
+
+def GetExclusiveStorageForNodes(cfg, node_uuids):
+  """Return the exclusive storage flag for all the given nodes.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: cluster configuration
+  @type node_uuids: list or tuple
+  @param node_uuids: node UUIDs for which to read the flag
+  @rtype: dict
+  @return: mapping from node uuids to exclusive storage flags
+  @raise errors.OpPrereqError: if any given node name has no corresponding
+    node
+
+  """
+  getflag = lambda n: _GetExclusiveStorageFlag(cfg, n)
+  flags = map(getflag, node_uuids)
+  return dict(zip(node_uuids, flags))
+
+
+def PrepareStorageUnitsForNodes(cfg, storage_units, node_uuids):
+  """Return the lvm storage unit for all the given nodes.
+
+  Main purpose of this function is to map the exclusive storage flag, which
+  can be different for each node, to the default LVM storage unit.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: cluster configuration
+  @type storage_units: list of pairs (string, string)
+  @param storage_units: list of 'raw' storage units, e.g. pairs of
+    (storage_type, storage_key)
+  @type node_uuids: list or tuple
+  @param node_uuids: node UUIDs for which to read the flag
+  @rtype: dict
+  @return: mapping from node uuids to a list of storage units which include
+    the exclusive storage flag for lvm storage
+  @raise errors.OpPrereqError: if any given node name has no corresponding
+    node
+
+  """
+  getunit = lambda n: _AddExclusiveStorageFlagToLvmStorageUnits(
+      storage_units, _GetExclusiveStorageFlag(cfg, n))
+  flags = map(getunit, node_uuids)
+  return dict(zip(node_uuids, flags))
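
A sketch of the two helpers together (cfg and node_uuids assumed to exist):

    es_flags = GetExclusiveStorageForNodes(cfg, node_uuids)
    units = PrepareStorageUnitsForNodes(
      cfg, [(constants.ST_LVM_VG, "xenvg")], node_uuids)
    # units[uuid] == [("lvm-vg", "xenvg", [es_flags[uuid]])]
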
+
+
 #: Generic encoders
 _ENCODERS = {
   rpc_defs.ED_OBJECT_DICT: _ObjectToDict,
   rpc_defs.ED_OBJECT_DICT_LIST: _ObjectListToDict,
   rpc_defs.ED_NODE_TO_DISK_DICT: _EncodeNodeToDiskDict,
-  rpc_defs.ED_FILE_DETAILS: _PrepareFileUpload,
   rpc_defs.ED_COMPRESS: _Compress,
   rpc_defs.ED_FINALIZE_EXPORT_DISKS: _PrepareFinalizeExportDisks,
   rpc_defs.ED_IMPEXP_IO: _EncodeImportExportIO,
@@ -531,42 +796,68 @@ _ENCODERS = {
 class RpcRunner(_RpcClientBase,
                 _generated_rpc.RpcClientDefault,
                 _generated_rpc.RpcClientBootstrap,
+                _generated_rpc.RpcClientDnsOnly,
                 _generated_rpc.RpcClientConfig):
   """RPC runner class.
 
   """
-  def __init__(self, context):
+  def __init__(self, cfg, lock_monitor_cb, _req_process_fn=None, _getents=None):
     """Initialized the RPC runner.
 
-    @type context: C{masterd.GanetiContext}
-    @param context: Ganeti context
+    @type cfg: L{config.ConfigWriter}
+    @param cfg: Configuration
+    @type lock_monitor_cb: callable
+    @param lock_monitor_cb: Lock monitor callback
 
     """
-    self._cfg = context.cfg
+    self._cfg = cfg
 
     encoders = _ENCODERS.copy()
 
-    # Add encoders requiring configuration object
     encoders.update({
+      # Encoders requiring configuration object
       rpc_defs.ED_INST_DICT: self._InstDict,
-      rpc_defs.ED_INST_DICT_HVP_BEP: self._InstDictHvpBep,
-      rpc_defs.ED_INST_DICT_OSP: self._InstDictOsp,
+      rpc_defs.ED_INST_DICT_HVP_BEP_DP: self._InstDictHvpBepDp,
+      rpc_defs.ED_INST_DICT_OSP_DP: self._InstDictOspDp,
+      rpc_defs.ED_NIC_DICT: self._NicDict,
+
+      # Encoders annotating disk parameters
+      rpc_defs.ED_DISKS_DICT_DP: self._DisksDictDP,
+      rpc_defs.ED_MULTI_DISKS_DICT_DP: self._MultiDiskDictDP,
+      rpc_defs.ED_SINGLE_DISK_DICT_DP: self._SingleDiskDictDP,
+
+      # Encoders with special requirements
+      rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
       })
 
     # Resolver using configuration
-    resolver = compat.partial(_NodeConfigResolver, self._cfg.GetNodeInfo,
-                              self._cfg.GetAllNodesInfo)
+    resolver = compat.partial(_NodeConfigResolver, cfg.GetNodeInfo,
+                              cfg.GetAllNodesInfo)
 
     # Pylint doesn't recognize multiple inheritance properly, see
     # <http://www.logilab.org/ticket/36586> and
     # <http://www.logilab.org/ticket/35642>
     # pylint: disable=W0233
     _RpcClientBase.__init__(self, resolver, encoders.get,
-                            lock_monitor_cb=context.glm.AddToLockMonitor)
+                            lock_monitor_cb=lock_monitor_cb,
+                            _req_process_fn=_req_process_fn)
     _generated_rpc.RpcClientConfig.__init__(self)
     _generated_rpc.RpcClientBootstrap.__init__(self)
+    _generated_rpc.RpcClientDnsOnly.__init__(self)
     _generated_rpc.RpcClientDefault.__init__(self)
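
Callers that previously handed over a whole masterd context now pass the pieces explicitly, e.g.:

    runner = rpc.RpcRunner(context.cfg, context.glm.AddToLockMonitor)
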
 
+  def _NicDict(self, nic):
+    """Convert the given nic to a dict and encapsulate netinfo
+
+    """
+    n = copy.deepcopy(nic)
+    if n.network:
+      net_uuid = self._cfg.LookupNetwork(n.network)
+      if net_uuid:
+        nobj = self._cfg.GetNetwork(net_uuid)
+        n.netinfo = objects.Network.ToDict(nobj)
+    return n.ToDict()
+
   def _InstDict(self, instance, hvp=None, bep=None, osp=None):
     """Convert the given instance to a dict.
 
@@ -597,96 +888,54 @@ class RpcRunner(_RpcClientBase,
     idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
     if osp is not None:
       idict["osparams"].update(osp)
+    idict["disks"] = self._DisksDictDP((instance.disks, instance))
     for nic in idict["nics"]:
-      nic['nicparams'] = objects.FillDict(
+      nic["nicparams"] = objects.FillDict(
         cluster.nicparams[constants.PP_DEFAULT],
-        nic['nicparams'])
+        nic["nicparams"])
+      network = nic.get("network", None)
+      if network:
+        net_uuid = self._cfg.LookupNetwork(network)
+        if net_uuid:
+          nobj = self._cfg.GetNetwork(net_uuid)
+          nic["netinfo"] = objects.Network.ToDict(nobj)
     return idict
 
-  def _InstDictHvpBep(self, (instance, hvp, bep)):
+  def _InstDictHvpBepDp(self, (instance, hvp, bep)):
     """Wrapper for L{_InstDict}.
 
     """
     return self._InstDict(instance, hvp=hvp, bep=bep)
 
-  def _InstDictOsp(self, (instance, osparams)):
+  def _InstDictOspDp(self, (instance, osparams)):
     """Wrapper for L{_InstDict}.
 
     """
     return self._InstDict(instance, osp=osparams)
 
-  @staticmethod
-  def _MigrationStatusPostProc(result):
-    if not result.fail_msg and result.payload is not None:
-      result.payload = objects.MigrationStatus.FromDict(result.payload)
-    return result
-
-  @staticmethod
-  def _BlockdevFindPostProc(result):
-    if not result.fail_msg and result.payload is not None:
-      result.payload = objects.BlockDevStatus.FromDict(result.payload)
-    return result
-
-  @staticmethod
-  def _BlockdevGetMirrorStatusPostProc(result):
-    if not result.fail_msg:
-      result.payload = [objects.BlockDevStatus.FromDict(i)
-                        for i in result.payload]
-    return result
-
-  @staticmethod
-  def _BlockdevGetMirrorStatusMultiPostProc(result):
-    for nres in result.values():
-      if nres.fail_msg:
-        continue
-
-      for idx, (success, status) in enumerate(nres.payload):
-        if success:
-          nres.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
-
-    return result
-
-  @staticmethod
-  def _OsGetPostProc(result):
-    if not result.fail_msg and isinstance(result.payload, dict):
-      result.payload = objects.OS.FromDict(result.payload)
-    return result
-
-  @staticmethod
-  def _ImpExpStatusPostProc(result):
-    """Post-processor for import/export status.
-
-    @rtype: Payload containing list of L{objects.ImportExportStatus} instances
-    @return: Returns a list of the state of each named import/export or None if
-             a status couldn't be retrieved
+  def _DisksDictDP(self, (disks, instance)):
+    """Wrapper for L{AnnotateDiskParams}.
 
     """
-    if not result.fail_msg:
-      decoded = []
-
-      for i in result.payload:
-        if i is None:
-          decoded.append(None)
-          continue
-        decoded.append(objects.ImportExportStatus.FromDict(i))
-
-      result.payload = decoded
-
-    return result
+    diskparams = self._cfg.GetInstanceDiskParams(instance)
+    return [disk.ToDict()
+            for disk in AnnotateDiskParams(instance.disk_template,
+                                           disks, diskparams)]
 
-  #
-  # Begin RPC calls
-  #
+  def _MultiDiskDictDP(self, disks_insts):
+    """Wrapper for L{AnnotateDiskParams}.
 
-  def call_test_delay(self, node_list, duration): # pylint: disable=W0221
-    """Sleep for a fixed time on given node(s).
+    Supports a list of (disk, instance) tuples.
+
+    """
+    return [disk for disk_inst in disks_insts
+            for disk in self._DisksDictDP(disk_inst)]
 
-    This is a multi-node call.
+  def _SingleDiskDictDP(self, (disk, instance)):
+    """Wrapper for L{AnnotateDiskParams}.
 
     """
-    # TODO: Use callable timeout calculation
-    return _generated_rpc.RpcClientDefault.call_test_delay(self,
-      node_list, duration, read_timeout=int(duration + 5))
+    (anno_disk,) = self._DisksDictDP(([disk], instance))
+    return anno_disk
 
 
 class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
@@ -698,7 +947,7 @@ class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
 
     """
     if address_list is None:
-      resolver = _SsconfResolver
+      resolver = compat.partial(_SsconfResolver, True)
     else:
       # Caller provided an address list
       resolver = _StaticResolver(address_list)
@@ -708,7 +957,9 @@ class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
     _generated_rpc.RpcClientJobQueue.__init__(self)
 
 
-class BootstrapRunner(_RpcClientBase, _generated_rpc.RpcClientBootstrap):
+class BootstrapRunner(_RpcClientBase,
+                      _generated_rpc.RpcClientBootstrap,
+                      _generated_rpc.RpcClientDnsOnly):
   """RPC wrappers for bootstrapping.
 
   """
@@ -716,15 +967,35 @@ class BootstrapRunner(_RpcClientBase, _generated_rpc.RpcClientBootstrap):
     """Initializes this class.
 
     """
-    _RpcClientBase.__init__(self, _SsconfResolver, _ENCODERS.get)
+    # Pylint doesn't recognize multiple inheritance properly, see
+    # <http://www.logilab.org/ticket/36586> and
+    # <http://www.logilab.org/ticket/35642>
+    # pylint: disable=W0233
+    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, True),
+                            _ENCODERS.get)
     _generated_rpc.RpcClientBootstrap.__init__(self)
+    _generated_rpc.RpcClientDnsOnly.__init__(self)
+
+
+class DnsOnlyRunner(_RpcClientBase, _generated_rpc.RpcClientDnsOnly):
+  """RPC wrappers for calls using only DNS.
+
+  """
+  def __init__(self):
+    """Initialize this class.
+
+    """
+    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, False),
+                            _ENCODERS.get)
+    _generated_rpc.RpcClientDnsOnly.__init__(self)
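
This gives bootstrap-time code a runner that works before any configuration or ssconf data exists; a sketch, assuming the version call belongs to the DNS-only group defined in rpc_defs:

    result = rpc.DnsOnlyRunner().call_version([node_name])[node_name]
    result.Raise("Cannot get version from node %s" % node_name)
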
 
 
 class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
   """RPC wrappers for L{config}.
 
   """
-  def __init__(self, context, address_list):
+  def __init__(self, context, address_list, _req_process_fn=None,
+               _getents=None):
     """Initializes this class.
 
     """
@@ -734,11 +1005,18 @@ class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
       lock_monitor_cb = None
 
     if address_list is None:
-      resolver = _SsconfResolver
+      resolver = compat.partial(_SsconfResolver, True)
     else:
       # Caller provided an address list
       resolver = _StaticResolver(address_list)
 
-    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
-                            lock_monitor_cb=lock_monitor_cb)
+    encoders = _ENCODERS.copy()
+
+    encoders.update({
+      rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
+      })
+
+    _RpcClientBase.__init__(self, resolver, encoders.get,
+                            lock_monitor_cb=lock_monitor_cb,
+                            _req_process_fn=_req_process_fn)
     _generated_rpc.RpcClientConfig.__init__(self)