Export extractExTags and updateExclTags
[ganeti-local] / lib / rpc.py
index cd58563..c934ba2 100644 (file)
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # if they need to start using instance attributes
 # R0904: Too many public methods
 
-import os
 import logging
 import zlib
 import base64
 import pycurl
 import threading
+import copy
 
 from ganeti import utils
 from ganeti import objects
@@ -47,6 +47,9 @@ from ganeti import netutils
 from ganeti import ssconf
 from ganeti import runtime
 from ganeti import compat
+from ganeti import rpc_defs
+from ganeti import pathutils
+from ganeti import vcluster
 
 # Special module generated at build time
 from ganeti import _generated_rpc
@@ -55,32 +58,11 @@ from ganeti import _generated_rpc
 import ganeti.http.client  # pylint: disable=W0611
 
 
-# Timeout for connecting to nodes (seconds)
-_RPC_CONNECT_TIMEOUT = 5
-
 _RPC_CLIENT_HEADERS = [
   "Content-type: %s" % http.HTTP_APP_JSON,
   "Expect:",
   ]
 
-# Various time constants for the timeout table
-_TMO_URGENT = 60 # one minute
-_TMO_FAST = 5 * 60 # five minutes
-_TMO_NORMAL = 15 * 60 # 15 minutes
-_TMO_SLOW = 3600 # one hour
-_TMO_4HRS = 4 * 3600
-_TMO_1DAY = 86400
-
-# Timeout table that will be built later by decorators
-# Guidelines for choosing timeouts:
-# - call used during watcher: timeout -> 1min, _TMO_URGENT
-# - trivial (but be sure it is trivial) (e.g. reading a file): 5min, _TMO_FAST
-# - other calls: 15 min, _TMO_NORMAL
-# - special calls (instance add, etc.): either _TMO_SLOW (1h) or huge timeouts
-
-_TIMEOUTS = {
-}
-
 #: Special value to describe an offline host
 _OFFLINE = object()
 
@@ -114,7 +96,7 @@ def Shutdown():
 
 
 def _ConfigRpcCurl(curl):
-  noded_cert = str(constants.NODED_CERT_FILE)
+  noded_cert = str(pathutils.NODED_CERT_FILE)
 
   curl.setopt(pycurl.FOLLOWLOCATION, False)
   curl.setopt(pycurl.CAINFO, noded_cert)
@@ -124,22 +106,7 @@ def _ConfigRpcCurl(curl):
   curl.setopt(pycurl.SSLCERT, noded_cert)
   curl.setopt(pycurl.SSLKEYTYPE, "PEM")
   curl.setopt(pycurl.SSLKEY, noded_cert)
-  curl.setopt(pycurl.CONNECTTIMEOUT, _RPC_CONNECT_TIMEOUT)
-
-
-def _RpcTimeout(secs):
-  """Timeout decorator.
-
-  When applied to a rpc call_* function, it updates the global timeout
-  table with the given function/timeout.
-
-  """
-  def decorator(f):
-    name = f.__name__
-    assert name.startswith("call_")
-    _TIMEOUTS[name[len("call_"):]] = secs
-    return f
-  return decorator
+  curl.setopt(pycurl.CONNECTTIMEOUT, constants.RPC_CONNECT_TIMEOUT)
 
 
 def RunWithRPC(fn):
@@ -183,7 +150,7 @@ class RpcResult(object):
   """RPC Result class.
 
   This class holds an RPC result. It is needed since in multi-node
-  calls we can't raise an exception just because one one out of many
+  calls we can't raise an exception just because one out of many
   failed, and therefore we use this class to encapsulate the result.
 
   @ivar data: the data payload, for successful results, or None
@@ -263,12 +230,27 @@ class RpcResult(object):
       args = (msg, )
     raise ec(*args) # pylint: disable=W0142
 
+  def Warn(self, msg, feedback_fn):
+    """If the result has failed, call the feedback_fn.
+
+    This is used in cases where the LU wants to warn the
+    user about a failure, but continue anyway.
+
+    """
+    if not self.fail_msg:
+      return
+
+    msg = "%s: %s" % (msg, self.fail_msg)
+    feedback_fn(msg)
+
 
-def _SsconfResolver(node_list,
+def _SsconfResolver(ssconf_ips, node_list, _,
                     ssc=ssconf.SimpleStore,
                     nslookup_fn=netutils.Hostname.GetIP):
   """Return addresses for given node names.
 
+  @type ssconf_ips: bool
+  @param ssconf_ips: Use the ssconf IPs
   @type node_list: list
   @param node_list: List of node names
   @type ssc: class
@@ -280,16 +262,20 @@ def _SsconfResolver(node_list,
 
   """
   ss = ssc()
-  iplist = ss.GetNodePrimaryIPList()
   family = ss.GetPrimaryIPFamily()
-  ipmap = dict(entry.split() for entry in iplist)
+
+  if ssconf_ips:
+    iplist = ss.GetNodePrimaryIPList()
+    ipmap = dict(entry.split() for entry in iplist)
+  else:
+    ipmap = {}
 
   result = []
   for node in node_list:
     ip = ipmap.get(node)
     if ip is None:
       ip = nslookup_fn(node, family=family)
-    result.append((node, ip))
+    result.append((node, ip, node))
 
   return result
 
@@ -301,54 +287,65 @@ class _StaticResolver:
     """
     self._addresses = addresses
 
-  def __call__(self, hosts):
+  def __call__(self, hosts, _):
     """Returns static addresses for hosts.
 
     """
     assert len(hosts) == len(self._addresses)
-    return zip(hosts, self._addresses)
+    return zip(hosts, self._addresses, hosts)
 
 
-def _CheckConfigNode(name, node):
+def _CheckConfigNode(node_uuid_or_name, node, accept_offline_node):
   """Checks if a node is online.
 
-  @type name: string
-  @param name: Node name
+  @type node_uuid_or_name: string
+  @param node_uuid_or_name: Node UUID or node name
   @type node: L{objects.Node} or None
   @param node: Node object
 
   """
   if node is None:
-    # Depend on DNS for name resolution
-    ip = name
-  elif node.offline:
-    ip = _OFFLINE
+    # Assume that the passed parameter was actually a node name, so depend on
+    # DNS for name resolution
+    return (node_uuid_or_name, node_uuid_or_name, node_uuid_or_name)
   else:
-    ip = node.primary_ip
-  return (name, ip)
+    if node.offline and not accept_offline_node:
+      ip = _OFFLINE
+    else:
+      ip = node.primary_ip
+    return (node.name, ip, node_uuid_or_name)
 
 
-def _NodeConfigResolver(single_node_fn, all_nodes_fn, hosts):
+def _NodeConfigResolver(single_node_fn, all_nodes_fn, node_uuids, opts):
   """Calculate node addresses using configuration.
 
+  Note that strings in node_uuids are treated as node names if the UUID is not
+  found in the configuration.
+
   """
+  accept_offline_node = (opts is rpc_defs.ACCEPT_OFFLINE_NODE)
+
+  assert accept_offline_node or opts is None, "Unknown option"
+
   # Special case for single-host lookups
-  if len(hosts) == 1:
-    (name, ) = hosts
-    return [_CheckConfigNode(name, single_node_fn(name))]
+  if len(node_uuids) == 1:
+    (uuid, ) = node_uuids
+    return [_CheckConfigNode(uuid, single_node_fn(uuid), accept_offline_node)]
   else:
     all_nodes = all_nodes_fn()
-    return [_CheckConfigNode(name, all_nodes.get(name, None))
-            for name in hosts]
+    return [_CheckConfigNode(uuid, all_nodes.get(uuid, None),
+                             accept_offline_node)
+            for uuid in node_uuids]
 
 
 class _RpcProcessor:
   def __init__(self, resolver, port, lock_monitor_cb=None):
     """Initializes this class.
 
-    @param resolver: callable accepting a list of hostnames, returning a list
-      of tuples containing name and IP address (IP address can be the name or
-      the special value L{_OFFLINE} to mark offline machines)
+    @param resolver: callable accepting a list of node UUIDs or hostnames,
+      returning a list of tuples containing name, IP address and original name
+      of the resolved node. IP address can be the name or the special value
+      L{_OFFLINE} to mark offline machines.
     @type port: int
     @param port: TCP port
     @param lock_monitor_cb: Callable for registering with lock monitor
@@ -362,20 +359,31 @@ class _RpcProcessor:
   def _PrepareRequests(hosts, port, procedure, body, read_timeout):
     """Prepares requests by sorting offline hosts into separate list.
 
+    @type body: dict
+    @param body: a dictionary with per-host body data
+
     """
     results = {}
     requests = {}
 
-    for (name, ip) in hosts:
+    assert isinstance(body, dict)
+    assert len(body) == len(hosts)
+    assert compat.all(isinstance(v, str) for v in body.values())
+    assert frozenset(map(lambda x: x[2], hosts)) == frozenset(body.keys()), \
+        "%s != %s" % (hosts, body.keys())
+
+    for (name, ip, original_name) in hosts:
       if ip is _OFFLINE:
         # Node is marked as offline
-        results[name] = RpcResult(node=name, offline=True, call=procedure)
+        results[original_name] = RpcResult(node=name,
+                                           offline=True,
+                                           call=procedure)
       else:
-        requests[name] = \
+        requests[original_name] = \
           http.client.HttpClientRequest(str(ip), port,
-                                        http.HTTP_PUT, str("/%s" % procedure),
+                                        http.HTTP_POST, str("/%s" % procedure),
                                         headers=_RPC_CLIENT_HEADERS,
-                                        post_data=body,
+                                        post_data=body[original_name],
                                         read_timeout=read_timeout,
                                         nicename="%s/%s" % (name, procedure),
                                         curl_config_fn=_ConfigRpcCurl)
@@ -406,29 +414,31 @@ class _RpcProcessor:
 
     return results
 
-  def __call__(self, hosts, procedure, body, read_timeout=None,
-               _req_process_fn=http.client.ProcessRequests):
+  def __call__(self, nodes, procedure, body, read_timeout, resolver_opts,
+               _req_process_fn=None):
     """Makes an RPC request to a number of nodes.
 
-    @type hosts: sequence
-    @param hosts: Hostnames
+    @type nodes: sequence
+    @param nodes: node UUIDs or Hostnames
     @type procedure: string
     @param procedure: Request path
-    @type body: string
-    @param body: Request body
+    @type body: dictionary
+    @param body: dictionary with request bodies per host
     @type read_timeout: int or None
     @param read_timeout: Read timeout for request
+    @rtype: dictionary
+    @return: a dictionary mapping host names to rpc.RpcResult objects
 
     """
-    if read_timeout is None:
-      read_timeout = _TIMEOUTS.get(procedure, None)
-
     assert read_timeout is not None, \
       "Missing RPC read timeout for procedure '%s'" % procedure
 
+    if _req_process_fn is None:
+      _req_process_fn = http.client.ProcessRequests
+
     (results, requests) = \
-      self._PrepareRequests(self._resolver(hosts), self._port, procedure,
-                            str(body), read_timeout)
+      self._PrepareRequests(self._resolver(nodes, resolver_opts), self._port,
+                            procedure, body, read_timeout)
 
     _req_process_fn(requests.values(), lock_monitor_cb=self._lock_monitor_cb)
 
@@ -437,706 +447,576 @@ class _RpcProcessor:
     return self._CombineResults(results, requests, procedure)
 
 
-def _EncodeImportExportIO(ieio, ieioargs):
-  """Encodes import/export I/O information.
-
-  """
-  if ieio == constants.IEIO_RAW_DISK:
-    assert len(ieioargs) == 1
-    return (ieioargs[0].ToDict(), )
-
-  if ieio == constants.IEIO_SCRIPT:
-    assert len(ieioargs) == 2
-    return (ieioargs[0].ToDict(), ieioargs[1])
-
-  return ieioargs
-
-
-class RpcRunner(_generated_rpc.RpcClientDefault):
-  """RPC runner class.
-
-  """
-  def __init__(self, context):
-    """Initialized the RPC runner.
-
-    @type context: C{masterd.GanetiContext}
-    @param context: Ganeti context
-
-    """
-    _generated_rpc.RpcClientDefault.__init__(self)
-
-    self._cfg = context.cfg
-    self._proc = _RpcProcessor(compat.partial(_NodeConfigResolver,
-                                              self._cfg.GetNodeInfo,
-                                              self._cfg.GetAllNodesInfo),
-                               netutils.GetDaemonPort(constants.NODED),
-                               lock_monitor_cb=context.glm.AddToLockMonitor)
-
-  def _InstDict(self, instance, hvp=None, bep=None, osp=None):
-    """Convert the given instance to a dict.
-
-    This is done via the instance's ToDict() method and additionally
-    we fill the hvparams with the cluster defaults.
-
-    @type instance: L{objects.Instance}
-    @param instance: an Instance object
-    @type hvp: dict or None
-    @param hvp: a dictionary with overridden hypervisor parameters
-    @type bep: dict or None
-    @param bep: a dictionary with overridden backend parameters
-    @type osp: dict or None
-    @param osp: a dictionary with overridden os parameters
-    @rtype: dict
-    @return: the instance dict, with the hvparams filled with the
-        cluster defaults
-
-    """
-    idict = instance.ToDict()
-    cluster = self._cfg.GetClusterInfo()
-    idict["hvparams"] = cluster.FillHV(instance)
-    if hvp is not None:
-      idict["hvparams"].update(hvp)
-    idict["beparams"] = cluster.FillBE(instance)
-    if bep is not None:
-      idict["beparams"].update(bep)
-    idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
-    if osp is not None:
-      idict["osparams"].update(osp)
-    for nic in idict["nics"]:
-      nic['nicparams'] = objects.FillDict(
-        cluster.nicparams[constants.PP_DEFAULT],
-        nic['nicparams'])
-    return idict
-
-  def _MultiNodeCall(self, node_list, procedure, args, read_timeout=None):
-    """Helper for making a multi-node call
-
-    """
-    body = serializer.DumpJson(args, indent=False)
-    return self._proc(node_list, procedure, body, read_timeout=read_timeout)
-
-  def _Call(self, node_list, procedure, timeout, args):
-    """Entry point for automatically generated RPC wrappers.
-
-    """
-    return self._MultiNodeCall(node_list, procedure, args, read_timeout=timeout)
-
-  @staticmethod
-  def _StaticMultiNodeCall(node_list, procedure, args,
-                           address_list=None, read_timeout=None):
-    """Helper for making a multi-node static call
+class _RpcClientBase:
+  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None,
+               _req_process_fn=None):
+    """Initializes this class.
 
     """
-    body = serializer.DumpJson(args, indent=False)
-
-    if address_list is None:
-      resolver = _SsconfResolver
-    else:
-      # Caller provided an address list
-      resolver = _StaticResolver(address_list)
-
     proc = _RpcProcessor(resolver,
-                         netutils.GetDaemonPort(constants.NODED))
-    return proc(node_list, procedure, body, read_timeout=read_timeout)
-
-  def _SingleNodeCall(self, node, procedure, args, read_timeout=None):
-    """Helper for making a single-node call
-
-    """
-    body = serializer.DumpJson(args, indent=False)
-    return self._proc([node], procedure, body, read_timeout=read_timeout)[node]
-
-  @classmethod
-  def _StaticSingleNodeCall(cls, node, procedure, args, read_timeout=None):
-    """Helper for making a single-node static call
-
-    """
-    body = serializer.DumpJson(args, indent=False)
-    proc = _RpcProcessor(_SsconfResolver,
-                         netutils.GetDaemonPort(constants.NODED))
-    return proc([node], procedure, body, read_timeout=read_timeout)[node]
-
-  @staticmethod
-  def _BlockdevFindPostProc(result):
-    if not result.fail_msg and result.payload is not None:
-      result.payload = objects.BlockDevStatus.FromDict(result.payload)
-    return result
-
-  @staticmethod
-  def _BlockdevGetMirrorStatusPostProc(result):
-    if not result.fail_msg:
-      result.payload = [objects.BlockDevStatus.FromDict(i)
-                        for i in result.payload]
-    return result
-
-  @staticmethod
-  def _BlockdevGetMirrorStatusMultiPostProc(result):
-    for nres in result.values():
-      if nres.fail_msg:
-        continue
-
-      for idx, (success, status) in enumerate(nres.payload):
-        if success:
-          nres.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
-
-    return result
-
-  @staticmethod
-  def _OsGetPostProc(result):
-    if not result.fail_msg and isinstance(result.payload, dict):
-      result.payload = objects.OS.FromDict(result.payload)
-    return result
+                         netutils.GetDaemonPort(constants.NODED),
+                         lock_monitor_cb=lock_monitor_cb)
+    self._proc = compat.partial(proc, _req_process_fn=_req_process_fn)
+    self._encoder = compat.partial(self._EncodeArg, encoder_fn)
 
   @staticmethod
-  def _PrepareFinalizeExportDisks(snap_disks):
-    flat_disks = []
-
-    for disk in snap_disks:
-      if isinstance(disk, bool):
-        flat_disks.append(disk)
-      else:
-        flat_disks.append(disk.ToDict())
-
-    return flat_disks
-
-  @staticmethod
-  def _ImpExpStatusPostProc(result):
-    """Post-processor for import/export status.
-
-    @rtype: Payload containing list of L{objects.ImportExportStatus} instances
-    @return: Returns a list of the state of each named import/export or None if
-             a status couldn't be retrieved
+  def _EncodeArg(encoder_fn, (argkind, value)):
+    """Encode argument.
 
     """
-    if not result.fail_msg:
-      decoded = []
-
-      for i in result.payload:
-        if i is None:
-          decoded.append(None)
-          continue
-        decoded.append(objects.ImportExportStatus.FromDict(i))
-
-      result.payload = decoded
-
-    return result
-
-  #
-  # Begin RPC calls
-  #
-
-  @_RpcTimeout(_TMO_URGENT)
-  def call_bdev_sizes(self, node_list, devices):
-    """Gets the sizes of requested block devices present on a node
+    if argkind is None:
+      return value
+    else:
+      return encoder_fn(argkind)(value)
 
-    This is a multi-node call.
+  def _Call(self, cdef, node_list, args):
+    """Entry point for automatically generated RPC wrappers.
 
     """
-    return self._MultiNodeCall(node_list, "bdev_sizes", [devices])
-
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_storage_list(self, node_list, su_name, su_args, name, fields):
-    """Get list of storage units.
+    (procedure, _, resolver_opts, timeout, argdefs,
+     prep_fn, postproc_fn, _) = cdef
 
-    This is a multi-node call.
-
-    """
-    return self._MultiNodeCall(node_list, "storage_list",
-                               [su_name, su_args, name, fields])
+    if callable(timeout):
+      read_timeout = timeout(args)
+    else:
+      read_timeout = timeout
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_storage_modify(self, node, su_name, su_args, name, changes):
-    """Modify a storage unit.
+    if callable(resolver_opts):
+      req_resolver_opts = resolver_opts(args)
+    else:
+      req_resolver_opts = resolver_opts
 
-    This is a single-node call.
+    if len(args) != len(argdefs):
+      raise errors.ProgrammerError("Number of passed arguments doesn't match")
 
-    """
-    return self._SingleNodeCall(node, "storage_modify",
-                                [su_name, su_args, name, changes])
+    enc_args = map(self._encoder, zip(map(compat.snd, argdefs), args))
+    if prep_fn is None:
+      # for a no-op prep_fn, we serialise the body once, and then we
+      # reuse it in the dictionary values
+      body = serializer.DumpJson(enc_args)
+      pnbody = dict((n, body) for n in node_list)
+    else:
+      # for a custom prep_fn, we pass the encoded arguments and the
+      # node name to the prep_fn, and we serialise its return value
+      assert callable(prep_fn)
+      pnbody = dict((n, serializer.DumpJson(prep_fn(n, enc_args)))
+                    for n in node_list)
+
+    result = self._proc(node_list, procedure, pnbody, read_timeout,
+                        req_resolver_opts)
+
+    if postproc_fn:
+      return dict(map(lambda (key, value): (key, postproc_fn(value)),
+                      result.items()))
+    else:
+      return result
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_storage_execute(self, node, su_name, su_args, name, op):
-    """Executes an operation on a storage unit.
 
-    This is a single-node call.
+def _ObjectToDict(value):
+  """Converts an object to a dictionary.
 
-    """
-    return self._SingleNodeCall(node, "storage_execute",
-                                [su_name, su_args, name, op])
+  @note: See L{objects}.
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_instance_start(self, node, instance, hvp, bep, startup_paused):
-    """Starts an instance.
+  """
+  return value.ToDict()
 
-    This is a single-node call.
 
-    """
-    idict = self._InstDict(instance, hvp=hvp, bep=bep)
-    return self._SingleNodeCall(node, "instance_start", [idict, startup_paused])
+def _ObjectListToDict(value):
+  """Converts a list of L{objects} to dictionaries.
 
-  @_RpcTimeout(_TMO_1DAY)
-  def call_instance_os_add(self, node, inst, reinstall, debug, osparams=None):
-    """Installs an OS on the given instance.
+  """
+  return map(_ObjectToDict, value)
 
-    This is a single-node call.
 
-    """
-    return self._SingleNodeCall(node, "instance_os_add",
-                                [self._InstDict(inst, osp=osparams),
-                                 reinstall, debug])
+def _EncodeNodeToDiskDict(value):
+  """Encodes a dictionary with node name as key and disk objects as values.
 
-  @classmethod
-  @_RpcTimeout(_TMO_FAST)
-  def call_node_start_master_daemons(cls, node, no_voting):
-    """Starts master daemons on a node.
+  """
+  return dict((name, _ObjectListToDict(disks))
+              for name, disks in value.items())
 
-    This is a single-node call.
 
-    """
-    return cls._StaticSingleNodeCall(node, "node_start_master_daemons",
-                                     [no_voting])
+def _PrepareFileUpload(getents_fn, filename):
+  """Loads a file and prepares it for an upload to nodes.
 
-  @classmethod
-  @_RpcTimeout(_TMO_FAST)
-  def call_node_activate_master_ip(cls, node):
-    """Activates master IP on a node.
+  """
+  statcb = utils.FileStatHelper()
+  data = _Compress(utils.ReadFile(filename, preread=statcb))
+  st = statcb.st
 
-    This is a single-node call.
+  if getents_fn is None:
+    getents_fn = runtime.GetEnts
 
-    """
-    return cls._StaticSingleNodeCall(node, "node_activate_master_ip", [])
+  getents = getents_fn()
 
-  @classmethod
-  @_RpcTimeout(_TMO_FAST)
-  def call_node_stop_master(cls, node):
-    """Deactivates master IP and stops master daemons on a node.
+  virt_filename = vcluster.MakeVirtualPath(filename)
 
-    This is a single-node call.
+  return [virt_filename, data, st.st_mode, getents.LookupUid(st.st_uid),
+          getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
 
-    """
-    return cls._StaticSingleNodeCall(node, "node_stop_master", [])
 
-  @classmethod
-  @_RpcTimeout(_TMO_FAST)
-  def call_node_deactivate_master_ip(cls, node):
-    """Deactivates master IP on a node.
+def _PrepareFinalizeExportDisks(snap_disks):
+  """Encodes disks for finalizing export.
 
-    This is a single-node call.
+  """
+  flat_disks = []
 
-    """
-    return cls._StaticSingleNodeCall(node, "node_deactivate_master_ip", [])
+  for disk in snap_disks:
+    if isinstance(disk, bool):
+      flat_disks.append(disk)
+    else:
+      flat_disks.append(disk.ToDict())
 
-  @classmethod
-  @_RpcTimeout(_TMO_FAST)
-  def call_node_change_master_netmask(cls, node, netmask):
-    """Change master IP netmask.
+  return flat_disks
 
-    This is a single-node call.
 
-    """
-    return cls._StaticSingleNodeCall(node, "node_change_master_netmask",
-                  [netmask])
+def _EncodeImportExportIO((ieio, ieioargs)):
+  """Encodes import/export I/O information.
 
-  @classmethod
-  @_RpcTimeout(_TMO_URGENT)
-  def call_master_info(cls, node_list):
-    """Query master info.
+  """
+  if ieio == constants.IEIO_RAW_DISK:
+    assert len(ieioargs) == 1
+    return (ieio, (ieioargs[0].ToDict(), ))
 
-    This is a multi-node call.
+  if ieio == constants.IEIO_SCRIPT:
+    assert len(ieioargs) == 2
+    return (ieio, (ieioargs[0].ToDict(), ieioargs[1]))
 
-    """
-    # TODO: should this method query down nodes?
-    return cls._StaticMultiNodeCall(node_list, "master_info", [])
+  return (ieio, ieioargs)
 
-  @classmethod
-  @_RpcTimeout(_TMO_URGENT)
-  def call_version(cls, node_list):
-    """Query node version.
 
-    This is a multi-node call.
+def _EncodeBlockdevRename(value):
+  """Encodes information for renaming block devices.
 
-    """
-    return cls._StaticMultiNodeCall(node_list, "version", [])
+  """
+  return [(d.ToDict(), uid) for d, uid in value]
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_create(self, node, bdev, size, owner, on_primary, info):
-    """Request creation of a given block device.
 
-    This is a single-node call.
+def _AddSpindlesToLegacyNodeInfo(result, space_info):
+  """Extracts the spindle information from the space info and adds
+  it to the result dictionary.
 
-    """
-    return self._SingleNodeCall(node, "blockdev_create",
-                                [bdev.ToDict(), size, owner, on_primary, info])
+  @type result: dict of strings
+  @param result: dictionary holding the result of the legacy node info
+  @type space_info: list of dicts of strings
+  @param space_info: list, each row holding space information of one storage
+    unit
+  @rtype: None
+  @return: does not return anything, manipulates the C{result} variable
 
-  @_RpcTimeout(_TMO_SLOW)
-  def call_blockdev_wipe(self, node, bdev, offset, size):
-    """Request wipe at given offset with given size of a block device.
+  """
+  lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
+      space_info, constants.ST_LVM_PV)
+  if lvm_pv_info:
+    result["spindles_free"] = lvm_pv_info["storage_free"]
+    result["spindles_total"] = lvm_pv_info["storage_size"]
+  else:
+    raise errors.OpExecError("No spindle storage information available.")
 
-    This is a single-node call.
 
-    """
-    return self._SingleNodeCall(node, "blockdev_wipe",
-                                [bdev.ToDict(), offset, size])
+def _AddDefaultStorageInfoToLegacyNodeInfo(result, space_info):
+  """Extracts the storage space information of the default storage type from
+  the space info and adds it to the result dictionary.
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_remove(self, node, bdev):
-    """Request removal of a given block device.
+  @see: C{_AddSpindlesToLegacyNodeInfo} for parameter information.
 
-    This is a single-node call.
+  """
+  # Check if there is at least one row for non-spindle storage info.
+  no_defaults = (len(space_info) < 1) or \
+      (space_info[0]["type"] == constants.ST_LVM_PV and len(space_info) == 1)
 
-    """
-    return self._SingleNodeCall(node, "blockdev_remove", [bdev.ToDict()])
+  default_space_info = None
+  if no_defaults:
+    logging.warning("No storage info provided for default storage type.")
+  else:
+    default_space_info = space_info[0]
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_rename(self, node, devlist):
-    """Request rename of the given block devices.
+  if default_space_info:
+    result["name"] = default_space_info["name"]
+    result["storage_free"] = default_space_info["storage_free"]
+    result["storage_size"] = default_space_info["storage_size"]
 
-    This is a single-node call.
 
-    """
-    return self._SingleNodeCall(node, "blockdev_rename",
-                                [[(d.ToDict(), uid) for d, uid in devlist]])
+def MakeLegacyNodeInfo(data, require_spindles=False):
+  """Formats the data returned by L{rpc.RpcRunner.call_node_info}.
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_pause_resume_sync(self, node, disks, pause):
-    """Request a pause/resume of given block device.
+  Converts the data into a single dictionary. This is fine for most use cases,
+  but some require information from more than one volume group or hypervisor.
 
-    This is a single-node call.
+  @param require_spindles: add spindle storage information to the legacy node
+      info
 
-    """
-    return self._SingleNodeCall(node, "blockdev_pause_resume_sync",
-                                [[bdev.ToDict() for bdev in disks], pause])
+  """
+  (bootid, space_info, (hv_info, )) = data
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_assemble(self, node, disk, owner, on_primary, idx):
-    """Request assembling of a given block device.
+  ret = utils.JoinDisjointDicts(hv_info, {"bootid": bootid})
 
-    This is a single-node call.
+  if require_spindles:
+    _AddSpindlesToLegacyNodeInfo(ret, space_info)
+  _AddDefaultStorageInfoToLegacyNodeInfo(ret, space_info)
 
-    """
-    return self._SingleNodeCall(node, "blockdev_assemble",
-                                [disk.ToDict(), owner, on_primary, idx])
+  return ret
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_shutdown(self, node, disk):
-    """Request shutdown of a given block device.
 
-    This is a single-node call.
+def _AnnotateDParamsDRBD(disk, (drbd_params, data_params, meta_params)):
+  """Annotates just DRBD disks layouts.
 
-    """
-    return self._SingleNodeCall(node, "blockdev_shutdown", [disk.ToDict()])
+  """
+  assert disk.dev_type == constants.DT_DRBD8
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_addchildren(self, node, bdev, ndevs):
-    """Request adding a list of children to a (mirroring) device.
+  disk.params = objects.FillDict(drbd_params, disk.params)
+  (dev_data, dev_meta) = disk.children
+  dev_data.params = objects.FillDict(data_params, dev_data.params)
+  dev_meta.params = objects.FillDict(meta_params, dev_meta.params)
 
-    This is a single-node call.
+  return disk
 
-    """
-    return self._SingleNodeCall(node, "blockdev_addchildren",
-                                [bdev.ToDict(),
-                                 [disk.ToDict() for disk in ndevs]])
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_removechildren(self, node, bdev, ndevs):
-    """Request removing a list of children from a (mirroring) device.
+def _AnnotateDParamsGeneric(disk, (params, )):
+  """Generic disk parameter annotation routine.
 
-    This is a single-node call.
+  """
+  assert disk.dev_type != constants.DT_DRBD8
 
-    """
-    return self._SingleNodeCall(node, "blockdev_removechildren",
-                                [bdev.ToDict(),
-                                 [disk.ToDict() for disk in ndevs]])
+  disk.params = objects.FillDict(params, disk.params)
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_getmirrorstatus(self, node, disks):
-    """Request status of a (mirroring) device.
+  return disk
 
-    This is a single-node call.
 
-    """
-    result = self._SingleNodeCall(node, "blockdev_getmirrorstatus",
-                                  [dsk.ToDict() for dsk in disks])
-    if not result.fail_msg:
-      result.payload = [objects.BlockDevStatus.FromDict(i)
-                        for i in result.payload]
-    return result
+def AnnotateDiskParams(template, disks, disk_params):
+  """Annotates the disk objects with the disk parameters.
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_getmirrorstatus_multi(self, node_list, node_disks):
-    """Request status of (mirroring) devices from multiple nodes.
+  @param template: The disk template used
+  @param disks: The list of disks objects to annotate
+  @param disk_params: The disk parameters for annotation
+  @returns: A list of disk objects annotated
 
-    This is a multi-node call.
+  """
+  ld_params = objects.Disk.ComputeLDParams(template, disk_params)
 
-    """
-    result = self._MultiNodeCall(node_list, "blockdev_getmirrorstatus_multi",
-                                 [dict((name, [dsk.ToDict() for dsk in disks])
-                                       for name, disks in node_disks.items())])
-    for nres in result.values():
-      if nres.fail_msg:
-        continue
+  if template == constants.DT_DRBD8:
+    annotation_fn = _AnnotateDParamsDRBD
+  elif template == constants.DT_DISKLESS:
+    annotation_fn = lambda disk, _: disk
+  else:
+    annotation_fn = _AnnotateDParamsGeneric
 
-      for idx, (success, status) in enumerate(nres.payload):
-        if success:
-          nres.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
+  return [annotation_fn(disk.Copy(), ld_params) for disk in disks]
 
-    return result
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_find(self, node, disk):
-    """Request identification of a given block device.
+def _GetExclusiveStorageFlag(cfg, node_uuid):
+  ni = cfg.GetNodeInfo(node_uuid)
+  if ni is None:
+    raise errors.OpPrereqError("Invalid node name %s" % node_uuid,
+                               errors.ECODE_NOENT)
+  return cfg.GetNdParams(ni)[constants.ND_EXCLUSIVE_STORAGE]
 
-    This is a single-node call.
 
-    """
-    result = self._SingleNodeCall(node, "blockdev_find", [disk.ToDict()])
-    if not result.fail_msg and result.payload is not None:
-      result.payload = objects.BlockDevStatus.FromDict(result.payload)
-    return result
+def _AddExclusiveStorageFlagToLvmStorageUnits(storage_units, es_flag):
+  """Adds the exclusive storage flag to lvm units.
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_close(self, node, instance_name, disks):
-    """Closes the given block devices.
+  This function creates a copy of the storage_units lists, with the
+  es_flag being added to all lvm storage units.
 
-    This is a single-node call.
+  @type storage_units: list of pairs (string, string)
+  @param storage_units: list of 'raw' storage units, consisting only of
+    (storage_type, storage_key)
+  @type es_flag: boolean
+  @param es_flag: exclusive storage flag
+  @rtype: list of tuples (string, string, list)
+  @return: list of storage units (storage_type, storage_key, params) with
+    the params containing the es_flag for lvm-vg storage units
 
-    """
-    params = [instance_name, [cf.ToDict() for cf in disks]]
-    return self._SingleNodeCall(node, "blockdev_close", params)
+  """
+  result = []
+  for (storage_type, storage_key) in storage_units:
+    if storage_type in [constants.ST_LVM_VG, constants.ST_LVM_PV]:
+      result.append((storage_type, storage_key, [es_flag]))
+    else:
+      result.append((storage_type, storage_key, []))
+  return result
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_getsize(self, node, disks):
-    """Returns the size of the given disks.
 
-    This is a single-node call.
+def GetExclusiveStorageForNodes(cfg, node_uuids):
+  """Return the exclusive storage flag for all the given nodes.
 
-    """
-    params = [[cf.ToDict() for cf in disks]]
-    return self._SingleNodeCall(node, "blockdev_getsize", params)
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: cluster configuration
+  @type node_uuids: list or tuple
+  @param node_uuids: node UUIDs for which to read the flag
+  @rtype: dict
+  @return: mapping from node uuids to exclusive storage flags
+  @raise errors.OpPrereqError: if any given node UUID has no
+    corresponding node
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_drbd_disconnect_net(self, node_list, nodes_ip, disks):
-    """Disconnects the network of the given drbd devices.
+  """
+  getflag = lambda n: _GetExclusiveStorageFlag(cfg, n)
+  flags = map(getflag, node_uuids)
+  return dict(zip(node_uuids, flags))
+
+
+def PrepareStorageUnitsForNodes(cfg, storage_units, node_uuids):
+  """Return the LVM storage units for all the given nodes.
+
+  Main purpose of this function is to map the exclusive storage flag, which
+  can be different for each node, to the default LVM storage unit.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: cluster configuration
+  @type storage_units: list of pairs (string, string)
+  @param storage_units: list of 'raw' storage units, e.g. pairs of
+    (storage_type, storage_key)
+  @type node_uuids: list or tuple
+  @param node_uuids: node UUIDs for which to read the flag
+  @rtype: dict
+  @return: mapping from node uuids to a list of storage units which include
+    the exclusive storage flag for lvm storage
+  @raise errors.OpPrereqError: if any given node UUID has no
+    corresponding node
 
-    This is a multi-node call.
+  """
+  getunit = lambda n: _AddExclusiveStorageFlagToLvmStorageUnits(
+      storage_units, _GetExclusiveStorageFlag(cfg, n))
+  flags = map(getunit, node_uuids)
+  return dict(zip(node_uuids, flags))
+
+
+#: Generic encoders
+_ENCODERS = {
+  rpc_defs.ED_OBJECT_DICT: _ObjectToDict,
+  rpc_defs.ED_OBJECT_DICT_LIST: _ObjectListToDict,
+  rpc_defs.ED_NODE_TO_DISK_DICT: _EncodeNodeToDiskDict,
+  rpc_defs.ED_COMPRESS: _Compress,
+  rpc_defs.ED_FINALIZE_EXPORT_DISKS: _PrepareFinalizeExportDisks,
+  rpc_defs.ED_IMPEXP_IO: _EncodeImportExportIO,
+  rpc_defs.ED_BLOCKDEV_RENAME: _EncodeBlockdevRename,
+  }
+
+
+class RpcRunner(_RpcClientBase,
+                _generated_rpc.RpcClientDefault,
+                _generated_rpc.RpcClientBootstrap,
+                _generated_rpc.RpcClientDnsOnly,
+                _generated_rpc.RpcClientConfig):
+  """RPC runner class.
 
-    """
-    return self._MultiNodeCall(node_list, "drbd_disconnect_net",
-                               [nodes_ip, [cf.ToDict() for cf in disks]])
+  """
+  def __init__(self, cfg, lock_monitor_cb, _req_process_fn=None, _getents=None):
+    """Initializes the RPC runner.
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_drbd_attach_net(self, node_list, nodes_ip,
-                           disks, instance_name, multimaster):
-    """Disconnects the given drbd devices.
+    @type cfg: L{config.ConfigWriter}
+    @param cfg: Configuration
+    @type lock_monitor_cb: callable
+    @param lock_monitor_cb: Lock monitor callback
+
+    """
+    self._cfg = cfg
+
+    encoders = _ENCODERS.copy()
+
+    encoders.update({
+      # Encoders requiring configuration object
+      rpc_defs.ED_INST_DICT: self._InstDict,
+      rpc_defs.ED_INST_DICT_HVP_BEP_DP: self._InstDictHvpBepDp,
+      rpc_defs.ED_INST_DICT_OSP_DP: self._InstDictOspDp,
+      rpc_defs.ED_NIC_DICT: self._NicDict,
+
+      # Encoders annotating disk parameters
+      rpc_defs.ED_DISKS_DICT_DP: self._DisksDictDP,
+      rpc_defs.ED_MULTI_DISKS_DICT_DP: self._MultiDiskDictDP,
+      rpc_defs.ED_SINGLE_DISK_DICT_DP: self._SingleDiskDictDP,
+
+      # Encoders with special requirements
+      rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
+      })
+
+    # Resolver using configuration
+    resolver = compat.partial(_NodeConfigResolver, cfg.GetNodeInfo,
+                              cfg.GetAllNodesInfo)
+
+    # Pylint doesn't recognize multiple inheritance properly, see
+    # <http://www.logilab.org/ticket/36586> and
+    # <http://www.logilab.org/ticket/35642>
+    # pylint: disable=W0233
+    _RpcClientBase.__init__(self, resolver, encoders.get,
+                            lock_monitor_cb=lock_monitor_cb,
+                            _req_process_fn=_req_process_fn)
+    _generated_rpc.RpcClientConfig.__init__(self)
+    _generated_rpc.RpcClientBootstrap.__init__(self)
+    _generated_rpc.RpcClientDnsOnly.__init__(self)
+    _generated_rpc.RpcClientDefault.__init__(self)
 
-    This is a multi-node call.
+  def _NicDict(self, nic):
+    """Convert the given nic to a dict and encapsulate netinfo.
 
     """
-    return self._MultiNodeCall(node_list, "drbd_attach_net",
-                               [nodes_ip, [cf.ToDict() for cf in disks],
-                                instance_name, multimaster])
-
-  @_RpcTimeout(_TMO_SLOW)
-  def call_drbd_wait_sync(self, node_list, nodes_ip, disks):
-    """Waits for the synchronization of drbd devices is complete.
-
-    This is a multi-node call.
+    n = copy.deepcopy(nic)
+    if n.network:
+      net_uuid = self._cfg.LookupNetwork(n.network)
+      if net_uuid:
+        nobj = self._cfg.GetNetwork(net_uuid)
+        n.netinfo = objects.Network.ToDict(nobj)
+    return n.ToDict()
 
-    """
-    return self._MultiNodeCall(node_list, "drbd_wait_sync",
-                               [nodes_ip, [cf.ToDict() for cf in disks]])
+  def _InstDict(self, instance, hvp=None, bep=None, osp=None):
+    """Convert the given instance to a dict.
 
-  @_RpcTimeout(_TMO_URGENT)
-  def call_drbd_helper(self, node_list):
-    """Gets drbd helper.
+    This is done via the instance's ToDict() method and additionally
+    we fill the hvparams with the cluster defaults.
 
-    This is a multi-node call.
+    @type instance: L{objects.Instance}
+    @param instance: an Instance object
+    @type hvp: dict or None
+    @param hvp: a dictionary with overridden hypervisor parameters
+    @type bep: dict or None
+    @param bep: a dictionary with overridden backend parameters
+    @type osp: dict or None
+    @param osp: a dictionary with overridden os parameters
+    @rtype: dict
+    @return: the instance dict, with the hvparams filled with the
+        cluster defaults
 
     """
-    return self._MultiNodeCall(node_list, "drbd_helper", [])
-
-  @classmethod
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_upload_file(cls, node_list, file_name, address_list=None):
-    """Upload a file.
-
-    The node will refuse the operation in case the file is not on the
-    approved file list.
-
-    This is a multi-node call.
-
-    @type node_list: list
-    @param node_list: the list of node names to upload to
-    @type file_name: str
-    @param file_name: the filename to upload
-    @type address_list: list or None
-    @keyword address_list: an optional list of node addresses, in order
-        to optimize the RPC speed
+    idict = instance.ToDict()
+    cluster = self._cfg.GetClusterInfo()
+    idict["hvparams"] = cluster.FillHV(instance)
+    if hvp is not None:
+      idict["hvparams"].update(hvp)
+    idict["beparams"] = cluster.FillBE(instance)
+    if bep is not None:
+      idict["beparams"].update(bep)
+    idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
+    if osp is not None:
+      idict["osparams"].update(osp)
+    idict["disks"] = self._DisksDictDP((instance.disks, instance))
+    for nic in idict["nics"]:
+      nic["nicparams"] = objects.FillDict(
+        cluster.nicparams[constants.PP_DEFAULT],
+        nic["nicparams"])
+      network = nic.get("network", None)
+      if network:
+        net_uuid = self._cfg.LookupNetwork(network)
+        if net_uuid:
+          nobj = self._cfg.GetNetwork(net_uuid)
+          nic["netinfo"] = objects.Network.ToDict(nobj)
+    return idict
 
-    """
-    file_contents = utils.ReadFile(file_name)
-    data = _Compress(file_contents)
-    st = os.stat(file_name)
-    getents = runtime.GetEnts()
-    params = [file_name, data, st.st_mode, getents.LookupUid(st.st_uid),
-              getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
-    return cls._StaticMultiNodeCall(node_list, "upload_file", params,
-                                    address_list=address_list)
-
-  @classmethod
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_write_ssconf_files(cls, node_list, values):
-    """Write ssconf files.
-
-    This is a multi-node call.
+  def _InstDictHvpBepDp(self, (instance, hvp, bep)):
+    """Wrapper for L{_InstDict}.
 
     """
-    return cls._StaticMultiNodeCall(node_list, "write_ssconf_files", [values])
-
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_grow(self, node, cf_bdev, amount, dryrun):
-    """Request a snapshot of the given block device.
+    return self._InstDict(instance, hvp=hvp, bep=bep)
 
-    This is a single-node call.
+  def _InstDictOspDp(self, (instance, osparams)):
+    """Wrapper for L{_InstDict}.
 
     """
-    return self._SingleNodeCall(node, "blockdev_grow",
-                                [cf_bdev.ToDict(), amount, dryrun])
+    return self._InstDict(instance, osp=osparams)
 
-  @_RpcTimeout(_TMO_1DAY)
-  def call_blockdev_export(self, node, cf_bdev,
-                           dest_node, dest_path, cluster_name):
-    """Export a given disk to another node.
-
-    This is a single-node call.
+  def _DisksDictDP(self, (disks, instance)):
+    """Wrapper for L{AnnotateDiskParams}.
 
     """
-    return self._SingleNodeCall(node, "blockdev_export",
-                                [cf_bdev.ToDict(), dest_node, dest_path,
-                                 cluster_name])
-
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_snapshot(self, node, cf_bdev):
-    """Request a snapshot of the given block device.
+    diskparams = self._cfg.GetInstanceDiskParams(instance)
+    return [disk.ToDict()
+            for disk in AnnotateDiskParams(instance.disk_template,
+                                           disks, diskparams)]
 
-    This is a single-node call.
+  def _MultiDiskDictDP(self, disks_insts):
+    """Wrapper for L{AnnotateDiskParams}.
 
+    Supports a list of (disk, instance) tuples.
     """
-    return self._SingleNodeCall(node, "blockdev_snapshot", [cf_bdev.ToDict()])
-
-  @classmethod
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_node_leave_cluster(cls, node, modify_ssh_setup):
-    """Requests a node to clean the cluster information it has.
-
-    This will remove the configuration information from the ganeti data
-    dir.
+    return [disk for disk_inst in disks_insts
+            for disk in self._DisksDictDP(disk_inst)]
 
-    This is a single-node call.
+  def _SingleDiskDictDP(self, (disk, instance)):
+    """Wrapper for L{AnnotateDiskParams}.
 
     """
-    return cls._StaticSingleNodeCall(node, "node_leave_cluster",
-                                     [modify_ssh_setup])
+    (anno_disk,) = self._DisksDictDP(([disk], instance))
+    return anno_disk
 
-  def call_test_delay(self, node_list, duration, read_timeout=None):
-    """Sleep for a fixed time on given node(s).
 
-    This is a multi-node call.
+class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
+  """RPC wrappers for job queue.
 
-    """
-    assert read_timeout is None
-    return self.call_test_delay(node_list, duration,
-                                read_timeout=int(duration + 5))
-
-  @classmethod
-  @_RpcTimeout(_TMO_URGENT)
-  def call_jobqueue_update(cls, node_list, address_list, file_name, content):
-    """Update job queue.
-
-    This is a multi-node call.
+  """
+  def __init__(self, context, address_list):
+    """Initializes this class.
 
     """
-    return cls._StaticMultiNodeCall(node_list, "jobqueue_update",
-                                    [file_name, _Compress(content)],
-                                    address_list=address_list)
-
-  @classmethod
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_jobqueue_purge(cls, node):
-    """Purge job queue.
+    if address_list is None:
+      resolver = compat.partial(_SsconfResolver, True)
+    else:
+      # Caller provided an address list
+      resolver = _StaticResolver(address_list)
 
-    This is a single-node call.
+    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
+                            lock_monitor_cb=context.glm.AddToLockMonitor)
+    _generated_rpc.RpcClientJobQueue.__init__(self)
 
-    """
-    return cls._StaticSingleNodeCall(node, "jobqueue_purge", [])
 
-  @classmethod
-  @_RpcTimeout(_TMO_URGENT)
-  def call_jobqueue_rename(cls, node_list, address_list, rename):
-    """Rename a job queue file.
+class BootstrapRunner(_RpcClientBase,
+                      _generated_rpc.RpcClientBootstrap,
+                      _generated_rpc.RpcClientDnsOnly):
+  """RPC wrappers for bootstrapping.
 
-    This is a multi-node call.
+  """
+  def __init__(self):
+    """Initializes this class.
 
     """
-    return cls._StaticMultiNodeCall(node_list, "jobqueue_rename", rename,
-                                    address_list=address_list)
+    # Pylint doesn't recognize multiple inheritance properly, see
+    # <http://www.logilab.org/ticket/36586> and
+    # <http://www.logilab.org/ticket/35642>
+    # pylint: disable=W0233
+    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, True),
+                            _ENCODERS.get)
+    _generated_rpc.RpcClientBootstrap.__init__(self)
+    _generated_rpc.RpcClientDnsOnly.__init__(self)
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_hypervisor_validate_params(self, node_list, hvname, hvparams):
-    """Validate the hypervisor params.
 
-    This is a multi-node call.
+class DnsOnlyRunner(_RpcClientBase, _generated_rpc.RpcClientDnsOnly):
+  """RPC wrappers for calls using only DNS.
 
-    @type node_list: list
-    @param node_list: the list of nodes to query
-    @type hvname: string
-    @param hvname: the hypervisor name
-    @type hvparams: dict
-    @param hvparams: the hypervisor parameters to be validated
+  """
+  def __init__(self):
+    """Initializes this class.
 
     """
-    cluster = self._cfg.GetClusterInfo()
-    hv_full = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
-    return self._MultiNodeCall(node_list, "hypervisor_validate_params",
-                               [hvname, hv_full])
+    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, False),
+                            _ENCODERS.get)
+    _generated_rpc.RpcClientDnsOnly.__init__(self)
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_import_start(self, node, opts, instance, component,
-                        dest, dest_args):
-    """Starts a listener for an import.
 
-    This is a single-node call.
+class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
+  """RPC wrappers for L{config}.
 
-    @type node: string
-    @param node: Node name
-    @type instance: C{objects.Instance}
-    @param instance: Instance object
-    @type component: string
-    @param component: which part of the instance is being imported
+  """
+  def __init__(self, context, address_list, _req_process_fn=None,
+               _getents=None):
+    """Initializes this class.
 
     """
-    return self._SingleNodeCall(node, "import_start",
-                                [opts.ToDict(),
-                                 self._InstDict(instance), component, dest,
-                                 _EncodeImportExportIO(dest, dest_args)])
+    if context:
+      lock_monitor_cb = context.glm.AddToLockMonitor
+    else:
+      lock_monitor_cb = None
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_export_start(self, node, opts, host, port,
-                        instance, component, source, source_args):
-    """Starts an export daemon.
+    if address_list is None:
+      resolver = compat.partial(_SsconfResolver, True)
+    else:
+      # Caller provided an address list
+      resolver = _StaticResolver(address_list)
 
-    This is a single-node call.
+    encoders = _ENCODERS.copy()
 
-    @type node: string
-    @param node: Node name
-    @type instance: C{objects.Instance}
-    @param instance: Instance object
-    @type component: string
-    @param component: which part of the instance is being imported
+    encoders.update({
+      rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
+      })
 
-    """
-    return self._SingleNodeCall(node, "export_start",
-                                [opts.ToDict(), host, port,
-                                 self._InstDict(instance),
-                                 component, source,
-                                 _EncodeImportExportIO(source, source_args)])
+    _RpcClientBase.__init__(self, resolver, encoders.get,
+                            lock_monitor_cb=lock_monitor_cb,
+                            _req_process_fn=_req_process_fn)
+    _generated_rpc.RpcClientConfig.__init__(self)