gnt-cluster: Add hv/disk state to init
diff --git a/lib/rpc.py b/lib/rpc.py
index 03b57c0..0bedb3c 100644
--- a/lib/rpc.py
+++ b/lib/rpc.py
@@ -47,6 +47,7 @@ from ganeti import netutils
 from ganeti import ssconf
 from ganeti import runtime
 from ganeti import compat
+from ganeti import rpc_defs
 
 # Special module generated at build time
 from ganeti import _generated_rpc
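
The newly imported rpc_defs module is where the per-call definitions now live:
each generated call_* wrapper hands _RpcClientBase._Call (introduced further
down) a definition tuple naming the remote procedure, its read timeout and the
encoding kind of every argument, replacing the decorator-built _TIMEOUTS table
removed in the next hunk.  A rough sketch of that shape, inferred only from the
tuple unpacking in _Call; the meaning of the two slots _Call ignores and the
example values are assumptions, not the literal rpc_defs content:

  # Hypothetical illustration only -- not copied from rpc_defs.
  _EXAMPLE_CALL_DEF = (
    "instance_start",                # procedure name sent to the node daemon
    None,                            # slot ignored by _RpcClientBase._Call
    _TMO_NORMAL,                     # read timeout in seconds, or a callable
                                     # evaluated against the call arguments
    [("instance_hvp_bep", rpc_defs.ED_INST_DICT_HVP_BEP),
     ("startup_paused", None)],      # (argument name, encoding kind or None)
    None,                            # optional per-result post-processing hook
    None,                            # slot ignored by _RpcClientBase._Call
    )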
@@ -71,16 +72,6 @@ _TMO_SLOW = 3600 # one hour
 _TMO_4HRS = 4 * 3600
 _TMO_1DAY = 86400
 
-# Timeout table that will be built later by decorators
-# Guidelines for choosing timeouts:
-# - call used during watcher: timeout -> 1min, _TMO_URGENT
-# - trivial (but be sure it is trivial) (e.g. reading a file): 5min, _TMO_FAST
-# - other calls: 15 min, _TMO_NORMAL
-# - special calls (instance add, etc.): either _TMO_SLOW (1h) or huge timeouts
-
-_TIMEOUTS = {
-}
-
 #: Special value to describe an offline host
 _OFFLINE = object()
 
@@ -127,21 +118,6 @@ def _ConfigRpcCurl(curl):
   curl.setopt(pycurl.CONNECTTIMEOUT, _RPC_CONNECT_TIMEOUT)
 
 
-def _RpcTimeout(secs):
-  """Timeout decorator.
-
-  When applied to a rpc call_* function, it updates the global timeout
-  table with the given function/timeout.
-
-  """
-  def decorator(f):
-    name = f.__name__
-    assert name.startswith("call_")
-    _TIMEOUTS[name[len("call_"):]] = secs
-    return f
-  return decorator
-
-
 def RunWithRPC(fn):
   """RPC-wrapper decorator.
 
@@ -420,9 +396,6 @@ class _RpcProcessor:
     @param read_timeout: Read timeout for request
 
     """
-    if read_timeout is None:
-      read_timeout = _TIMEOUTS.get(procedure, None)
-
     assert read_timeout is not None, \
       "Missing RPC read timeout for procedure '%s'" % procedure
 
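
With the _TIMEOUTS lookup gone, the read timeout must now always reach
_RpcProcessor explicitly: the generated wrappers take it from the call
definition, where it may also be a callable that _RpcClientBase._Call (below)
evaluates against the call arguments.  The special-cased call_test_delay
override removed further down, which read with a timeout of duration + 5
seconds, would under the new scheme be expressed as a callable timeout in its
definition, roughly as follows (an assumed sketch, not the literal rpc_defs
entry):

  # _Call does "read_timeout = timeout(args)" when callable(timeout) is true.
  lambda (duration, ): int(duration + 5)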
@@ -437,321 +410,243 @@ class _RpcProcessor:
     return self._CombineResults(results, requests, procedure)
 
 
-class RpcRunner(_generated_rpc.RpcClientDefault,
-                _generated_rpc.RpcClientBootstrap):
-  """RPC runner class.
-
-  """
-  def __init__(self, context):
-    """Initialized the RPC runner.
-
-    @type context: C{masterd.GanetiContext}
-    @param context: Ganeti context
+class _RpcClientBase:
+  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None):
+    """Initializes this class.
 
     """
-    # Pylint doesn't recognize multiple inheritance properly, see
-    # <http://www.logilab.org/ticket/36586> and
-    # <http://www.logilab.org/ticket/35642>
-    # pylint: disable=W0233
-    _generated_rpc.RpcClientBootstrap.__init__(self)
-    _generated_rpc.RpcClientDefault.__init__(self)
-
-    self._cfg = context.cfg
-    self._proc = _RpcProcessor(compat.partial(_NodeConfigResolver,
-                                              self._cfg.GetNodeInfo,
-                                              self._cfg.GetAllNodesInfo),
+    self._proc = _RpcProcessor(resolver,
                                netutils.GetDaemonPort(constants.NODED),
-                               lock_monitor_cb=context.glm.AddToLockMonitor)
-
-  def _InstDict(self, instance, hvp=None, bep=None, osp=None):
-    """Convert the given instance to a dict.
-
-    This is done via the instance's ToDict() method and additionally
-    we fill the hvparams with the cluster defaults.
-
-    @type instance: L{objects.Instance}
-    @param instance: an Instance object
-    @type hvp: dict or None
-    @param hvp: a dictionary with overridden hypervisor parameters
-    @type bep: dict or None
-    @param bep: a dictionary with overridden backend parameters
-    @type osp: dict or None
-    @param osp: a dictionary with overridden os parameters
-    @rtype: dict
-    @return: the instance dict, with the hvparams filled with the
-        cluster defaults
+                               lock_monitor_cb=lock_monitor_cb)
+    self._encoder = compat.partial(self._EncodeArg, encoder_fn)
 
-    """
-    idict = instance.ToDict()
-    cluster = self._cfg.GetClusterInfo()
-    idict["hvparams"] = cluster.FillHV(instance)
-    if hvp is not None:
-      idict["hvparams"].update(hvp)
-    idict["beparams"] = cluster.FillBE(instance)
-    if bep is not None:
-      idict["beparams"].update(bep)
-    idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
-    if osp is not None:
-      idict["osparams"].update(osp)
-    for nic in idict["nics"]:
-      nic['nicparams'] = objects.FillDict(
-        cluster.nicparams[constants.PP_DEFAULT],
-        nic['nicparams'])
-    return idict
-
-  def _MultiNodeCall(self, node_list, procedure, args, read_timeout=None):
-    """Helper for making a multi-node call
+  @staticmethod
+  def _EncodeArg(encoder_fn, (argkind, value)):
+    """Encode argument.
 
     """
-    body = serializer.DumpJson(args, indent=False)
-    return self._proc(node_list, procedure, body, read_timeout=read_timeout)
+    if argkind is None:
+      return value
+    else:
+      return encoder_fn(argkind)(value)
 
-  def _Call(self, node_list, procedure, timeout, args):
+  def _Call(self, cdef, node_list, args):
     """Entry point for automatically generated RPC wrappers.
 
     """
-    return self._MultiNodeCall(node_list, procedure, args, read_timeout=timeout)
-
-  @staticmethod
-  def _StaticMultiNodeCall(node_list, procedure, args,
-                           address_list=None, read_timeout=None):
-    """Helper for making a multi-node static call
-
-    """
-    body = serializer.DumpJson(args, indent=False)
+    (procedure, _, timeout, argdefs, postproc_fn, _) = cdef
 
-    if address_list is None:
-      resolver = _SsconfResolver
+    if callable(timeout):
+      read_timeout = timeout(args)
     else:
-      # Caller provided an address list
-      resolver = _StaticResolver(address_list)
+      read_timeout = timeout
 
-    proc = _RpcProcessor(resolver,
-                         netutils.GetDaemonPort(constants.NODED))
-    return proc(node_list, procedure, body, read_timeout=read_timeout)
+    body = serializer.DumpJson(map(self._encoder,
+                                   zip(map(compat.snd, argdefs), args)),
+                               indent=False)
 
-  def _SingleNodeCall(self, node, procedure, args, read_timeout=None):
-    """Helper for making a single-node call
+    result = self._proc(node_list, procedure, body, read_timeout=read_timeout)
 
-    """
-    body = serializer.DumpJson(args, indent=False)
-    return self._proc([node], procedure, body, read_timeout=read_timeout)[node]
+    if postproc_fn:
+      return dict(map(lambda (key, value): (key, postproc_fn(value)),
+                      result.items()))
+    else:
+      return result
 
-  @classmethod
-  def _StaticSingleNodeCall(cls, node, procedure, args, read_timeout=None):
-    """Helper for making a single-node static call
 
-    """
-    body = serializer.DumpJson(args, indent=False)
-    proc = _RpcProcessor(_SsconfResolver,
-                         netutils.GetDaemonPort(constants.NODED))
-    return proc([node], procedure, body, read_timeout=read_timeout)[node]
+def _ObjectToDict(value):
+  """Converts an object to a dictionary.
 
-  @staticmethod
-  def _BlockdevFindPostProc(result):
-    if not result.fail_msg and result.payload is not None:
-      result.payload = objects.BlockDevStatus.FromDict(result.payload)
-    return result
+  @note: See L{objects}.
 
-  @staticmethod
-  def _BlockdevGetMirrorStatusPostProc(result):
-    if not result.fail_msg:
-      result.payload = [objects.BlockDevStatus.FromDict(i)
-                        for i in result.payload]
-    return result
+  """
+  return value.ToDict()
 
-  @staticmethod
-  def _BlockdevGetMirrorStatusMultiPostProc(result):
-    for nres in result.values():
-      if nres.fail_msg:
-        continue
 
-      for idx, (success, status) in enumerate(nres.payload):
-        if success:
-          nres.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
+def _ObjectListToDict(value):
+  """Converts a list of L{objects} to dictionaries.
 
-    return result
+  """
+  return map(_ObjectToDict, value)
 
-  @staticmethod
-  def _OsGetPostProc(result):
-    if not result.fail_msg and isinstance(result.payload, dict):
-      result.payload = objects.OS.FromDict(result.payload)
-    return result
 
-  @staticmethod
-  def _PrepareFinalizeExportDisks(snap_disks):
-    flat_disks = []
+def _EncodeNodeToDiskDict(value):
+  """Encodes a dictionary with node name as key and disk objects as values.
 
-    for disk in snap_disks:
-      if isinstance(disk, bool):
-        flat_disks.append(disk)
-      else:
-        flat_disks.append(disk.ToDict())
+  """
+  return dict((name, _ObjectListToDict(disks))
+              for name, disks in value.items())
 
-    return flat_disks
 
-  @staticmethod
-  def _ImpExpStatusPostProc(result):
-    """Post-processor for import/export status.
+def _PrepareFileUpload(filename):
+  """Loads a file and prepares it for an upload to nodes.
 
-    @rtype: Payload containing list of L{objects.ImportExportStatus} instances
-    @return: Returns a list of the state of each named import/export or None if
-             a status couldn't be retrieved
+  """
+  data = _Compress(utils.ReadFile(filename))
+  st = os.stat(filename)
+  getents = runtime.GetEnts()
+  return [filename, data, st.st_mode, getents.LookupUid(st.st_uid),
+          getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
 
-    """
-    if not result.fail_msg:
-      decoded = []
 
-      for i in result.payload:
-        if i is None:
-          decoded.append(None)
-          continue
-        decoded.append(objects.ImportExportStatus.FromDict(i))
+def _PrepareFinalizeExportDisks(snap_disks):
+  """Encodes disks for finalizing export.
 
-      result.payload = decoded
+  """
+  flat_disks = []
 
-    return result
+  for disk in snap_disks:
+    if isinstance(disk, bool):
+      flat_disks.append(disk)
+    else:
+      flat_disks.append(disk.ToDict())
 
-  @staticmethod
-  def _EncodeImportExportIO(ieio, ieioargs):
-    """Encodes import/export I/O information.
+  return flat_disks
 
-    """
-    if ieio == constants.IEIO_RAW_DISK:
-      assert len(ieioargs) == 1
-      return (ieioargs[0].ToDict(), )
 
-    if ieio == constants.IEIO_SCRIPT:
-      assert len(ieioargs) == 2
-      return (ieioargs[0].ToDict(), ieioargs[1])
+def _EncodeImportExportIO((ieio, ieioargs)):
+  """Encodes import/export I/O information.
 
-    return ieioargs
+  """
+  if ieio == constants.IEIO_RAW_DISK:
+    assert len(ieioargs) == 1
+    return (ieio, (ieioargs[0].ToDict(), ))
 
-  #
-  # Begin RPC calls
-  #
+  if ieio == constants.IEIO_SCRIPT:
+    assert len(ieioargs) == 2
+    return (ieio, (ieioargs[0].ToDict(), ieioargs[1]))
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_instance_start(self, node, instance, hvp, bep, startup_paused):
-    """Starts an instance.
+  return (ieio, ieioargs)
 
-    This is a single-node call.
 
-    """
-    idict = self._InstDict(instance, hvp=hvp, bep=bep)
-    return self._SingleNodeCall(node, "instance_start", [idict, startup_paused])
+def _EncodeBlockdevRename(value):
+  """Encodes information for renaming block devices.
 
-  @_RpcTimeout(_TMO_1DAY)
-  def call_instance_os_add(self, node, inst, reinstall, debug, osparams=None):
-    """Installs an OS on the given instance.
+  """
+  return [(d.ToDict(), uid) for d, uid in value]
+
+
+#: Generic encoders
+_ENCODERS = {
+  rpc_defs.ED_OBJECT_DICT: _ObjectToDict,
+  rpc_defs.ED_OBJECT_DICT_LIST: _ObjectListToDict,
+  rpc_defs.ED_NODE_TO_DISK_DICT: _EncodeNodeToDiskDict,
+  rpc_defs.ED_FILE_DETAILS: _PrepareFileUpload,
+  rpc_defs.ED_COMPRESS: _Compress,
+  rpc_defs.ED_FINALIZE_EXPORT_DISKS: _PrepareFinalizeExportDisks,
+  rpc_defs.ED_IMPEXP_IO: _EncodeImportExportIO,
+  rpc_defs.ED_BLOCKDEV_RENAME: _EncodeBlockdevRename,
+  }
+
+
+class RpcRunner(_RpcClientBase,
+                _generated_rpc.RpcClientDefault,
+                _generated_rpc.RpcClientBootstrap,
+                _generated_rpc.RpcClientConfig):
+  """RPC runner class.
 
-    This is a single-node call.
+  """
+  def __init__(self, context):
+    """Initialized the RPC runner.
 
-    """
-    return self._SingleNodeCall(node, "instance_os_add",
-                                [self._InstDict(inst, osp=osparams),
-                                 reinstall, debug])
+    @type context: C{masterd.GanetiContext}
+    @param context: Ganeti context
 
-  @classmethod
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_upload_file(cls, node_list, file_name, address_list=None):
-    """Upload a file.
+    """
+    self._cfg = context.cfg
 
-    The node will refuse the operation in case the file is not on the
-    approved file list.
+    encoders = _ENCODERS.copy()
 
-    This is a multi-node call.
+    # Add encoders requiring configuration object
+    encoders.update({
+      rpc_defs.ED_INST_DICT: self._InstDict,
+      rpc_defs.ED_INST_DICT_HVP_BEP: self._InstDictHvpBep,
+      rpc_defs.ED_INST_DICT_OSP: self._InstDictOsp,
+      })
 
-    @type node_list: list
-    @param node_list: the list of node names to upload to
-    @type file_name: str
-    @param file_name: the filename to upload
-    @type address_list: list or None
-    @keyword address_list: an optional list of node addresses, in order
-        to optimize the RPC speed
+    # Resolver using configuration
+    resolver = compat.partial(_NodeConfigResolver, self._cfg.GetNodeInfo,
+                              self._cfg.GetAllNodesInfo)
 
-    """
-    file_contents = utils.ReadFile(file_name)
-    data = _Compress(file_contents)
-    st = os.stat(file_name)
-    getents = runtime.GetEnts()
-    params = [file_name, data, st.st_mode, getents.LookupUid(st.st_uid),
-              getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
-    return cls._StaticMultiNodeCall(node_list, "upload_file", params,
-                                    address_list=address_list)
-
-  @classmethod
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_write_ssconf_files(cls, node_list, values):
-    """Write ssconf files.
-
-    This is a multi-node call.
+    # Pylint doesn't recognize multiple inheritance properly, see
+    # <http://www.logilab.org/ticket/36586> and
+    # <http://www.logilab.org/ticket/35642>
+    # pylint: disable=W0233
+    _RpcClientBase.__init__(self, resolver, encoders.get,
+                            lock_monitor_cb=context.glm.AddToLockMonitor)
+    _generated_rpc.RpcClientConfig.__init__(self)
+    _generated_rpc.RpcClientBootstrap.__init__(self)
+    _generated_rpc.RpcClientDefault.__init__(self)
 
-    """
-    return cls._StaticMultiNodeCall(node_list, "write_ssconf_files", [values])
+  def _InstDict(self, instance, hvp=None, bep=None, osp=None):
+    """Convert the given instance to a dict.
 
-  def call_test_delay(self, node_list, duration, read_timeout=None):
-    """Sleep for a fixed time on given node(s).
+    This is done via the instance's ToDict() method and additionally
+    we fill the hvparams with the cluster defaults.
 
-    This is a multi-node call.
+    @type instance: L{objects.Instance}
+    @param instance: an Instance object
+    @type hvp: dict or None
+    @param hvp: a dictionary with overridden hypervisor parameters
+    @type bep: dict or None
+    @param bep: a dictionary with overridden backend parameters
+    @type osp: dict or None
+    @param osp: a dictionary with overridden os parameters
+    @rtype: dict
+    @return: the instance dict, with the hvparams filled with the
+        cluster defaults
 
     """
-    assert read_timeout is None
-    return self.call_test_delay(node_list, duration,
-                                read_timeout=int(duration + 5))
+    idict = instance.ToDict()
+    cluster = self._cfg.GetClusterInfo()
+    idict["hvparams"] = cluster.FillHV(instance)
+    if hvp is not None:
+      idict["hvparams"].update(hvp)
+    idict["beparams"] = cluster.FillBE(instance)
+    if bep is not None:
+      idict["beparams"].update(bep)
+    idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
+    if osp is not None:
+      idict["osparams"].update(osp)
+    for nic in idict["nics"]:
+      nic['nicparams'] = objects.FillDict(
+        cluster.nicparams[constants.PP_DEFAULT],
+        nic['nicparams'])
+    return idict
 
-  @_RpcTimeout(_TMO_NORMAL)
-  def call_hypervisor_validate_params(self, node_list, hvname, hvparams):
-    """Validate the hypervisor params.
+  def _InstDictHvpBep(self, (instance, hvp, bep)):
+    """Wrapper for L{_InstDict}.
 
-    This is a multi-node call.
+    """
+    return self._InstDict(instance, hvp=hvp, bep=bep)
 
-    @type node_list: list
-    @param node_list: the list of nodes to query
-    @type hvname: string
-    @param hvname: the hypervisor name
-    @type hvparams: dict
-    @param hvparams: the hypervisor parameters to be validated
+  def _InstDictOsp(self, (instance, osparams)):
+    """Wrapper for L{_InstDict}.
 
     """
-    cluster = self._cfg.GetClusterInfo()
-    hv_full = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
-    return self._MultiNodeCall(node_list, "hypervisor_validate_params",
-                               [hvname, hv_full])
+    return self._InstDict(instance, osp=osparams)
 
 
-class JobQueueRunner(_generated_rpc.RpcClientJobQueue):
+class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
   """RPC wrappers for job queue.
 
   """
-  _Compress = staticmethod(_Compress)
-
   def __init__(self, context, address_list):
     """Initializes this class.
 
     """
-    _generated_rpc.RpcClientJobQueue.__init__(self)
-
     if address_list is None:
       resolver = _SsconfResolver
     else:
       # Caller provided an address list
       resolver = _StaticResolver(address_list)
 
-    self._proc = _RpcProcessor(resolver,
-                               netutils.GetDaemonPort(constants.NODED),
-                               lock_monitor_cb=context.glm.AddToLockMonitor)
-
-  def _Call(self, node_list, procedure, timeout, args):
-    """Entry point for automatically generated RPC wrappers.
-
-    """
-    body = serializer.DumpJson(args, indent=False)
-
-    return self._proc(node_list, procedure, body, read_timeout=timeout)
+    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
+                            lock_monitor_cb=context.glm.AddToLockMonitor)
+    _generated_rpc.RpcClientJobQueue.__init__(self)
 
 
-class BootstrapRunner(_generated_rpc.RpcClientBootstrap):
+class BootstrapRunner(_RpcClientBase, _generated_rpc.RpcClientBootstrap):
   """RPC wrappers for bootstrapping.
 
   """
@@ -759,15 +654,29 @@ class BootstrapRunner(_generated_rpc.RpcClientBootstrap):
     """Initializes this class.
 
     """
+    _RpcClientBase.__init__(self, _SsconfResolver, _ENCODERS.get)
     _generated_rpc.RpcClientBootstrap.__init__(self)
 
-    self._proc = _RpcProcessor(_SsconfResolver,
-                               netutils.GetDaemonPort(constants.NODED))
 
-  def _Call(self, node_list, procedure, timeout, args):
-    """Entry point for automatically generated RPC wrappers.
+class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
+  """RPC wrappers for L{config}.
+
+  """
+  def __init__(self, context, address_list):
+    """Initializes this class.
 
     """
-    body = serializer.DumpJson(args, indent=False)
+    if context:
+      lock_monitor_cb = context.glm.AddToLockMonitor
+    else:
+      lock_monitor_cb = None
+
+    if address_list is None:
+      resolver = _SsconfResolver
+    else:
+      # Caller provided an address list
+      resolver = _StaticResolver(address_list)
 
-    return self._proc(node_list, procedure, body, read_timeout=timeout)
+    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
+                            lock_monitor_cb=lock_monitor_cb)
+    _generated_rpc.RpcClientConfig.__init__(self)
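
ConfigRunner mirrors JobQueueRunner's construction: a falsy context skips the
lock-monitor registration and a missing address list falls back to ssconf-based
name resolution.  A possible use, with a made-up node address purely for
illustration:

  # With a masterd.GanetiContext and ssconf name resolution:
  runner = ConfigRunner(context, None)

  # Standalone, resolving node names against an explicit address list:
  runner = ConfigRunner(None, ["192.0.2.10"])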