4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Inter-node RPC library.
26 # pylint: disable=C0103,R0201,R0904
27 # C0103: Invalid name, since call_ are not valid
28 # R0201: Method could be a function, we keep all rpcs instance methods
29 # as not to change them back and forth between static/instance methods
30 # if they need to start using instance attributes
31 # R0904: Too many public methods
40 from ganeti import utils
41 from ganeti import objects
42 from ganeti import http
43 from ganeti import serializer
44 from ganeti import constants
45 from ganeti import errors
46 from ganeti import netutils
47 from ganeti import ssconf
48 from ganeti import runtime
49 from ganeti import compat
50 from ganeti import rpc_defs
51 from ganeti import pathutils
52 from ganeti import vcluster
54 # Special module generated at build time
55 from ganeti import _generated_rpc
57 # pylint has a bug here, doesn't see this import
58 import ganeti.http.client # pylint: disable=W0611
#: HTTP headers sent with every RPC request; the empty "Expect:" header
#: disables libcurl's "Expect: 100-continue" handshake, which would add a
#: round-trip delay on POST requests.
_RPC_CLIENT_HEADERS = [
  "Content-type: %s" % http.HTTP_APP_JSON,
  "Expect:",
  ]

#: Special value to describe an offline host
_OFFLINE = object()
71 """Initializes the module-global HTTP client manager.
73 Must be called before using any RPC function and while exactly one thread is
77 # curl_global_init(3) and curl_global_cleanup(3) must be called with only
78 # one thread running. This check is just a safety measure -- it doesn't
80 assert threading.activeCount() == 1, \
81 "Found more than one active thread when initializing pycURL"
83 logging.info("Using PycURL %s", pycurl.version)
85 pycurl.global_init(pycurl.GLOBAL_ALL)
89 """Stops the module-global HTTP client manager.
91 Must be called before quitting the program and while exactly one thread is
95 pycurl.global_cleanup()
def _ConfigRpcCurl(curl):
  """Configures a pycURL object for an RPC call to the node daemon.

  The node daemon certificate file is used as CA, client certificate and
  client key at the same time (all options below point at the same file).

  @param curl: pycURL object to configure

  """
  noded_cert = str(pathutils.NODED_CERT_FILE)

  curl.setopt(pycurl.FOLLOWLOCATION, False)
  curl.setopt(pycurl.CAINFO, noded_cert)
  # NOTE(review): hostname verification is disabled while peer verification
  # stays on -- presumably because the shared noded certificate does not
  # carry per-node hostnames; confirm against the certificate generation code.
  curl.setopt(pycurl.SSL_VERIFYHOST, 0)
  curl.setopt(pycurl.SSL_VERIFYPEER, True)
  curl.setopt(pycurl.SSLCERTTYPE, "PEM")
  curl.setopt(pycurl.SSLCERT, noded_cert)
  curl.setopt(pycurl.SSLKEYTYPE, "PEM")
  curl.setopt(pycurl.SSLKEY, noded_cert)
  curl.setopt(pycurl.CONNECTTIMEOUT, constants.RPC_CONNECT_TIMEOUT)
113 """RPC-wrapper decorator.
115 When applied to a function, it runs it with the RPC system
116 initialized, and it shutsdown the system afterwards. This means the
117 function must be called without RPC being initialized.
120 def wrapper(*args, **kwargs):
123 return fn(*args, **kwargs)
130 """Compresses a string for transport over RPC.
132 Small amounts of data are not compressed.
137 @return: Encoded data to send
140 # Small amounts of data are not compressed
142 return (constants.RPC_ENCODING_NONE, data)
144 # Compress with zlib and encode in base64
145 return (constants.RPC_ENCODING_ZLIB_BASE64,
146 base64.b64encode(zlib.compress(data, 3)))
class RpcResult(object):
  """RPC Result class.

  This class holds an RPC result. It is needed since in multi-node
  calls we can't raise an exception just because one out of many
  failed, and therefore we use this class to encapsulate the result.

  @ivar data: the data payload, for successful results, or None
  @ivar call: the name of the RPC call
  @ivar node: the name of the node to which we made the call
  @ivar offline: whether the operation failed because the node was
      offline, as opposed to actual failure; offline=True will always
      imply failed=True, in order to allow simpler checking if
      the user doesn't care about the exact failure mode
  @ivar fail_msg: the error message if the call failed

  """
  def __init__(self, data=None, failed=False, offline=False,
               call=None, node=None):
    self.offline = offline
    self.call = call
    self.node = node

    if offline:
      self.fail_msg = "Node is marked offline"
      self.data = self.payload = None
    elif failed:
      self.fail_msg = self._EnsureErr(data)
      self.data = self.payload = None
    else:
      self.data = data
      # A successful result must be a (status, payload) pair
      if not isinstance(self.data, (tuple, list)):
        self.fail_msg = ("RPC layer error: invalid result type (%s)" %
                         type(self.data))
        self.payload = None
      elif len(data) != 2:
        self.fail_msg = ("RPC layer error: invalid result length (%d), "
                         "expected 2" % len(self.data))
        self.payload = None
      elif not self.data[0]:
        self.fail_msg = self._EnsureErr(self.data[1])
        self.payload = None
      else:
        self.fail_msg = None
        self.payload = data[1]

    for attr_name in ["call", "data", "fail_msg",
                      "node", "offline", "payload"]:
      assert hasattr(self, attr_name), "Missing attribute %s" % attr_name

  @staticmethod
  def _EnsureErr(val):
    """Helper to ensure we return a 'True' value for error."""
    if val:
      return val
    else:
      return "No error information"

  def Raise(self, msg, prereq=False, ecode=None):
    """If the result has failed, raise an OpExecError.

    This is used so that LU code doesn't have to check for each
    result, but instead can call this function.

    @param msg: message to prepend to the error; None selects a default
    @param prereq: raise L{errors.OpPrereqError} instead of OpExecError
    @param ecode: optional error code appended to the exception arguments

    """
    if not self.fail_msg:
      return

    if not msg: # one could pass None for default message
      msg = ("Call '%s' to node '%s' has failed: %s" %
             (self.call, self.node, self.fail_msg))
    else:
      msg = "%s: %s" % (msg, self.fail_msg)
    if prereq:
      ec = errors.OpPrereqError
    else:
      ec = errors.OpExecError
    if ecode is not None:
      args = (msg, ecode)
    else:
      args = (msg, )
    raise ec(*args) # pylint: disable=W0142

  def Warn(self, msg, feedback_fn):
    """If the result has failed, call the feedback_fn.

    This is used to in cases were LU wants to warn the
    user about a failure, but continue anyway.

    """
    if not self.fail_msg:
      return

    msg = "%s: %s" % (msg, self.fail_msg)
    feedback_fn(msg)
def _SsconfResolver(ssconf_ips, node_list, _,
                    ssc=ssconf.SimpleStore,
                    nslookup_fn=netutils.Hostname.GetIP):
  """Return addresses for given node names.

  @type ssconf_ips: bool
  @param ssconf_ips: Use the ssconf IPs
  @type node_list: list
  @param node_list: List of node names
  @type ssc: class
  @param ssc: SimpleStore class that is used to obtain node->ip mappings
  @type nslookup_fn: callable
  @param nslookup_fn: function use to do NS lookup
  @rtype: list of tuple; (string, string)
  @return: List of tuples containing node name and IP address

  """
  ss = ssc()
  family = ss.GetPrimaryIPFamily()

  if ssconf_ips:
    iplist = ss.GetNodePrimaryIPList()
    ipmap = dict(entry.split() for entry in iplist)
  else:
    ipmap = {}

  result = []
  for node in node_list:
    # Prefer the ssconf mapping; fall back to a DNS lookup
    ip = ipmap.get(node)
    if ip is None:
      ip = nslookup_fn(node, family=family)
    result.append((node, ip, node))

  return result
class _StaticResolver:
  def __init__(self, addresses):
    """Initializes this class.

    @param addresses: list of addresses, one per host later passed to
      L{__call__} (same order)

    """
    self._addresses = addresses

  def __call__(self, hosts, _):
    """Returns static addresses for hosts.

    @return: list of (host, address, host) tuples

    """
    assert len(hosts) == len(self._addresses)
    return zip(hosts, self._addresses, hosts)
298 def _CheckConfigNode(node_uuid_or_name, node, accept_offline_node):
299 """Checks if a node is online.
301 @type node_uuid_or_name: string
302 @param node_uuid_or_name: Node UUID
303 @type node: L{objects.Node} or None
304 @param node: Node object
308 # Assume that the passed parameter was actually a node name, so depend on
309 # DNS for name resolution
310 return (node_uuid_or_name, node_uuid_or_name, node_uuid_or_name)
312 if node.offline and not accept_offline_node:
316 return (node.name, ip, node_uuid_or_name)
def _NodeConfigResolver(single_node_fn, all_nodes_fn, node_uuids, opts):
  """Calculate node addresses using configuration.

  Note that strings in node_uuids are treated as node names if the UUID is not
  found in the configuration.

  @param single_node_fn: function returning one node object by UUID
  @param all_nodes_fn: function returning a UUID->node mapping
  @param node_uuids: list of node UUIDs (or names, see above)
  @param opts: either None or L{rpc_defs.ACCEPT_OFFLINE_NODE}

  """
  accept_offline_node = (opts is rpc_defs.ACCEPT_OFFLINE_NODE)

  assert accept_offline_node or opts is None, "Unknown option"

  # Special case for single-host lookups
  if len(node_uuids) == 1:
    (uuid, ) = node_uuids
    return [_CheckConfigNode(uuid, single_node_fn(uuid), accept_offline_node)]
  else:
    all_nodes = all_nodes_fn()
    return [_CheckConfigNode(uuid, all_nodes.get(uuid, None),
                             accept_offline_node)
            for uuid in node_uuids]
342 def __init__(self, resolver, port, lock_monitor_cb=None):
343 """Initializes this class.
345 @param resolver: callable accepting a list of node UUIDs or hostnames,
346 returning a list of tuples containing name, IP address and original name
347 of the resolved node. IP address can be the name or the special value
348 L{_OFFLINE} to mark offline machines.
350 @param port: TCP port
351 @param lock_monitor_cb: Callable for registering with lock monitor
354 self._resolver = resolver
356 self._lock_monitor_cb = lock_monitor_cb
359 def _PrepareRequests(hosts, port, procedure, body, read_timeout):
360 """Prepares requests by sorting offline hosts into separate list.
363 @param body: a dictionary with per-host body data
369 assert isinstance(body, dict)
370 assert len(body) == len(hosts)
371 assert compat.all(isinstance(v, str) for v in body.values())
372 assert frozenset(map(lambda x: x[2], hosts)) == frozenset(body.keys()), \
373 "%s != %s" % (hosts, body.keys())
375 for (name, ip, original_name) in hosts:
377 # Node is marked as offline
378 results[original_name] = RpcResult(node=name,
382 requests[original_name] = \
383 http.client.HttpClientRequest(str(ip), port,
384 http.HTTP_POST, str("/%s" % procedure),
385 headers=_RPC_CLIENT_HEADERS,
386 post_data=body[original_name],
387 read_timeout=read_timeout,
388 nicename="%s/%s" % (name, procedure),
389 curl_config_fn=_ConfigRpcCurl)
391 return (results, requests)
394 def _CombineResults(results, requests, procedure):
395 """Combines pre-computed results for offline hosts with actual call results.
398 for name, req in requests.items():
399 if req.success and req.resp_status_code == http.HTTP_OK:
400 host_result = RpcResult(data=serializer.LoadJson(req.resp_body),
401 node=name, call=procedure)
403 # TODO: Better error reporting
409 logging.error("RPC error in %s on node %s: %s", procedure, name, msg)
410 host_result = RpcResult(data=msg, failed=True, node=name,
413 results[name] = host_result
  def __call__(self, nodes, procedure, body, read_timeout, resolver_opts,
               _req_process_fn=None):
    """Makes an RPC request to a number of nodes.

    @type nodes: sequence
    @param nodes: node UUIDs or Hostnames
    @type procedure: string
    @param procedure: Request path
    @type body: dictionary
    @param body: dictionary with request bodies per host
    @type read_timeout: int or None
    @param read_timeout: Read timeout for request
    @param resolver_opts: options passed through to the resolver
    @param _req_process_fn: for testing only; replaces the HTTP request
      processor
    @rtype: dictionary
    @return: a dictionary mapping host names to rpc.RpcResult objects

    """
    assert read_timeout is not None, \
        "Missing RPC read timeout for procedure '%s'" % procedure

    if _req_process_fn is None:
      _req_process_fn = http.client.ProcessRequests

    # Offline nodes are resolved into ready-made results; the rest become
    # HTTP requests
    (results, requests) = \
      self._PrepareRequests(self._resolver(nodes, resolver_opts), self._port,
                            procedure, body, read_timeout)

    _req_process_fn(requests.values(), lock_monitor_cb=self._lock_monitor_cb)

    # No host may appear in both dictionaries
    assert not frozenset(results).intersection(requests)

    return self._CombineResults(results, requests, procedure)
450 class _RpcClientBase:
451 def __init__(self, resolver, encoder_fn, lock_monitor_cb=None,
452 _req_process_fn=None):
453 """Initializes this class.
456 proc = _RpcProcessor(resolver,
457 netutils.GetDaemonPort(constants.NODED),
458 lock_monitor_cb=lock_monitor_cb)
459 self._proc = compat.partial(proc, _req_process_fn=_req_process_fn)
460 self._encoder = compat.partial(self._EncodeArg, encoder_fn)
463 def _EncodeArg(encoder_fn, (argkind, value)):
470 return encoder_fn(argkind)(value)
472 def _Call(self, cdef, node_list, args):
473 """Entry point for automatically generated RPC wrappers.
476 (procedure, _, resolver_opts, timeout, argdefs,
477 prep_fn, postproc_fn, _) = cdef
479 if callable(timeout):
480 read_timeout = timeout(args)
482 read_timeout = timeout
484 if callable(resolver_opts):
485 req_resolver_opts = resolver_opts(args)
487 req_resolver_opts = resolver_opts
489 if len(args) != len(argdefs):
490 raise errors.ProgrammerError("Number of passed arguments doesn't match")
492 enc_args = map(self._encoder, zip(map(compat.snd, argdefs), args))
494 # for a no-op prep_fn, we serialise the body once, and then we
495 # reuse it in the dictionary values
496 body = serializer.DumpJson(enc_args)
497 pnbody = dict((n, body) for n in node_list)
499 # for a custom prep_fn, we pass the encoded arguments and the
500 # node name to the prep_fn, and we serialise its return value
501 assert callable(prep_fn)
502 pnbody = dict((n, serializer.DumpJson(prep_fn(n, enc_args)))
505 result = self._proc(node_list, procedure, pnbody, read_timeout,
509 return dict(map(lambda (key, value): (key, postproc_fn(value)),
def _ObjectToDict(value):
  """Converts an object to a dictionary.

  @note: See L{objects}.

  """
  return value.ToDict()
def _ObjectListToDict(value):
  """Converts a list of L{objects} to dictionaries.

  """
  return map(_ObjectToDict, value)
def _EncodeNodeToDiskDict(value):
  """Encodes a dictionary with node name as key and disk objects as values.

  """
  return dict((name, _ObjectListToDict(disks))
              for name, disks in value.items())
def _PrepareFileUpload(getents_fn, filename):
  """Loads a file and prepares it for an upload to nodes.

  @param getents_fn: callable returning a name resolver, or None to use
    L{runtime.GetEnts}
  @param filename: path of the file to upload
  @return: list with virtual path, compressed contents, mode, owner/group
    names and access/modification times

  """
  statcb = utils.FileStatHelper()
  data = _Compress(utils.ReadFile(filename, preread=statcb))
  # FileStatHelper captured the stat result while the file was still open
  st = statcb.st

  if getents_fn is None:
    getents_fn = runtime.GetEnts

  getents = getents_fn()

  virt_filename = vcluster.MakeVirtualPath(filename)

  return [virt_filename, data, st.st_mode, getents.LookupUid(st.st_uid),
          getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
558 def _PrepareFinalizeExportDisks(snap_disks):
559 """Encodes disks for finalizing export.
564 for disk in snap_disks:
565 if isinstance(disk, bool):
566 flat_disks.append(disk)
568 flat_disks.append(disk.ToDict())
def _EncodeImportExportIO((ieio, ieioargs)):
  """Encodes import/export I/O information.

  @param ieio: I/O type, one of the C{constants.IEIO_*} values
  @param ieioargs: I/O type specific arguments; disk objects are
    converted to dictionaries

  """
  if ieio == constants.IEIO_RAW_DISK:
    assert len(ieioargs) == 1
    return (ieio, (ieioargs[0].ToDict(), ))

  if ieio == constants.IEIO_SCRIPT:
    assert len(ieioargs) == 2
    return (ieio, (ieioargs[0].ToDict(), ieioargs[1]))

  return (ieio, ieioargs)
def _EncodeBlockdevRename(value):
  """Encodes information for renaming block devices.

  @param value: list of (disk, new unique id) tuples

  """
  return [(d.ToDict(), uid) for d, uid in value]
def _AddSpindlesToLegacyNodeInfo(result, space_info):
  """Extracts the spindle information from the space info and adds
  it to the result dictionary.

  @type result: dict of strings
  @param result: dictionary holding the result of the legacy node info
  @type space_info: list of dicts of strings
  @param space_info: list, each row holding space information of one storage
    type
  @rtype: None
  @return: does not return anything, manipulates the C{result} variable

  """
  lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
      space_info, constants.ST_LVM_PV)
  # Without PV information no spindle data can be derived; result is left
  # untouched in that case
  if lvm_pv_info:
    result["spindles_free"] = lvm_pv_info["storage_free"]
    result["spindles_total"] = lvm_pv_info["storage_size"]
def _AddDefaultStorageInfoToLegacyNodeInfo(result, space_info,
                                           require_vg_info=True):
  """Extracts the storage space information of the default storage type from
  the space info and adds it to the result dictionary.

  @see: C{_AddSpindlesToLegacyNodeInfo} for parameter information.
  @type require_vg_info: boolean
  @param require_vg_info: indicates whether volume group information is
    required or optional
  @raise errors.OpExecError: if C{require_vg_info} is set but no LVM volume
    group information is present

  """
  # Check if there is at least one row for non-spindle storage info.
  no_defaults = (len(space_info) < 1) or \
      (space_info[0]["type"] == constants.ST_LVM_PV and len(space_info) == 1)

  default_space_info = None
  if no_defaults:
    logging.warning("No storage info provided for default storage type.")
  else:
    # The first row is assumed to hold the default storage type's data
    default_space_info = space_info[0]

  if require_vg_info:
    # if lvm storage is required, ignore the actual default and look for LVM
    lvm_info_found = False
    for space_entry in space_info:
      if space_entry["type"] == constants.ST_LVM_VG:
        default_space_info = space_entry
        lvm_info_found = True
    if not lvm_info_found:
      raise errors.OpExecError("LVM volume group info required, but not"
                               " provided.")

  if default_space_info:
    result["name"] = default_space_info["name"]
    result["storage_free"] = default_space_info["storage_free"]
    result["storage_size"] = default_space_info["storage_size"]
def MakeLegacyNodeInfo(data, require_vg_info=True):
  """Formats the data returned by L{rpc.RpcRunner.call_node_info}.

  Converts the data into a single dictionary. This is fine for most use cases,
  but some require information from more than one volume group or hypervisor.

  @param data: (bootid, space_info, (hv_info, )) tuple as returned by the
    node info RPC
  @param require_vg_info: raise an error if the returnd vg_info
    doesn't have any values

  """
  (bootid, space_info, (hv_info, )) = data

  ret = utils.JoinDisjointDicts(hv_info, {"bootid": bootid})

  _AddSpindlesToLegacyNodeInfo(ret, space_info)
  _AddDefaultStorageInfoToLegacyNodeInfo(ret, space_info,
                                         require_vg_info=require_vg_info)

  return ret
675 def _AnnotateDParamsDRBD(disk, (drbd_params, data_params, meta_params)):
676 """Annotates just DRBD disks layouts.
679 assert disk.dev_type == constants.LD_DRBD8
681 disk.params = objects.FillDict(drbd_params, disk.params)
682 (dev_data, dev_meta) = disk.children
683 dev_data.params = objects.FillDict(data_params, dev_data.params)
684 dev_meta.params = objects.FillDict(meta_params, dev_meta.params)
689 def _AnnotateDParamsGeneric(disk, (params, )):
690 """Generic disk parameter annotation routine.
693 assert disk.dev_type != constants.LD_DRBD8
695 disk.params = objects.FillDict(params, disk.params)
def AnnotateDiskParams(template, disks, disk_params):
  """Annotates the disk objects with the disk parameters.

  @param template: The disk template used
  @param disks: The list of disks objects to annotate
  @param disk_params: The disk paramaters for annotation
  @returns: A list of disk objects annotated

  """
  ld_params = objects.Disk.ComputeLDParams(template, disk_params)

  if template == constants.DT_DRBD8:
    annotation_fn = _AnnotateDParamsDRBD
  elif template == constants.DT_DISKLESS:
    # Diskless instances have nothing to annotate
    annotation_fn = lambda disk, _: disk
  else:
    annotation_fn = _AnnotateDParamsGeneric

  # Work on copies so the configuration objects are left untouched
  return [annotation_fn(disk.Copy(), ld_params) for disk in disks]
def _GetESFlag(cfg, node_uuid):
  """Reads the exclusive_storage node parameter for one node.

  @raise errors.OpPrereqError: if the node is not in the configuration

  """
  ni = cfg.GetNodeInfo(node_uuid)
  if ni is None:
    raise errors.OpPrereqError("Invalid node name %s" % node_uuid,
                               errors.ECODE_NOENT)
  return cfg.GetNdParams(ni)[constants.ND_EXCLUSIVE_STORAGE]
def GetExclusiveStorageForNodes(cfg, node_uuids):
  """Return the exclusive storage flag for all the given nodes.

  @type cfg: L{config.ConfigWriter}
  @param cfg: cluster configuration
  @type node_uuids: list or tuple
  @param node_uuids: node UUIDs for which to read the flag
  @rtype: dict
  @return: mapping from node names to exclusive storage flags
  @raise errors.OpPrereqError: if any given node name has no corresponding
    node

  """
  getflag = lambda n: _GetESFlag(cfg, n)
  flags = map(getflag, node_uuids)
  return dict(zip(node_uuids, flags))
#: Generic encoders usable without a configuration object
_ENCODERS = {
  rpc_defs.ED_OBJECT_DICT: _ObjectToDict,
  rpc_defs.ED_OBJECT_DICT_LIST: _ObjectListToDict,
  rpc_defs.ED_NODE_TO_DISK_DICT: _EncodeNodeToDiskDict,
  rpc_defs.ED_COMPRESS: _Compress,
  rpc_defs.ED_FINALIZE_EXPORT_DISKS: _PrepareFinalizeExportDisks,
  rpc_defs.ED_IMPEXP_IO: _EncodeImportExportIO,
  rpc_defs.ED_BLOCKDEV_RENAME: _EncodeBlockdevRename,
  }
759 class RpcRunner(_RpcClientBase,
760 _generated_rpc.RpcClientDefault,
761 _generated_rpc.RpcClientBootstrap,
762 _generated_rpc.RpcClientDnsOnly,
763 _generated_rpc.RpcClientConfig):
767 def __init__(self, cfg, lock_monitor_cb, _req_process_fn=None, _getents=None):
768 """Initialized the RPC runner.
770 @type cfg: L{config.ConfigWriter}
771 @param cfg: Configuration
772 @type lock_monitor_cb: callable
773 @param lock_monitor_cb: Lock monitor callback
778 encoders = _ENCODERS.copy()
781 # Encoders requiring configuration object
782 rpc_defs.ED_INST_DICT: self._InstDict,
783 rpc_defs.ED_INST_DICT_HVP_BEP_DP: self._InstDictHvpBepDp,
784 rpc_defs.ED_INST_DICT_OSP_DP: self._InstDictOspDp,
785 rpc_defs.ED_NIC_DICT: self._NicDict,
787 # Encoders annotating disk parameters
788 rpc_defs.ED_DISKS_DICT_DP: self._DisksDictDP,
789 rpc_defs.ED_SINGLE_DISK_DICT_DP: self._SingleDiskDictDP,
791 # Encoders with special requirements
792 rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
795 # Resolver using configuration
796 resolver = compat.partial(_NodeConfigResolver, cfg.GetNodeInfo,
799 # Pylint doesn't recognize multiple inheritance properly, see
800 # <http://www.logilab.org/ticket/36586> and
801 # <http://www.logilab.org/ticket/35642>
802 # pylint: disable=W0233
803 _RpcClientBase.__init__(self, resolver, encoders.get,
804 lock_monitor_cb=lock_monitor_cb,
805 _req_process_fn=_req_process_fn)
806 _generated_rpc.RpcClientConfig.__init__(self)
807 _generated_rpc.RpcClientBootstrap.__init__(self)
808 _generated_rpc.RpcClientDnsOnly.__init__(self)
809 _generated_rpc.RpcClientDefault.__init__(self)
811 def _NicDict(self, nic):
812 """Convert the given nic to a dict and encapsulate netinfo
815 n = copy.deepcopy(nic)
817 net_uuid = self._cfg.LookupNetwork(n.network)
819 nobj = self._cfg.GetNetwork(net_uuid)
820 n.netinfo = objects.Network.ToDict(nobj)
823 def _InstDict(self, instance, hvp=None, bep=None, osp=None):
824 """Convert the given instance to a dict.
826 This is done via the instance's ToDict() method and additionally
827 we fill the hvparams with the cluster defaults.
829 @type instance: L{objects.Instance}
830 @param instance: an Instance object
831 @type hvp: dict or None
832 @param hvp: a dictionary with overridden hypervisor parameters
833 @type bep: dict or None
834 @param bep: a dictionary with overridden backend parameters
835 @type osp: dict or None
836 @param osp: a dictionary with overridden os parameters
838 @return: the instance dict, with the hvparams filled with the
842 idict = instance.ToDict()
843 cluster = self._cfg.GetClusterInfo()
844 idict["hvparams"] = cluster.FillHV(instance)
846 idict["hvparams"].update(hvp)
847 idict["beparams"] = cluster.FillBE(instance)
849 idict["beparams"].update(bep)
850 idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
852 idict["osparams"].update(osp)
853 idict["disks"] = self._DisksDictDP((instance.disks, instance))
854 for nic in idict["nics"]:
855 nic["nicparams"] = objects.FillDict(
856 cluster.nicparams[constants.PP_DEFAULT],
858 network = nic.get("network", None)
860 net_uuid = self._cfg.LookupNetwork(network)
862 nobj = self._cfg.GetNetwork(net_uuid)
863 nic["netinfo"] = objects.Network.ToDict(nobj)
866 def _InstDictHvpBepDp(self, (instance, hvp, bep)):
867 """Wrapper for L{_InstDict}.
870 return self._InstDict(instance, hvp=hvp, bep=bep)
872 def _InstDictOspDp(self, (instance, osparams)):
873 """Wrapper for L{_InstDict}.
876 return self._InstDict(instance, osp=osparams)
878 def _DisksDictDP(self, (disks, instance)):
879 """Wrapper for L{AnnotateDiskParams}.
882 diskparams = self._cfg.GetInstanceDiskParams(instance)
883 return [disk.ToDict()
884 for disk in AnnotateDiskParams(instance.disk_template,
887 def _SingleDiskDictDP(self, (disk, instance)):
888 """Wrapper for L{AnnotateDiskParams}.
891 (anno_disk,) = self._DisksDictDP(([disk], instance))
class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
  """RPC wrappers for job queue.

  """
  def __init__(self, context, address_list):
    """Initializes this class.

    @param context: Ganeti context (provides the lock monitor)
    @param address_list: list of static addresses, or None to resolve
      via ssconf

    """
    if address_list is None:
      resolver = compat.partial(_SsconfResolver, True)
    else:
      # Caller provided an address list
      resolver = _StaticResolver(address_list)

    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
                            lock_monitor_cb=context.glm.AddToLockMonitor)
    _generated_rpc.RpcClientJobQueue.__init__(self)
class BootstrapRunner(_RpcClientBase,
                      _generated_rpc.RpcClientBootstrap,
                      _generated_rpc.RpcClientDnsOnly):
  """RPC wrappers for bootstrapping.

  """
  def __init__(self):
    """Initializes this class.

    """
    # Pylint doesn't recognize multiple inheritance properly, see
    # <http://www.logilab.org/ticket/36586> and
    # <http://www.logilab.org/ticket/35642>
    # pylint: disable=W0233
    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, True),
                            _ENCODERS.get)
    _generated_rpc.RpcClientBootstrap.__init__(self)
    _generated_rpc.RpcClientDnsOnly.__init__(self)
class DnsOnlyRunner(_RpcClientBase, _generated_rpc.RpcClientDnsOnly):
  """RPC wrappers for calls using only DNS.

  """
  def __init__(self):
    """Initialize this class.

    """
    # Resolve names via DNS only, never via ssconf
    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, False),
                            _ENCODERS.get)
    _generated_rpc.RpcClientDnsOnly.__init__(self)
class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
  """RPC wrappers for L{config}.

  """
  def __init__(self, context, address_list, _req_process_fn=None,
               _getents=None):
    """Initializes this class.

    @param context: Ganeti context (lock monitor) or None
    @param address_list: list of static addresses, or None to resolve
      via ssconf
    @param _req_process_fn: for testing only
    @param _getents: for testing only

    """
    if context:
      lock_monitor_cb = context.glm.AddToLockMonitor
    else:
      lock_monitor_cb = None

    if address_list is None:
      resolver = compat.partial(_SsconfResolver, True)
    else:
      # Caller provided an address list
      resolver = _StaticResolver(address_list)

    encoders = _ENCODERS.copy()

    encoders.update({
      rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
      })

    _RpcClientBase.__init__(self, resolver, encoders.get,
                            lock_monitor_cb=lock_monitor_cb,
                            _req_process_fn=_req_process_fn)
    _generated_rpc.RpcClientConfig.__init__(self)