# if they need to start using instance attributes
# R0904: Too many public methods
-import os
import logging
import zlib
import base64
from ganeti import ssconf
from ganeti import runtime
from ganeti import compat
+from ganeti import rpc_defs
# Special module generated at build time
from ganeti import _generated_rpc
raise ec(*args) # pylint: disable=W0142
-def _SsconfResolver(node_list,
+def _SsconfResolver(ssconf_ips, node_list, _,
ssc=ssconf.SimpleStore,
nslookup_fn=netutils.Hostname.GetIP):
"""Return addresses for given node names.
+ @type ssconf_ips: bool
+ @param ssconf_ips: Use the ssconf IPs
@type node_list: list
@param node_list: List of node names
@type ssc: class
"""
ss = ssc()
- iplist = ss.GetNodePrimaryIPList()
family = ss.GetPrimaryIPFamily()
- ipmap = dict(entry.split() for entry in iplist)
+
+ if ssconf_ips:
+ iplist = ss.GetNodePrimaryIPList()
+ ipmap = dict(entry.split() for entry in iplist)
+ else:
+ ipmap = {}
result = []
for node in node_list:
"""
self._addresses = addresses
- def __call__(self, hosts):
+ def __call__(self, hosts, _):
"""Returns static addresses for hosts.
"""
return zip(hosts, self._addresses)
-def _CheckConfigNode(name, node):
+def _CheckConfigNode(name, node, accept_offline_node):
"""Checks if a node is online.
@type name: string
if node is None:
# Depend on DNS for name resolution
ip = name
- elif node.offline:
+ elif node.offline and not accept_offline_node:
ip = _OFFLINE
else:
ip = node.primary_ip
return (name, ip)
def _NodeConfigResolver(single_node_fn, all_nodes_fn, hosts, opts):
  """Calculate node addresses using configuration.

  @type single_node_fn: callable
  @param single_node_fn: Function returning the node object for a single node
    name (must expose C{offline} and C{primary_ip}); used for one-host lookups
  @type all_nodes_fn: callable
  @param all_nodes_fn: Function returning a mapping of all node names to their
    node objects
  @type hosts: list of strings
  @param hosts: Node names to resolve
  @param opts: Either C{None} or L{rpc_defs.ACCEPT_OFFLINE_NODE}; any other
    value trips the assertion below
  @return: List of C{(name, ip)} tuples as produced by L{_CheckConfigNode}

  """
  accept_offline_node = (opts is rpc_defs.ACCEPT_OFFLINE_NODE)

  assert accept_offline_node or opts is None, "Unknown option"

  # Special case for single-host lookups
  if len(hosts) == 1:
    (name, ) = hosts
    return [_CheckConfigNode(name, single_node_fn(name), accept_offline_node)]
  else:
    all_nodes = all_nodes_fn()
    return [_CheckConfigNode(name, all_nodes.get(name, None),
                             accept_offline_node)
            for name in hosts]
def _PrepareRequests(hosts, port, procedure, body, read_timeout):
"""Prepares requests by sorting offline hosts into separate list.
+ @type body: dict
+ @param body: a dictionary with per-host body data
+
"""
results = {}
requests = {}
+ assert isinstance(body, dict)
+ assert len(body) == len(hosts)
+ assert compat.all(isinstance(v, str) for v in body.values())
+ assert frozenset(map(compat.fst, hosts)) == frozenset(body.keys()), \
+ "%s != %s" % (hosts, body.keys())
+
for (name, ip) in hosts:
if ip is _OFFLINE:
# Node is marked as offline
else:
requests[name] = \
http.client.HttpClientRequest(str(ip), port,
- http.HTTP_PUT, str("/%s" % procedure),
+ http.HTTP_POST, str("/%s" % procedure),
headers=_RPC_CLIENT_HEADERS,
- post_data=body,
+ post_data=body[name],
read_timeout=read_timeout,
nicename="%s/%s" % (name, procedure),
curl_config_fn=_ConfigRpcCurl)
return results
- def __call__(self, hosts, procedure, body, read_timeout=None,
- _req_process_fn=http.client.ProcessRequests):
+ def __call__(self, hosts, procedure, body, read_timeout, resolver_opts,
+ _req_process_fn=None):
"""Makes an RPC request to a number of nodes.
@type hosts: sequence
@param hosts: Hostnames
@type procedure: string
@param procedure: Request path
- @type body: string
- @param body: Request body
+ @type body: dictionary
+ @param body: dictionary with request bodies per host
@type read_timeout: int or None
@param read_timeout: Read timeout for request
assert read_timeout is not None, \
"Missing RPC read timeout for procedure '%s'" % procedure
+ if _req_process_fn is None:
+ _req_process_fn = http.client.ProcessRequests
+
(results, requests) = \
- self._PrepareRequests(self._resolver(hosts), self._port, procedure,
- str(body), read_timeout)
+ self._PrepareRequests(self._resolver(hosts, resolver_opts), self._port,
+ procedure, body, read_timeout)
_req_process_fn(requests.values(), lock_monitor_cb=self._lock_monitor_cb)
return self._CombineResults(results, requests, procedure)
-class RpcRunner(_generated_rpc.RpcClientDefault,
class _RpcClientBase:
  """Base class for RPC client mix-ins.

  Combines a name resolver, per-kind argument encoders and an
  L{_RpcProcessor} into the C{_Call} entry point used by the generated RPC
  wrapper classes.

  """
  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None,
               _req_process_fn=None):
    """Initializes this class.

    @param resolver: Callable mapping C{(hosts, options)} to a list of
      C{(name, ip)} tuples
    @param encoder_fn: Callable returning the encoder for a given argument
      kind (e.g. C{_ENCODERS.get})
    @param lock_monitor_cb: Callback to register requests with the lock
      monitor, or C{None}
    @param _req_process_fn: Private override of the request-processing
      function (passed through to the processor)

    """
    proc = _RpcProcessor(resolver,
                         netutils.GetDaemonPort(constants.NODED),
                         lock_monitor_cb=lock_monitor_cb)
    self._proc = compat.partial(proc, _req_process_fn=_req_process_fn)
    # Bind the encoder lookup once; _Call applies the result per argument
    self._encoder = compat.partial(self._EncodeArg, encoder_fn)

  @staticmethod
  def _EncodeArg(encoder_fn, (argkind, value)):
    """Encode argument.

    @param encoder_fn: Callable returning the encoder for an argument kind
    @param argkind: Argument kind, or C{None} to pass the value through
      unchanged
    @param value: Argument value
    @return: Encoded value

    """
    if argkind is None:
      return value
    else:
      return encoder_fn(argkind)(value)

  def _Call(self, cdef, node_list, args):
    """Entry point for automatically generated RPC wrappers.

    @param cdef: 8-tuple call definition: (procedure name, ignored here,
      resolver options, timeout, argument definitions, body preparation
      function, post-processing function, ignored here)
    @param node_list: Names of the nodes to contact
    @param args: Call arguments; must match the argument definitions in
      length
    @return: Per-node result mapping (each value post-processed when a
      post-processing function is defined)

    """
    (procedure, _, resolver_opts, timeout, argdefs,
     prep_fn, postproc_fn, _) = cdef

    # The timeout may be a constant or a function of the arguments
    if callable(timeout):
      read_timeout = timeout(args)
    else:
      read_timeout = timeout

    # Resolver options may likewise be constant or computed per call
    if callable(resolver_opts):
      req_resolver_opts = resolver_opts(args)
    else:
      req_resolver_opts = resolver_opts

    if len(args) != len(argdefs):
      raise errors.ProgrammerError("Number of passed arguments doesn't match")

    enc_args = map(self._encoder, zip(map(compat.snd, argdefs), args))
    if prep_fn is None:
      # for a no-op prep_fn, we serialise the body once, and then we
      # reuse it in the dictionary values
      body = serializer.DumpJson(enc_args)
      pnbody = dict((n, body) for n in node_list)
    else:
      # for a custom prep_fn, we pass the encoded arguments and the
      # node name to the prep_fn, and we serialise its return value
      assert callable(prep_fn)
      pnbody = dict((n, serializer.DumpJson(prep_fn(n, enc_args)))
                    for n in node_list)

    result = self._proc(node_list, procedure, pnbody, read_timeout,
                        req_resolver_opts)

    if postproc_fn:
      return dict(map(lambda (key, value): (key, postproc_fn(value)),
                      result.items()))
    else:
      return result
+
+
+def _ObjectToDict(value):
+ """Converts an object to a dictionary.
+
+ @note: See L{objects}.
+
+ """
+ return value.ToDict()
+
+
+def _ObjectListToDict(value):
+ """Converts a list of L{objects} to dictionaries.
+
+ """
+ return map(_ObjectToDict, value)
+
+
+def _EncodeNodeToDiskDict(value):
+ """Encodes a dictionary with node name as key and disk objects as values.
+
+ """
+ return dict((name, _ObjectListToDict(disks))
+ for name, disks in value.items())
+
+
def _PrepareFileUpload(getents_fn, filename):
  """Loads a file and prepares it for an upload to nodes.

  @type getents_fn: callable or None
  @param getents_fn: Function returning a user/group entity resolver;
    defaults to L{runtime.GetEnts} when C{None}
  @type filename: string
  @param filename: Path of the file to read
  @return: List of C{[filename, compressed data, mode, uid name, gid name,
    atime, mtime]}

  """
  # The stat result is captured via the preread callback while the file is
  # being read, instead of a separate stat() call afterwards
  statcb = utils.FileStatHelper()
  data = _Compress(utils.ReadFile(filename, preread=statcb))
  st = statcb.st

  if getents_fn is None:
    getents_fn = runtime.GetEnts

  getents = getents_fn()

  return [filename, data, st.st_mode, getents.LookupUid(st.st_uid),
          getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
+
+
+def _PrepareFinalizeExportDisks(snap_disks):
+ """Encodes disks for finalizing export.
+
+ """
+ flat_disks = []
+
+ for disk in snap_disks:
+ if isinstance(disk, bool):
+ flat_disks.append(disk)
+ else:
+ flat_disks.append(disk.ToDict())
+
+ return flat_disks
+
+
def _EncodeImportExportIO((ieio, ieioargs)):
  """Encodes import/export I/O information.

  @param ieio: One of the C{constants.IEIO_*} values
  @param ieioargs: I/O arguments; for C{IEIO_RAW_DISK} a 1-tuple containing a
    disk object, for C{IEIO_SCRIPT} a 2-tuple whose first element is a disk
    object (the second element is passed through unchanged)
  @return: Tuple of C{(ieio, encoded arguments)}

  """
  if ieio == constants.IEIO_RAW_DISK:
    # Single disk object, serialize it
    assert len(ieioargs) == 1
    return (ieio, (ieioargs[0].ToDict(), ))

  if ieio == constants.IEIO_SCRIPT:
    # Only the first element (a disk object) needs encoding
    assert len(ieioargs) == 2
    return (ieio, (ieioargs[0].ToDict(), ieioargs[1]))

  # All other kinds are passed through unchanged
  return (ieio, ieioargs)
+
+
+def _EncodeBlockdevRename(value):
+ """Encodes information for renaming block devices.
+
+ """
+ return [(d.ToDict(), uid) for d, uid in value]
+
+
def _AnnotateDParamsDRBD(disk, (drbd_params, data_params, meta_params)):
  """Annotates just DRBD disks layouts.

  @param disk: Disk object; must be of type C{constants.LD_DRBD8} and have
    exactly two children (data and metadata devices)
  @param drbd_params: Parameters filled into the DRBD disk itself
  @param data_params: Parameters filled into the data child device
  @param meta_params: Parameters filled into the metadata child device
  @return: The annotated disk (modified in place)

  """
  assert disk.dev_type == constants.LD_DRBD8

  disk.params = objects.FillDict(drbd_params, disk.params)
  (dev_data, dev_meta) = disk.children
  dev_data.params = objects.FillDict(data_params, dev_data.params)
  dev_meta.params = objects.FillDict(meta_params, dev_meta.params)

  return disk
+
+
def _AnnotateDParamsGeneric(disk, (params, )):
  """Generic disk parameter annotation routine.

  @param disk: Disk object; must not be of type C{constants.LD_DRBD8}
  @param params: Parameters filled into the disk's existing parameters
  @return: The annotated disk (modified in place)

  """
  assert disk.dev_type != constants.LD_DRBD8

  disk.params = objects.FillDict(params, disk.params)

  return disk
+
+
def AnnotateDiskParams(template, disks, disk_params):
  """Annotates the disk objects with the disk parameters.

  @param template: The disk template used
  @param disks: The list of disk objects to annotate
  @param disk_params: The disk parameters for annotation
  @returns: A list of disk objects annotated

  """
  ld_params = objects.Disk.ComputeLDParams(template, disk_params)

  # Select the annotation routine matching the disk template
  if template == constants.DT_DRBD8:
    annotate = _AnnotateDParamsDRBD
  elif template == constants.DT_DISKLESS:
    annotate = lambda disk, _: disk
  else:
    annotate = _AnnotateDParamsGeneric

  # Each disk is copied so the originals are left untouched
  return [annotate(disk.Copy(), ld_params) for disk in disks]
+
+
#: Generic encoders requiring no additional context (cf. the configuration-
#: dependent encoders added in L{RpcRunner.__init__})
_ENCODERS = {
  rpc_defs.ED_OBJECT_DICT: _ObjectToDict,
  rpc_defs.ED_OBJECT_DICT_LIST: _ObjectListToDict,
  rpc_defs.ED_NODE_TO_DISK_DICT: _EncodeNodeToDiskDict,
  rpc_defs.ED_COMPRESS: _Compress,
  rpc_defs.ED_FINALIZE_EXPORT_DISKS: _PrepareFinalizeExportDisks,
  rpc_defs.ED_IMPEXP_IO: _EncodeImportExportIO,
  rpc_defs.ED_BLOCKDEV_RENAME: _EncodeBlockdevRename,
  }
+
+
+class RpcRunner(_RpcClientBase,
+ _generated_rpc.RpcClientDefault,
_generated_rpc.RpcClientBootstrap,
+ _generated_rpc.RpcClientDnsOnly,
_generated_rpc.RpcClientConfig):
"""RPC runner class.
"""
- def __init__(self, context):
+ def __init__(self, cfg, lock_monitor_cb, _req_process_fn=None, _getents=None):
    """Initializes the RPC runner.
- @type context: C{masterd.GanetiContext}
- @param context: Ganeti context
+ @type cfg: L{config.ConfigWriter}
+ @param cfg: Configuration
+ @type lock_monitor_cb: callable
+ @param lock_monitor_cb: Lock monitor callback
"""
+ self._cfg = cfg
+
+ encoders = _ENCODERS.copy()
+
+ encoders.update({
+ # Encoders requiring configuration object
+ rpc_defs.ED_INST_DICT: self._InstDict,
+ rpc_defs.ED_INST_DICT_HVP_BEP_DP: self._InstDictHvpBepDp,
+ rpc_defs.ED_INST_DICT_OSP_DP: self._InstDictOspDp,
+
+ # Encoders annotating disk parameters
+ rpc_defs.ED_DISKS_DICT_DP: self._DisksDictDP,
+ rpc_defs.ED_SINGLE_DISK_DICT_DP: self._SingleDiskDictDP,
+
+ # Encoders with special requirements
+ rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
+ })
+
+ # Resolver using configuration
+ resolver = compat.partial(_NodeConfigResolver, cfg.GetNodeInfo,
+ cfg.GetAllNodesInfo)
+
# Pylint doesn't recognize multiple inheritance properly, see
# <http://www.logilab.org/ticket/36586> and
# <http://www.logilab.org/ticket/35642>
# pylint: disable=W0233
+ _RpcClientBase.__init__(self, resolver, encoders.get,
+ lock_monitor_cb=lock_monitor_cb,
+ _req_process_fn=_req_process_fn)
_generated_rpc.RpcClientConfig.__init__(self)
_generated_rpc.RpcClientBootstrap.__init__(self)
+ _generated_rpc.RpcClientDnsOnly.__init__(self)
_generated_rpc.RpcClientDefault.__init__(self)
- self._cfg = context.cfg
- self._proc = _RpcProcessor(compat.partial(_NodeConfigResolver,
- self._cfg.GetNodeInfo,
- self._cfg.GetAllNodesInfo),
- netutils.GetDaemonPort(constants.NODED),
- lock_monitor_cb=context.glm.AddToLockMonitor)
-
def _InstDict(self, instance, hvp=None, bep=None, osp=None):
"""Convert the given instance to a dict.
if osp is not None:
idict["osparams"].update(osp)
for nic in idict["nics"]:
- nic['nicparams'] = objects.FillDict(
+ nic["nicparams"] = objects.FillDict(
cluster.nicparams[constants.PP_DEFAULT],
- nic['nicparams'])
+ nic["nicparams"])
+ idict["disks"] = self._DisksDictDP((instance.disks, instance))
return idict
- def _InstDictHvpBep(self, (instance, hvp, bep)):
+ def _InstDictHvpBepDp(self, (instance, hvp, bep)):
"""Wrapper for L{_InstDict}.
"""
return self._InstDict(instance, hvp=hvp, bep=bep)
- def _InstDictOsp(self, (instance, osparams)):
+ def _InstDictOspDp(self, (instance, osparams)):
"""Wrapper for L{_InstDict}.
"""
return self._InstDict(instance, osp=osparams)
- def _Call(self, node_list, procedure, timeout, args):
- """Entry point for automatically generated RPC wrappers.
+ def _DisksDictDP(self, (disks, instance)):
+ """Wrapper for L{AnnotateDiskParams}.
"""
- body = serializer.DumpJson(args, indent=False)
-
- return self._proc(node_list, procedure, body, read_timeout=timeout)
-
- @staticmethod
- def _BlockdevFindPostProc(result):
- if not result.fail_msg and result.payload is not None:
- result.payload = objects.BlockDevStatus.FromDict(result.payload)
- return result
-
- @staticmethod
- def _BlockdevGetMirrorStatusPostProc(result):
- if not result.fail_msg:
- result.payload = [objects.BlockDevStatus.FromDict(i)
- for i in result.payload]
- return result
-
- @staticmethod
- def _BlockdevGetMirrorStatusMultiPostProc(result):
- for nres in result.values():
- if nres.fail_msg:
- continue
-
- for idx, (success, status) in enumerate(nres.payload):
- if success:
- nres.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
-
- return result
-
- @staticmethod
- def _OsGetPostProc(result):
- if not result.fail_msg and isinstance(result.payload, dict):
- result.payload = objects.OS.FromDict(result.payload)
- return result
-
- @staticmethod
- def _PrepareFinalizeExportDisks(snap_disks):
- flat_disks = []
-
- for disk in snap_disks:
- if isinstance(disk, bool):
- flat_disks.append(disk)
- else:
- flat_disks.append(disk.ToDict())
-
- return flat_disks
-
- @staticmethod
- def _ImpExpStatusPostProc(result):
- """Post-processor for import/export status.
+ diskparams = self._cfg.GetInstanceDiskParams(instance)
+ return [disk.ToDict()
+ for disk in AnnotateDiskParams(instance.disk_template,
+ disks, diskparams)]
- @rtype: Payload containing list of L{objects.ImportExportStatus} instances
- @return: Returns a list of the state of each named import/export or None if
- a status couldn't be retrieved
+ def _SingleDiskDictDP(self, (disk, instance)):
+ """Wrapper for L{AnnotateDiskParams}.
"""
- if not result.fail_msg:
- decoded = []
-
- for i in result.payload:
- if i is None:
- decoded.append(None)
- continue
- decoded.append(objects.ImportExportStatus.FromDict(i))
-
- result.payload = decoded
+ (anno_disk,) = self._DisksDictDP(([disk], instance))
+ return anno_disk
- return result
-
- @staticmethod
- def _EncodeImportExportIO(ieio, ieioargs):
- """Encodes import/export I/O information.
- """
- if ieio == constants.IEIO_RAW_DISK:
- assert len(ieioargs) == 1
- return (ieioargs[0].ToDict(), )
-
- if ieio == constants.IEIO_SCRIPT:
- assert len(ieioargs) == 2
- return (ieioargs[0].ToDict(), ieioargs[1])
-
- return ieioargs
-
- @staticmethod
- def _PrepareFileUpload(filename):
- """Loads a file and prepares it for an upload to nodes.
-
- """
- data = _Compress(utils.ReadFile(filename))
- st = os.stat(filename)
- getents = runtime.GetEnts()
- return [filename, data, st.st_mode, getents.LookupUid(st.st_uid),
- getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
-
- #
- # Begin RPC calls
- #
-
- def call_test_delay(self, node_list, duration, read_timeout=None):
- """Sleep for a fixed time on given node(s).
-
- This is a multi-node call.
-
- """
- assert read_timeout is None
- return self.call_test_delay(node_list, duration,
- read_timeout=int(duration + 5))
-
-
-class JobQueueRunner(_generated_rpc.RpcClientJobQueue):
class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
  """RPC wrappers for job queue.

  """
  def __init__(self, context, address_list):
    """Initializes this class.

    @param context: Ganeti context (provides C{glm.AddToLockMonitor})
    @type address_list: list or None
    @param address_list: Node addresses; when C{None}, names are resolved
      through ssconf

    """
    if address_list is None:
      # Resolve node names via the ssconf primary-IP list
      resolver = compat.partial(_SsconfResolver, True)
    else:
      # Caller provided an address list
      resolver = _StaticResolver(address_list)

    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
                            lock_monitor_cb=context.glm.AddToLockMonitor)
    _generated_rpc.RpcClientJobQueue.__init__(self)
-class BootstrapRunner(_generated_rpc.RpcClientBootstrap):
+class BootstrapRunner(_RpcClientBase,
+ _generated_rpc.RpcClientBootstrap,
+ _generated_rpc.RpcClientDnsOnly):
"""RPC wrappers for bootstrapping.
"""
"""Initializes this class.
"""
+ # Pylint doesn't recognize multiple inheritance properly, see
+ # <http://www.logilab.org/ticket/36586> and
+ # <http://www.logilab.org/ticket/35642>
+ # pylint: disable=W0233
+ _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, True),
+ _ENCODERS.get)
_generated_rpc.RpcClientBootstrap.__init__(self)
+ _generated_rpc.RpcClientDnsOnly.__init__(self)
- self._proc = _RpcProcessor(_SsconfResolver,
- netutils.GetDaemonPort(constants.NODED))
- def _Call(self, node_list, procedure, timeout, args):
- """Entry point for automatically generated RPC wrappers.
class DnsOnlyRunner(_RpcClientBase, _generated_rpc.RpcClientDnsOnly):
  """RPC wrappers for calls using only DNS.

  """
  def __init__(self):
    """Initializes this class.

    """
    # ssconf_ips=False makes the resolver fall back to DNS for all names
    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, False),
                            _ENCODERS.get)
    _generated_rpc.RpcClientDnsOnly.__init__(self)
class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
  """RPC wrappers for L{config}.

  """
  def __init__(self, context, address_list, _req_process_fn=None,
               _getents=None):
    """Initializes this class.

    @param context: Ganeti context (provides C{glm.AddToLockMonitor}), or
      C{None} to disable lock monitoring
    @type address_list: list or None
    @param address_list: Node addresses; when C{None}, names are resolved
      through ssconf
    @param _req_process_fn: Private override of the request-processing
      function (passed through to L{_RpcClientBase})
    @param _getents: Private override of the entity-resolver factory used by
      L{_PrepareFileUpload}

    """
    if context:
      lock_monitor_cb = context.glm.AddToLockMonitor
    else:
      lock_monitor_cb = None

    if address_list is None:
      # Resolve node names via the ssconf primary-IP list
      resolver = compat.partial(_SsconfResolver, True)
    else:
      # Caller provided an address list
      resolver = _StaticResolver(address_list)

    encoders = _ENCODERS.copy()

    # File uploads need the entity resolver to look up owner names
    encoders.update({
      rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
      })

    _RpcClientBase.__init__(self, resolver, encoders.get,
                            lock_monitor_cb=lock_monitor_cb,
                            _req_process_fn=_req_process_fn)
    _generated_rpc.RpcClientConfig.__init__(self)