X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/890ea4cef63a0a397f4be759c380647737d736d0..d46c9fd68ac69503e748b47a7c6a4c6aa297e221:/lib/rpc.py

diff --git a/lib/rpc.py b/lib/rpc.py
index 1269845..0c4bad0 100644
--- a/lib/rpc.py
+++ b/lib/rpc.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -30,12 +30,12 @@
 # if they need to start using instance attributes
 # R0904: Too many public methods
 
-import os
 import logging
 import zlib
 import base64
 import pycurl
 import threading
+import copy
 
 from ganeti import utils
 from ganeti import objects
@@ -48,6 +48,8 @@ from ganeti import ssconf
 from ganeti import runtime
 from ganeti import compat
 from ganeti import rpc_defs
+from ganeti import pathutils
+from ganeti import vcluster
 
 # Special module generated at build time
 from ganeti import _generated_rpc
@@ -56,22 +58,11 @@ from ganeti import _generated_rpc
 import ganeti.http.client  # pylint: disable=W0611
 
 
-# Timeout for connecting to nodes (seconds)
-_RPC_CONNECT_TIMEOUT = 5
-
 _RPC_CLIENT_HEADERS = [
   "Content-type: %s" % http.HTTP_APP_JSON,
   "Expect:",
   ]
 
-# Various time constants for the timeout table
-_TMO_URGENT = 60 # one minute
-_TMO_FAST = 5 * 60 # five minutes
-_TMO_NORMAL = 15 * 60 # 15 minutes
-_TMO_SLOW = 3600 # one hour
-_TMO_4HRS = 4 * 3600
-_TMO_1DAY = 86400
-
 #: Special value to describe an offline host
 _OFFLINE = object()
 
@@ -105,7 +96,7 @@ def Shutdown():
 
 
 def _ConfigRpcCurl(curl):
-  noded_cert = str(constants.NODED_CERT_FILE)
+  noded_cert = str(pathutils.NODED_CERT_FILE)
 
   curl.setopt(pycurl.FOLLOWLOCATION, False)
   curl.setopt(pycurl.CAINFO, noded_cert)
@@ -115,7 +106,7 @@ def _ConfigRpcCurl(curl):
   curl.setopt(pycurl.SSLCERT, noded_cert)
   curl.setopt(pycurl.SSLKEYTYPE, "PEM")
   curl.setopt(pycurl.SSLKEY, noded_cert)
-  curl.setopt(pycurl.CONNECTTIMEOUT, _RPC_CONNECT_TIMEOUT)
+  curl.setopt(pycurl.CONNECTTIMEOUT, constants.RPC_CONNECT_TIMEOUT)
 
 
 def RunWithRPC(fn):
@@ -159,7 +150,7 @@ class RpcResult(object):
   """RPC Result class.
 
   This class holds an RPC result. It is needed since in multi-node
-  calls we can't raise an exception just because one one out of many
+  calls we can't raise an exception just because one out of many
   failed, and therefore we use this class to encapsulate the result.
 
   @ivar data: the data payload, for successful results, or None
@@ -240,11 +231,13 @@ class RpcResult(object):
     raise ec(*args) # pylint: disable=W0142
 
 
-def _SsconfResolver(node_list, _,
+def _SsconfResolver(ssconf_ips, node_list, _,
                     ssc=ssconf.SimpleStore,
                     nslookup_fn=netutils.Hostname.GetIP):
   """Return addresses for given node names.
 
+  @type ssconf_ips: bool
+  @param ssconf_ips: Use the ssconf IPs
   @type node_list: list
   @param node_list: List of node names
   @type ssc: class
@@ -256,9 +249,13 @@ def _SsconfResolver(node_list, _,
 
   """
   ss = ssc()
-  iplist = ss.GetNodePrimaryIPList()
   family = ss.GetPrimaryIPFamily()
-  ipmap = dict(entry.split() for entry in iplist)
+
+  if ssconf_ips:
+    iplist = ss.GetNodePrimaryIPList()
+    ipmap = dict(entry.split() for entry in iplist)
+  else:
+    ipmap = {}
 
   result = []
   for node in node_list:
@@ -363,7 +360,7 @@ class _RpcProcessor:
       else:
         requests[name] = \
           http.client.HttpClientRequest(str(ip), port,
-                                        http.HTTP_PUT, str("/%s" % procedure),
+                                        http.HTTP_POST, str("/%s" % procedure),
                                         headers=_RPC_CLIENT_HEADERS,
                                         post_data=body[name],
                                         read_timeout=read_timeout,
@@ -397,7 +394,7 @@ class _RpcProcessor:
     return results
 
   def __call__(self, hosts, procedure, body, read_timeout, resolver_opts,
-               _req_process_fn=http.client.ProcessRequests):
+               _req_process_fn=None):
     """Makes an RPC request to a number of nodes.
 
     @type hosts: sequence
@@ -408,11 +405,16 @@ class _RpcProcessor:
     @param body: dictionary with request bodies per host
     @type read_timeout: int or None
     @param read_timeout: Read timeout for request
+    @rtype: dictionary
+    @return: a dictionary mapping host names to rpc.RpcResult objects
 
     """
     assert read_timeout is not None, \
       "Missing RPC read timeout for procedure '%s'" % procedure
 
+    if _req_process_fn is None:
+      _req_process_fn = http.client.ProcessRequests
+
     (results, requests) = \
       self._PrepareRequests(self._resolver(hosts, resolver_opts), self._port,
                             procedure, body, read_timeout)
@@ -425,13 +427,15 @@ class _RpcProcessor:
 
 
 class _RpcClientBase:
-  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None):
+  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None,
+               _req_process_fn=None):
     """Initializes this class.
 
    """
-    self._proc = _RpcProcessor(resolver,
-                               netutils.GetDaemonPort(constants.NODED),
-                               lock_monitor_cb=lock_monitor_cb)
+    proc = _RpcProcessor(resolver,
+                         netutils.GetDaemonPort(constants.NODED),
+                         lock_monitor_cb=lock_monitor_cb)
+    self._proc = compat.partial(proc, _req_process_fn=_req_process_fn)
     self._encoder = compat.partial(self._EncodeArg, encoder_fn)
 
   @staticmethod
@@ -461,6 +465,9 @@ class _RpcClientBase:
     else:
      req_resolver_opts = resolver_opts
 
+    if len(args) != len(argdefs):
+      raise errors.ProgrammerError("Number of passed arguments doesn't match")
+
     enc_args = map(self._encoder, zip(map(compat.snd, argdefs), args))
     if prep_fn is None:
       # for a no-op prep_fn, we serialise the body once, and then we
@@ -470,7 +477,7 @@ class _RpcClientBase:
     else:
       # for a custom prep_fn, we pass the encoded arguments and the
       # node name to the prep_fn, and we serialise its return value
-      assert(callable(prep_fn))
+      assert callable(prep_fn)
       pnbody = dict((n, serializer.DumpJson(prep_fn(n, enc_args)))
                     for n in node_list)
 
@@ -508,14 +515,22 @@ def _EncodeNodeToDiskDict(value):
               for name, disks in value.items())
 
 
-def _PrepareFileUpload(filename):
+def _PrepareFileUpload(getents_fn, filename):
   """Loads a file and prepares it for an upload to nodes.
 
   """
-  data = _Compress(utils.ReadFile(filename))
-  st = os.stat(filename)
-  getents = runtime.GetEnts()
-  return [filename, data, st.st_mode, getents.LookupUid(st.st_uid),
+  statcb = utils.FileStatHelper()
+  data = _Compress(utils.ReadFile(filename, preread=statcb))
+  st = statcb.st
+
+  if getents_fn is None:
+    getents_fn = runtime.GetEnts
+
+  getents = getents_fn()
+
+  virt_filename = vcluster.MakeVirtualPath(filename)
+
+  return [virt_filename, data, st.st_mode, getents.LookupUid(st.st_uid),
           getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
 
 
@@ -556,12 +571,96 @@ def _EncodeBlockdevRename(value):
   return [(d.ToDict(), uid) for d, uid in value]
 
 
+def MakeLegacyNodeInfo(data):
+  """Formats the data returned by L{rpc.RpcRunner.call_node_info}.
+
+  Converts the data into a single dictionary. This is fine for most use cases,
+  but some require information from more than one volume group or hypervisor.
+
+  """
+  (bootid, (vg_info, ), (hv_info, )) = data
+
+  return utils.JoinDisjointDicts(utils.JoinDisjointDicts(vg_info, hv_info), {
+    "bootid": bootid,
+    })
+
+
+def _AnnotateDParamsDRBD(disk, (drbd_params, data_params, meta_params)):
+  """Annotates just DRBD disks layouts.
+
+  """
+  assert disk.dev_type == constants.LD_DRBD8
+
+  disk.params = objects.FillDict(drbd_params, disk.params)
+  (dev_data, dev_meta) = disk.children
+  dev_data.params = objects.FillDict(data_params, dev_data.params)
+  dev_meta.params = objects.FillDict(meta_params, dev_meta.params)
+
+  return disk
+
+
+def _AnnotateDParamsGeneric(disk, (params, )):
+  """Generic disk parameter annotation routine.
+
+  """
+  assert disk.dev_type != constants.LD_DRBD8
+
+  disk.params = objects.FillDict(params, disk.params)
+
+  return disk
+
+
+def AnnotateDiskParams(template, disks, disk_params):
+  """Annotates the disk objects with the disk parameters.
+
+  @param template: The disk template used
+  @param disks: The list of disks objects to annotate
+  @param disk_params: The disk paramaters for annotation
+  @returns: A list of disk objects annotated
+
+  """
+  ld_params = objects.Disk.ComputeLDParams(template, disk_params)
+
+  if template == constants.DT_DRBD8:
+    annotation_fn = _AnnotateDParamsDRBD
+  elif template == constants.DT_DISKLESS:
+    annotation_fn = lambda disk, _: disk
+  else:
+    annotation_fn = _AnnotateDParamsGeneric
+
+  return [annotation_fn(disk.Copy(), ld_params) for disk in disks]
+
+
+def _GetESFlag(cfg, nodename):
+  ni = cfg.GetNodeInfo(nodename)
+  if ni is None:
+    raise errors.OpPrereqError("Invalid node name %s" % nodename,
+                               errors.ECODE_NOENT)
+  return cfg.GetNdParams(ni)[constants.ND_EXCLUSIVE_STORAGE]
+
+
+def GetExclusiveStorageForNodeNames(cfg, nodelist):
+  """Return the exclusive storage flag for all the given nodes.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: cluster configuration
+  @type nodelist: list or tuple
+  @param nodelist: node names for which to read the flag
+  @rtype: dict
+  @return: mapping from node names to exclusive storage flags
+  @raise errors.OpPrereqError: if any given node name has no corresponding node
+
+  """
+  getflag = lambda n: _GetESFlag(cfg, n)
+  flags = map(getflag, nodelist)
+  return dict(zip(nodelist, flags))
+
+
 #: Generic encoders
 _ENCODERS = {
   rpc_defs.ED_OBJECT_DICT: _ObjectToDict,
   rpc_defs.ED_OBJECT_DICT_LIST: _ObjectListToDict,
   rpc_defs.ED_NODE_TO_DISK_DICT: _EncodeNodeToDiskDict,
-  rpc_defs.ED_FILE_DETAILS: _PrepareFileUpload,
   rpc_defs.ED_COMPRESS: _Compress,
   rpc_defs.ED_FINALIZE_EXPORT_DISKS: _PrepareFinalizeExportDisks,
   rpc_defs.ED_IMPEXP_IO: _EncodeImportExportIO,
@@ -572,42 +671,67 @@ _ENCODERS = {
 
 class RpcRunner(_RpcClientBase,
                 _generated_rpc.RpcClientDefault,
                 _generated_rpc.RpcClientBootstrap,
+                _generated_rpc.RpcClientDnsOnly,
                 _generated_rpc.RpcClientConfig):
   """RPC runner class.
 
   """
-  def __init__(self, context):
+  def __init__(self, cfg, lock_monitor_cb, _req_process_fn=None, _getents=None):
     """Initialized the RPC runner.
 
-    @type context: C{masterd.GanetiContext}
-    @param context: Ganeti context
+    @type cfg: L{config.ConfigWriter}
+    @param cfg: Configuration
+    @type lock_monitor_cb: callable
+    @param lock_monitor_cb: Lock monitor callback
 
     """
-    self._cfg = context.cfg
+    self._cfg = cfg
 
     encoders = _ENCODERS.copy()
 
-    # Add encoders requiring configuration object
     encoders.update({
+      # Encoders requiring configuration object
      rpc_defs.ED_INST_DICT: self._InstDict,
-      rpc_defs.ED_INST_DICT_HVP_BEP: self._InstDictHvpBep,
-      rpc_defs.ED_INST_DICT_OSP: self._InstDictOsp,
+      rpc_defs.ED_INST_DICT_HVP_BEP_DP: self._InstDictHvpBepDp,
+      rpc_defs.ED_INST_DICT_OSP_DP: self._InstDictOspDp,
+      rpc_defs.ED_NIC_DICT: self._NicDict,
+
+      # Encoders annotating disk parameters
+      rpc_defs.ED_DISKS_DICT_DP: self._DisksDictDP,
+      rpc_defs.ED_SINGLE_DISK_DICT_DP: self._SingleDiskDictDP,
+
+      # Encoders with special requirements
+      rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
      })
 
     # Resolver using configuration
-    resolver = compat.partial(_NodeConfigResolver, self._cfg.GetNodeInfo,
-                              self._cfg.GetAllNodesInfo)
+    resolver = compat.partial(_NodeConfigResolver, cfg.GetNodeInfo,
+                              cfg.GetAllNodesInfo)
 
     # Pylint doesn't recognize multiple inheritance properly, see
     # and
    #
     # pylint: disable=W0233
     _RpcClientBase.__init__(self, resolver, encoders.get,
-                            lock_monitor_cb=context.glm.AddToLockMonitor)
+                            lock_monitor_cb=lock_monitor_cb,
+                            _req_process_fn=_req_process_fn)
     _generated_rpc.RpcClientConfig.__init__(self)
     _generated_rpc.RpcClientBootstrap.__init__(self)
+    _generated_rpc.RpcClientDnsOnly.__init__(self)
     _generated_rpc.RpcClientDefault.__init__(self)
 
+  def _NicDict(self, nic):
+    """Convert the given nic to a dict and encapsulate netinfo
+
+    """
+    n = copy.deepcopy(nic)
+    if n.network:
+      net_uuid = self._cfg.LookupNetwork(n.network)
+      if net_uuid:
+        nobj = self._cfg.GetNetwork(net_uuid)
+        n.netinfo = objects.Network.ToDict(nobj)
+    return n.ToDict()
+
   def _InstDict(self, instance, hvp=None, bep=None, osp=None):
     """Convert the given instance to a dict.
@@ -638,24 +762,47 @@ class RpcRunner(_RpcClientBase,
     idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
     if osp is not None:
       idict["osparams"].update(osp)
+    idict["disks"] = self._DisksDictDP((instance.disks, instance))
     for nic in idict["nics"]:
-      nic['nicparams'] = objects.FillDict(
+      nic["nicparams"] = objects.FillDict(
         cluster.nicparams[constants.PP_DEFAULT],
-        nic['nicparams'])
+        nic["nicparams"])
+      network = nic.get("network", None)
+      if network:
+        net_uuid = self._cfg.LookupNetwork(network)
+        if net_uuid:
+          nobj = self._cfg.GetNetwork(net_uuid)
+          nic["netinfo"] = objects.Network.ToDict(nobj)
     return idict
 
-  def _InstDictHvpBep(self, (instance, hvp, bep)):
+  def _InstDictHvpBepDp(self, (instance, hvp, bep)):
     """Wrapper for L{_InstDict}.
 
     """
     return self._InstDict(instance, hvp=hvp, bep=bep)
 
-  def _InstDictOsp(self, (instance, osparams)):
+  def _InstDictOspDp(self, (instance, osparams)):
     """Wrapper for L{_InstDict}.
 
     """
     return self._InstDict(instance, osp=osparams)
 
+  def _DisksDictDP(self, (disks, instance)):
+    """Wrapper for L{AnnotateDiskParams}.
+
+    """
+    diskparams = self._cfg.GetInstanceDiskParams(instance)
+    return [disk.ToDict()
+            for disk in AnnotateDiskParams(instance.disk_template,
+                                           disks, diskparams)]
+
+  def _SingleDiskDictDP(self, (disk, instance)):
+    """Wrapper for L{AnnotateDiskParams}.
+
+    """
+    (anno_disk,) = self._DisksDictDP(([disk], instance))
+    return anno_disk
+
 
 class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
   """RPC wrappers for job queue.
@@ -666,7 +813,7 @@ class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
 
     """
     if address_list is None:
-      resolver = _SsconfResolver
+      resolver = compat.partial(_SsconfResolver, True)
     else:
       # Caller provided an address list
       resolver = _StaticResolver(address_list)
@@ -676,7 +823,9 @@ class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
     _generated_rpc.RpcClientJobQueue.__init__(self)
 
 
-class BootstrapRunner(_RpcClientBase, _generated_rpc.RpcClientBootstrap):
+class BootstrapRunner(_RpcClientBase,
+                      _generated_rpc.RpcClientBootstrap,
+                      _generated_rpc.RpcClientDnsOnly):
   """RPC wrappers for bootstrapping.
 
   """
@@ -684,15 +833,35 @@ class BootstrapRunner(_RpcClientBase, _generated_rpc.RpcClientBootstrap):
     """Initializes this class.
 
     """
-    _RpcClientBase.__init__(self, _SsconfResolver, _ENCODERS.get)
+    # Pylint doesn't recognize multiple inheritance properly, see
+    # and
+    #
+    # pylint: disable=W0233
+    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, True),
+                            _ENCODERS.get)
     _generated_rpc.RpcClientBootstrap.__init__(self)
+    _generated_rpc.RpcClientDnsOnly.__init__(self)
+
+
+class DnsOnlyRunner(_RpcClientBase, _generated_rpc.RpcClientDnsOnly):
+  """RPC wrappers for calls using only DNS.
+
+  """
+  def __init__(self):
+    """Initialize this class.
+
+    """
+    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, False),
+                            _ENCODERS.get)
+    _generated_rpc.RpcClientDnsOnly.__init__(self)
 
 
 class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
   """RPC wrappers for L{config}.
 
   """
-  def __init__(self, context, address_list):
+  def __init__(self, context, address_list, _req_process_fn=None,
+               _getents=None):
     """Initializes this class.
 
     """
@@ -702,11 +871,18 @@ class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
       lock_monitor_cb = None
 
     if address_list is None:
-      resolver = _SsconfResolver
+      resolver = compat.partial(_SsconfResolver, True)
     else:
      # Caller provided an address list
      resolver = _StaticResolver(address_list)
 
-    _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
-                            lock_monitor_cb=lock_monitor_cb)
+    encoders = _ENCODERS.copy()
+
+    encoders.update({
+      rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
+      })
+
+    _RpcClientBase.__init__(self, resolver, encoders.get,
+                            lock_monitor_cb=lock_monitor_cb,
+                            _req_process_fn=_req_process_fn)
    _generated_rpc.RpcClientConfig.__init__(self)