X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/e0036155d195f8d5f2db8d40dfba2ca881cfcf16..3b01286e6de5b54648bbb96dafd6deb67babc01a:/lib/rpc.py diff --git a/lib/rpc.py b/lib/rpc.py index 1343269..9090390 100644 --- a/lib/rpc.py +++ b/lib/rpc.py @@ -1,7 +1,7 @@ # # -# Copyright (C) 2006, 2007 Google Inc. +# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -34,6 +34,8 @@ import os import logging import zlib import base64 +import pycurl +import threading from ganeti import utils from ganeti import objects @@ -41,41 +43,140 @@ from ganeti import http from ganeti import serializer from ganeti import constants from ganeti import errors +from ganeti import netutils +from ganeti import ssconf # pylint has a bug here, doesn't see this import import ganeti.http.client # pylint: disable-msg=W0611 -# Module level variable -_http_manager = None +# Timeout for connecting to nodes (seconds) +_RPC_CONNECT_TIMEOUT = 5 + +_RPC_CLIENT_HEADERS = [ + "Content-type: %s" % http.HTTP_APP_JSON, + "Expect:", + ] + +# Various time constants for the timeout table +_TMO_URGENT = 60 # one minute +_TMO_FAST = 5 * 60 # five minutes +_TMO_NORMAL = 15 * 60 # 15 minutes +_TMO_SLOW = 3600 # one hour +_TMO_4HRS = 4 * 3600 +_TMO_1DAY = 86400 + +# Timeout table that will be built later by decorators +# Guidelines for choosing timeouts: +# - call used during watcher: timeout -> 1min, _TMO_URGENT +# - trivial (but be sure it is trivial) (e.g. reading a file): 5min, _TMO_FAST +# - other calls: 15 min, _TMO_NORMAL +# - special calls (instance add, etc.): either _TMO_SLOW (1h) or huge timeouts + +_TIMEOUTS = { +} def Init(): """Initializes the module-global HTTP client manager. - Must be called before using any RPC function. + Must be called before using any RPC function and while exactly one thread is + running. """ - global _http_manager # pylint: disable-msg=W0603 - - assert not _http_manager, "RPC module initialized more than once" + # curl_global_init(3) and curl_global_cleanup(3) must be called with only + # one thread running. This check is just a safety measure -- it doesn't + # cover all cases. + assert threading.activeCount() == 1, \ + "Found more than one active thread when initializing pycURL" - http.InitSsl() + logging.info("Using PycURL %s", pycurl.version) - _http_manager = http.client.HttpClientManager() + pycurl.global_init(pycurl.GLOBAL_ALL) def Shutdown(): """Stops the module-global HTTP client manager. - Must be called before quitting the program. + Must be called before quitting the program and while exactly one thread is + running. + + """ + pycurl.global_cleanup() + + +def _ConfigRpcCurl(curl): + noded_cert = str(constants.NODED_CERT_FILE) + + curl.setopt(pycurl.FOLLOWLOCATION, False) + curl.setopt(pycurl.CAINFO, noded_cert) + curl.setopt(pycurl.SSL_VERIFYHOST, 0) + curl.setopt(pycurl.SSL_VERIFYPEER, True) + curl.setopt(pycurl.SSLCERTTYPE, "PEM") + curl.setopt(pycurl.SSLCERT, noded_cert) + curl.setopt(pycurl.SSLKEYTYPE, "PEM") + curl.setopt(pycurl.SSLKEY, noded_cert) + curl.setopt(pycurl.CONNECTTIMEOUT, _RPC_CONNECT_TIMEOUT) + + +# Aliasing this module avoids the following warning by epydoc: "Warning: No +# information available for ganeti.rpc._RpcThreadLocal's base threading.local" +_threading = threading + + +class _RpcThreadLocal(_threading.local): + def GetHttpClientPool(self): + """Returns a per-thread HTTP client pool. 
+ + @rtype: L{http.client.HttpClientPool} + + """ + try: + pool = self.hcp + except AttributeError: + pool = http.client.HttpClientPool(_ConfigRpcCurl) + self.hcp = pool + + return pool + + +# Remove module alias (see above) +del _threading + + +_thread_local = _RpcThreadLocal() + + +def _RpcTimeout(secs): + """Timeout decorator. + + When applied to a rpc call_* function, it updates the global timeout + table with the given function/timeout. """ - global _http_manager # pylint: disable-msg=W0603 + def decorator(f): + name = f.__name__ + assert name.startswith("call_") + _TIMEOUTS[name[len("call_"):]] = secs + return f + return decorator - if _http_manager: - _http_manager.Shutdown() - _http_manager = None + +def RunWithRPC(fn): + """RPC-wrapper decorator. + + When applied to a function, it runs it with the RPC system + initialized, and it shutsdown the system afterwards. This means the + function must be called without RPC being initialized. + + """ + def wrapper(*args, **kwargs): + Init() + try: + return fn(*args, **kwargs) + finally: + Shutdown() + return wrapper class RpcResult(object): @@ -160,12 +261,41 @@ class RpcResult(object): else: ec = errors.OpExecError if ecode is not None: - args = (msg, prereq) + args = (msg, ecode) else: args = (msg, ) raise ec(*args) # pylint: disable-msg=W0142 +def _AddressLookup(node_list, + ssc=ssconf.SimpleStore, + nslookup_fn=netutils.Hostname.GetIP): + """Return addresses for given node names. + + @type node_list: list + @param node_list: List of node names + @type ssc: class + @param ssc: SimpleStore class that is used to obtain node->ip mappings + @type nslookup_fn: callable + @param nslookup_fn: function use to do NS lookup + @rtype: list of addresses and/or None's + @returns: List of corresponding addresses, if found + + """ + ss = ssc() + iplist = ss.GetNodePrimaryIPList() + family = ss.GetPrimaryIPFamily() + addresses = [] + ipmap = dict(entry.split() for entry in iplist) + for node in node_list: + address = ipmap.get(node) + if address is None: + address = nslookup_fn(node, family=family) + addresses.append(address) + + return addresses + + class Client: """RPC Client class. @@ -178,15 +308,14 @@ class Client: cause bugs. """ - def __init__(self, procedure, body, port): + def __init__(self, procedure, body, port, address_lookup_fn=_AddressLookup): + assert procedure in _TIMEOUTS, ("New RPC call not declared in the" + " timeouts table") self.procedure = procedure self.body = body self.port = port - self.nc = {} - - self._ssl_params = \ - http.HttpSslParams(ssl_key_path=constants.NODED_CERT_FILE, - ssl_cert_path=constants.NODED_CERT_FILE) + self._request = {} + self._address_lookup_fn = address_lookup_fn def ConnectList(self, node_list, address_list=None, read_timeout=None): """Add a list of nodes to the target nodes. 
@@ -197,15 +326,16 @@ class Client: @keyword address_list: either None or a list with node addresses, which must have the same length as the node list @type read_timeout: int - @param read_timeout: overwrites the default read timeout for the - given operation + @param read_timeout: overwrites default timeout for operation """ if address_list is None: - address_list = [None for _ in node_list] - else: - assert len(node_list) == len(address_list), \ - "Name and address lists should have the same length" + # Always use IP address instead of node name + address_list = self._address_lookup_fn(node_list) + + assert len(node_list) == len(address_list), \ + "Name and address lists must have the same length" + for node, address in zip(node_list, address_list): self.ConnectNode(node, address, read_timeout=read_timeout) @@ -215,34 +345,42 @@ class Client: @type name: str @param name: the node name @type address: str - @keyword address: the node address, if known + @param address: the node address, if known + @type read_timeout: int + @param read_timeout: overwrites default timeout for operation """ if address is None: - address = name - - self.nc[name] = \ - http.client.HttpClientRequest(address, self.port, http.HTTP_PUT, - "/%s" % self.procedure, - post_data=self.body, - ssl_params=self._ssl_params, - ssl_verify_peer=True, + # Always use IP address instead of node name + address = self._address_lookup_fn([name])[0] + + assert(address is not None) + + if read_timeout is None: + read_timeout = _TIMEOUTS[self.procedure] + + self._request[name] = \ + http.client.HttpClientRequest(str(address), self.port, + http.HTTP_PUT, str("/%s" % self.procedure), + headers=_RPC_CLIENT_HEADERS, + post_data=str(self.body), read_timeout=read_timeout) - def GetResults(self): + def GetResults(self, http_pool=None): """Call nodes and return results. @rtype: list @return: List of RPC results """ - assert _http_manager, "RPC module not initialized" + if not http_pool: + http_pool = _thread_local.GetHttpClientPool() - _http_manager.ExecRequests(self.nc.values()) + http_pool.ProcessRequests(self._request.values()) results = {} - for name, req in self.nc.iteritems(): + for name, req in self._request.iteritems(): if req.success and req.resp_status_code == http.HTTP_OK: results[name] = RpcResult(data=serializer.LoadJson(req.resp_body), node=name, call=self.procedure) @@ -289,9 +427,9 @@ class RpcRunner(object): """ self._cfg = cfg - self.port = utils.GetDaemonPort(constants.NODED) + self.port = netutils.GetDaemonPort(constants.NODED) - def _InstDict(self, instance, hvp=None, bep=None): + def _InstDict(self, instance, hvp=None, bep=None, osp=None): """Convert the given instance to a dict. 
This is done via the instance's ToDict() method and additionally @@ -303,6 +441,8 @@ class RpcRunner(object): @param hvp: a dictionary with overridden hypervisor parameters @type bep: dict or None @param bep: a dictionary with overridden backend parameters + @type osp: dict or None + @param osp: a dictionary with overridden os parameters @rtype: dict @return: the instance dict, with the hvparams filled with the cluster defaults @@ -316,6 +456,9 @@ class RpcRunner(object): idict["beparams"] = cluster.FillBE(instance) if bep is not None: idict["beparams"].update(bep) + idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams) + if osp is not None: + idict["osparams"].update(osp) for nic in idict["nics"]: nic['nicparams'] = objects.FillDict( cluster.nicparams[constants.PP_DEFAULT], @@ -398,7 +541,7 @@ class RpcRunner(object): """ body = serializer.DumpJson(args, indent=False) - c = Client(procedure, body, utils.GetDaemonPort(constants.NODED)) + c = Client(procedure, body, netutils.GetDaemonPort(constants.NODED)) c.ConnectList(node_list, address_list=address_list, read_timeout=read_timeout) return c.GetResults() @@ -421,7 +564,7 @@ class RpcRunner(object): """ body = serializer.DumpJson(args, indent=False) - c = Client(procedure, body, utils.GetDaemonPort(constants.NODED)) + c = Client(procedure, body, netutils.GetDaemonPort(constants.NODED)) c.ConnectNode(node, read_timeout=read_timeout) return c.GetResults()[node] @@ -449,6 +592,7 @@ class RpcRunner(object): # Begin RPC calls # + @_RpcTimeout(_TMO_URGENT) def call_lv_list(self, node_list, vg_name): """Gets the logical volumes present in a given volume group. @@ -457,6 +601,7 @@ class RpcRunner(object): """ return self._MultiNodeCall(node_list, "lv_list", [vg_name]) + @_RpcTimeout(_TMO_URGENT) def call_vg_list(self, node_list): """Gets the volume group list. @@ -465,6 +610,7 @@ class RpcRunner(object): """ return self._MultiNodeCall(node_list, "vg_list", []) + @_RpcTimeout(_TMO_NORMAL) def call_storage_list(self, node_list, su_name, su_args, name, fields): """Get list of storage units. @@ -474,6 +620,7 @@ class RpcRunner(object): return self._MultiNodeCall(node_list, "storage_list", [su_name, su_args, name, fields]) + @_RpcTimeout(_TMO_NORMAL) def call_storage_modify(self, node, su_name, su_args, name, changes): """Modify a storage unit. @@ -483,6 +630,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "storage_modify", [su_name, su_args, name, changes]) + @_RpcTimeout(_TMO_NORMAL) def call_storage_execute(self, node, su_name, su_args, name, op): """Executes an operation on a storage unit. @@ -492,6 +640,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "storage_execute", [su_name, su_args, name, op]) + @_RpcTimeout(_TMO_URGENT) def call_bridges_exist(self, node, bridges_list): """Checks if a node has all the bridges given. @@ -504,6 +653,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "bridges_exist", [bridges_list]) + @_RpcTimeout(_TMO_NORMAL) def call_instance_start(self, node, instance, hvp, bep): """Starts an instance. @@ -513,6 +663,7 @@ class RpcRunner(object): idict = self._InstDict(instance, hvp=hvp, bep=bep) return self._SingleNodeCall(node, "instance_start", [idict]) + @_RpcTimeout(_TMO_NORMAL) def call_instance_shutdown(self, node, instance, timeout): """Stops an instance. 
@@ -522,6 +673,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "instance_shutdown", [self._InstDict(instance), timeout]) + @_RpcTimeout(_TMO_NORMAL) def call_migration_info(self, node, instance): """Gather the information necessary to prepare an instance migration. @@ -536,6 +688,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "migration_info", [self._InstDict(instance)]) + @_RpcTimeout(_TMO_NORMAL) def call_accept_instance(self, node, instance, info, target): """Prepare a node to accept an instance. @@ -554,6 +707,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "accept_instance", [self._InstDict(instance), info, target]) + @_RpcTimeout(_TMO_NORMAL) def call_finalize_migration(self, node, instance, info, success): """Finalize any target-node migration specific operation. @@ -575,6 +729,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "finalize_migration", [self._InstDict(instance), info, success]) + @_RpcTimeout(_TMO_SLOW) def call_instance_migrate(self, node, instance, target, live): """Migrate an instance. @@ -594,6 +749,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "instance_migrate", [self._InstDict(instance), target, live]) + @_RpcTimeout(_TMO_NORMAL) def call_instance_reboot(self, node, inst, reboot_type, shutdown_timeout): """Reboots an instance. @@ -604,15 +760,18 @@ class RpcRunner(object): [self._InstDict(inst), reboot_type, shutdown_timeout]) - def call_instance_os_add(self, node, inst, reinstall, debug): + @_RpcTimeout(_TMO_1DAY) + def call_instance_os_add(self, node, inst, reinstall, debug, osparams=None): """Installs an OS on the given instance. This is a single-node call. """ return self._SingleNodeCall(node, "instance_os_add", - [self._InstDict(inst), reinstall, debug]) + [self._InstDict(inst, osp=osparams), + reinstall, debug]) + @_RpcTimeout(_TMO_SLOW) def call_instance_run_rename(self, node, inst, old_name, debug): """Run the OS rename script for an instance. @@ -622,6 +781,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "instance_run_rename", [self._InstDict(inst), old_name, debug]) + @_RpcTimeout(_TMO_URGENT) def call_instance_info(self, node, instance, hname): """Returns information about a single instance. @@ -637,6 +797,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "instance_info", [instance, hname]) + @_RpcTimeout(_TMO_NORMAL) def call_instance_migratable(self, node, instance): """Checks whether the given instance can be migrated. @@ -651,6 +812,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "instance_migratable", [self._InstDict(instance)]) + @_RpcTimeout(_TMO_URGENT) def call_all_instances_info(self, node_list, hypervisor_list): """Returns information about all instances on the given nodes. @@ -665,6 +827,7 @@ class RpcRunner(object): return self._MultiNodeCall(node_list, "all_instances_info", [hypervisor_list]) + @_RpcTimeout(_TMO_URGENT) def call_instance_list(self, node_list, hypervisor_list): """Returns the list of running instances on a given node. 
@@ -678,6 +841,7 @@ class RpcRunner(object): """ return self._MultiNodeCall(node_list, "instance_list", [hypervisor_list]) + @_RpcTimeout(_TMO_FAST) def call_node_tcp_ping(self, node, source, target, port, timeout, live_port_needed): """Do a TcpPing on the remote node @@ -689,6 +853,7 @@ class RpcRunner(object): [source, target, port, timeout, live_port_needed]) + @_RpcTimeout(_TMO_FAST) def call_node_has_ip_address(self, node, address): """Checks if a node has the given IP address. @@ -697,6 +862,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "node_has_ip_address", [address]) + @_RpcTimeout(_TMO_URGENT) def call_node_info(self, node_list, vg_name, hypervisor_type): """Return node information. @@ -718,15 +884,23 @@ class RpcRunner(object): return self._MultiNodeCall(node_list, "node_info", [vg_name, hypervisor_type]) - def call_node_add(self, node, dsa, dsapub, rsa, rsapub, ssh, sshpub): - """Add a node to the cluster. + @_RpcTimeout(_TMO_NORMAL) + def call_etc_hosts_modify(self, node, mode, name, ip): + """Modify hosts file with name - This is a single-node call. + @type node: string + @param node: The node to call + @type mode: string + @param mode: The mode to operate. Currently "add" or "remove" + @type name: string + @param name: The host name to be modified + @type ip: string + @param ip: The ip of the entry (just valid if mode is "add") """ - return self._SingleNodeCall(node, "node_add", - [dsa, dsapub, rsa, rsapub, ssh, sshpub]) + return self._SingleNodeCall(node, "etc_hosts_modify", [mode, name, ip]) + @_RpcTimeout(_TMO_NORMAL) def call_node_verify(self, node_list, checkdict, cluster_name): """Request verification of given parameters. @@ -737,6 +911,7 @@ class RpcRunner(object): [checkdict, cluster_name]) @classmethod + @_RpcTimeout(_TMO_FAST) def call_node_start_master(cls, node, start_daemons, no_voting): """Tells a node to activate itself as a master. @@ -747,6 +922,7 @@ class RpcRunner(object): [start_daemons, no_voting]) @classmethod + @_RpcTimeout(_TMO_FAST) def call_node_stop_master(cls, node, stop_daemons): """Tells a node to demote itself from master status. @@ -756,6 +932,7 @@ class RpcRunner(object): return cls._StaticSingleNodeCall(node, "node_stop_master", [stop_daemons]) @classmethod + @_RpcTimeout(_TMO_URGENT) def call_master_info(cls, node_list): """Query master info. @@ -766,6 +943,7 @@ class RpcRunner(object): return cls._StaticMultiNodeCall(node_list, "master_info", []) @classmethod + @_RpcTimeout(_TMO_URGENT) def call_version(cls, node_list): """Query node version. @@ -774,6 +952,7 @@ class RpcRunner(object): """ return cls._StaticMultiNodeCall(node_list, "version", []) + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_create(self, node, bdev, size, owner, on_primary, info): """Request creation of a given block device. @@ -783,6 +962,17 @@ class RpcRunner(object): return self._SingleNodeCall(node, "blockdev_create", [bdev.ToDict(), size, owner, on_primary, info]) + @_RpcTimeout(_TMO_SLOW) + def call_blockdev_wipe(self, node, bdev, offset, size): + """Request wipe at given offset with given size of a block device. + + This is a single-node call. + + """ + return self._SingleNodeCall(node, "blockdev_wipe", + [bdev.ToDict(), offset, size]) + + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_remove(self, node, bdev): """Request removal of a given block device. 
@@ -791,6 +981,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "blockdev_remove", [bdev.ToDict()]) + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_rename(self, node, devlist): """Request rename of the given block devices. @@ -800,6 +991,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "blockdev_rename", [(d.ToDict(), uid) for d, uid in devlist]) + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_assemble(self, node, disk, owner, on_primary): """Request assembling of a given block device. @@ -809,6 +1001,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "blockdev_assemble", [disk.ToDict(), owner, on_primary]) + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_shutdown(self, node, disk): """Request shutdown of a given block device. @@ -817,6 +1010,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "blockdev_shutdown", [disk.ToDict()]) + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_addchildren(self, node, bdev, ndevs): """Request adding a list of children to a (mirroring) device. @@ -827,6 +1021,7 @@ class RpcRunner(object): [bdev.ToDict(), [disk.ToDict() for disk in ndevs]]) + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_removechildren(self, node, bdev, ndevs): """Request removing a list of children from a (mirroring) device. @@ -837,6 +1032,7 @@ class RpcRunner(object): [bdev.ToDict(), [disk.ToDict() for disk in ndevs]]) + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_getmirrorstatus(self, node, disks): """Request status of a (mirroring) device. @@ -850,6 +1046,23 @@ class RpcRunner(object): for i in result.payload] return result + @_RpcTimeout(_TMO_NORMAL) + def call_blockdev_getmirrorstatus_multi(self, node_list, node_disks): + """Request status of (mirroring) devices from multiple nodes. + + This is a multi-node call. + + """ + result = self._MultiNodeCall(node_list, "blockdev_getmirrorstatus_multi", + [dict((name, [dsk.ToDict() for dsk in disks]) + for name, disks in node_disks.items())]) + for nres in result.values(): + if not nres.fail_msg: + nres.payload = [objects.BlockDevStatus.FromDict(i) + for i in nres.payload] + return result + + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_find(self, node, disk): """Request identification of a given block device. @@ -861,6 +1074,7 @@ class RpcRunner(object): result.payload = objects.BlockDevStatus.FromDict(result.payload) return result + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_close(self, node, instance_name, disks): """Closes the given block devices. @@ -870,6 +1084,7 @@ class RpcRunner(object): params = [instance_name, [cf.ToDict() for cf in disks]] return self._SingleNodeCall(node, "blockdev_close", params) + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_getsizes(self, node, disks): """Returns the size of the given disks. @@ -879,6 +1094,7 @@ class RpcRunner(object): params = [[cf.ToDict() for cf in disks]] return self._SingleNodeCall(node, "blockdev_getsize", params) + @_RpcTimeout(_TMO_NORMAL) def call_drbd_disconnect_net(self, node_list, nodes_ip, disks): """Disconnects the network of the given drbd devices. @@ -888,6 +1104,7 @@ class RpcRunner(object): return self._MultiNodeCall(node_list, "drbd_disconnect_net", [nodes_ip, [cf.ToDict() for cf in disks]]) + @_RpcTimeout(_TMO_NORMAL) def call_drbd_attach_net(self, node_list, nodes_ip, disks, instance_name, multimaster): """Disconnects the given drbd devices. 
@@ -899,6 +1116,7 @@ class RpcRunner(object): [nodes_ip, [cf.ToDict() for cf in disks], instance_name, multimaster]) + @_RpcTimeout(_TMO_SLOW) def call_drbd_wait_sync(self, node_list, nodes_ip, disks): """Waits for the synchronization of drbd devices is complete. @@ -908,7 +1126,17 @@ class RpcRunner(object): return self._MultiNodeCall(node_list, "drbd_wait_sync", [nodes_ip, [cf.ToDict() for cf in disks]]) + @_RpcTimeout(_TMO_URGENT) + def call_drbd_helper(self, node_list): + """Gets drbd helper. + + This is a multi-node call. + + """ + return self._MultiNodeCall(node_list, "drbd_helper", []) + @classmethod + @_RpcTimeout(_TMO_NORMAL) def call_upload_file(cls, node_list, file_name, address_list=None): """Upload a file. @@ -935,6 +1163,7 @@ class RpcRunner(object): address_list=address_list) @classmethod + @_RpcTimeout(_TMO_NORMAL) def call_write_ssconf_files(cls, node_list, values): """Write ssconf files. @@ -943,6 +1172,7 @@ class RpcRunner(object): """ return cls._StaticMultiNodeCall(node_list, "write_ssconf_files", [values]) + @_RpcTimeout(_TMO_FAST) def call_os_diagnose(self, node_list): """Request a diagnose of OS definitions. @@ -951,6 +1181,7 @@ class RpcRunner(object): """ return self._MultiNodeCall(node_list, "os_diagnose", []) + @_RpcTimeout(_TMO_FAST) def call_os_get(self, node, name): """Returns an OS definition. @@ -962,6 +1193,17 @@ class RpcRunner(object): result.payload = objects.OS.FromDict(result.payload) return result + @_RpcTimeout(_TMO_FAST) + def call_os_validate(self, required, nodes, name, checks, params): + """Run a validation routine for a given OS. + + This is a multi-node call. + + """ + return self._MultiNodeCall(nodes, "os_validate", + [required, name, checks, params]) + + @_RpcTimeout(_TMO_NORMAL) def call_hooks_runner(self, node_list, hpath, phase, env): """Call the hooks runner. @@ -975,6 +1217,7 @@ class RpcRunner(object): params = [hpath, phase, env] return self._MultiNodeCall(node_list, "hooks_runner", params) + @_RpcTimeout(_TMO_NORMAL) def call_iallocator_runner(self, node, name, idata): """Call an iallocator on a remote node @@ -987,6 +1230,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "iallocator_runner", [name, idata]) + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_grow(self, node, cf_bdev, amount): """Request a snapshot of the given block device. @@ -996,6 +1240,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "blockdev_grow", [cf_bdev.ToDict(), amount]) + @_RpcTimeout(_TMO_1DAY) def call_blockdev_export(self, node, cf_bdev, dest_node, dest_path, cluster_name): """Export a given disk to another node. @@ -1007,6 +1252,7 @@ class RpcRunner(object): [cf_bdev.ToDict(), dest_node, dest_path, cluster_name]) + @_RpcTimeout(_TMO_NORMAL) def call_blockdev_snapshot(self, node, cf_bdev): """Request a snapshot of the given block device. @@ -1015,6 +1261,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "blockdev_snapshot", [cf_bdev.ToDict()]) + @_RpcTimeout(_TMO_NORMAL) def call_finalize_export(self, node, instance, snap_disks): """Request the completion of an export operation. @@ -1033,6 +1280,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "finalize_export", [self._InstDict(instance), flat_disks]) + @_RpcTimeout(_TMO_FAST) def call_export_info(self, node, path): """Queries the export information in a given path. 
@@ -1041,6 +1289,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "export_info", [path]) + @_RpcTimeout(_TMO_FAST) def call_export_list(self, node_list): """Gets the stored exports list. @@ -1049,6 +1298,7 @@ class RpcRunner(object): """ return self._MultiNodeCall(node_list, "export_list", []) + @_RpcTimeout(_TMO_FAST) def call_export_remove(self, node, export): """Requests removal of a given export. @@ -1058,6 +1308,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "export_remove", [export]) @classmethod + @_RpcTimeout(_TMO_NORMAL) def call_node_leave_cluster(cls, node, modify_ssh_setup): """Requests a node to clean the cluster information it has. @@ -1070,6 +1321,7 @@ class RpcRunner(object): return cls._StaticSingleNodeCall(node, "node_leave_cluster", [modify_ssh_setup]) + @_RpcTimeout(_TMO_FAST) def call_node_volumes(self, node_list): """Gets all volumes on node(s). @@ -1078,6 +1330,7 @@ class RpcRunner(object): """ return self._MultiNodeCall(node_list, "node_volumes", []) + @_RpcTimeout(_TMO_FAST) def call_node_demote_from_mc(self, node): """Demote a node from the master candidate role. @@ -1086,6 +1339,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "node_demote_from_mc", []) + @_RpcTimeout(_TMO_NORMAL) def call_node_powercycle(self, node, hypervisor): """Tries to powercycle a node. @@ -1094,14 +1348,17 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "node_powercycle", [hypervisor]) + @_RpcTimeout(None) def call_test_delay(self, node_list, duration): """Sleep for a fixed time on given node(s). This is a multi-node call. """ - return self._MultiNodeCall(node_list, "test_delay", [duration]) + return self._MultiNodeCall(node_list, "test_delay", [duration], + read_timeout=int(duration + 5)) + @_RpcTimeout(_TMO_FAST) def call_file_storage_dir_create(self, node, file_storage_dir): """Create the given file storage directory. @@ -1111,6 +1368,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "file_storage_dir_create", [file_storage_dir]) + @_RpcTimeout(_TMO_FAST) def call_file_storage_dir_remove(self, node, file_storage_dir): """Remove the given file storage directory. @@ -1120,6 +1378,7 @@ class RpcRunner(object): return self._SingleNodeCall(node, "file_storage_dir_remove", [file_storage_dir]) + @_RpcTimeout(_TMO_FAST) def call_file_storage_dir_rename(self, node, old_file_storage_dir, new_file_storage_dir): """Rename file storage directory. @@ -1131,6 +1390,7 @@ class RpcRunner(object): [old_file_storage_dir, new_file_storage_dir]) @classmethod + @_RpcTimeout(_TMO_FAST) def call_jobqueue_update(cls, node_list, address_list, file_name, content): """Update job queue. @@ -1142,6 +1402,7 @@ class RpcRunner(object): address_list=address_list) @classmethod + @_RpcTimeout(_TMO_NORMAL) def call_jobqueue_purge(cls, node): """Purge job queue. @@ -1151,6 +1412,7 @@ class RpcRunner(object): return cls._StaticSingleNodeCall(node, "jobqueue_purge", []) @classmethod + @_RpcTimeout(_TMO_FAST) def call_jobqueue_rename(cls, node_list, address_list, rename): """Rename a job queue file. @@ -1160,21 +1422,7 @@ class RpcRunner(object): return cls._StaticMultiNodeCall(node_list, "jobqueue_rename", rename, address_list=address_list) - @classmethod - def call_jobqueue_set_drain(cls, node_list, drain_flag): - """Set the drain flag on the queue. - - This is a multi-node call. 
- - @type node_list: list - @param node_list: the list of nodes to query - @type drain_flag: bool - @param drain_flag: if True, will set the drain flag, otherwise reset it. - - """ - return cls._StaticMultiNodeCall(node_list, "jobqueue_set_drain", - [drain_flag]) - + @_RpcTimeout(_TMO_NORMAL) def call_hypervisor_validate_params(self, node_list, hvname, hvparams): """Validate the hypervisor params. @@ -1193,6 +1441,7 @@ class RpcRunner(object): return self._MultiNodeCall(node_list, "hypervisor_validate_params", [hvname, hv_full]) + @_RpcTimeout(_TMO_NORMAL) def call_x509_cert_create(self, node, validity): """Creates a new X509 certificate for SSL/TLS. @@ -1204,6 +1453,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "x509_cert_create", [validity]) + @_RpcTimeout(_TMO_NORMAL) def call_x509_cert_remove(self, node, name): """Removes a X509 certificate. @@ -1215,6 +1465,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "x509_cert_remove", [name]) + @_RpcTimeout(_TMO_NORMAL) def call_import_start(self, node, opts, instance, dest, dest_args): """Starts a listener for an import. @@ -1231,6 +1482,7 @@ class RpcRunner(object): self._InstDict(instance), dest, _EncodeImportExportIO(dest, dest_args)]) + @_RpcTimeout(_TMO_NORMAL) def call_export_start(self, node, opts, host, port, instance, source, source_args): """Starts an export daemon. @@ -1248,6 +1500,7 @@ class RpcRunner(object): self._InstDict(instance), source, _EncodeImportExportIO(source, source_args)]) + @_RpcTimeout(_TMO_FAST) def call_impexp_status(self, node, names): """Gets the status of an import or export. @@ -1277,6 +1530,7 @@ class RpcRunner(object): return result + @_RpcTimeout(_TMO_NORMAL) def call_impexp_abort(self, node, name): """Aborts an import or export. @@ -1290,6 +1544,7 @@ class RpcRunner(object): """ return self._SingleNodeCall(node, "impexp_abort", [name]) + @_RpcTimeout(_TMO_NORMAL) def call_impexp_cleanup(self, node, name): """Cleans up after an import or export.
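
Usage sketch (illustrative, not part of the patch above): the new timeout handling is driven by the _RpcTimeout decorator, which records each call_* method's timeout in the module-level _TIMEOUTS table; Client then falls back on that table whenever no explicit read_timeout is passed to ConnectNode/ConnectList. A minimal, self-contained approximation of that pattern follows; FakeRunner and the final print calls are hypothetical stand-ins for demonstration only, not Ganeti code.

    # Minimal sketch of the timeout-table pattern introduced by this patch.
    _TMO_URGENT = 60            # one minute, mirrors the patch's constant
    _TIMEOUTS = {}              # procedure name -> default timeout in seconds


    def _RpcTimeout(secs):
      """Record the timeout for a call_* method in the module-level table."""
      def decorator(fn):
        name = fn.__name__
        assert name.startswith("call_")
        _TIMEOUTS[name[len("call_"):]] = secs
        return fn
      return decorator


    class FakeRunner(object):       # hypothetical stand-in for RpcRunner
      @_RpcTimeout(_TMO_URGENT)
      def call_lv_list(self, node_list, vg_name):
        # The real method issues a multi-node RPC; here we only show that the
        # decorator leaves the method callable exactly as before.
        return "would query %s for volumes in %s" % (node_list, vg_name)


    # The table is filled at class-definition time, so a Client-like object
    # can look up the default read timeout by bare procedure name:
    print(_TIMEOUTS["lv_list"])                                   # -> 60
    print(FakeRunner().call_lv_list(["node1", "node2"], "xenvg"))

This is also why the patch adds the assertion that the procedure is present in _TIMEOUTS in Client.__init__: every new call_* wrapper must declare a timeout via the decorator, otherwise constructing a Client for it fails early instead of running with an undefined timeout.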