X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/2c0f74f24c0913a25c1d1dcb84ffa937413a9aca..ee8fd7b72eba989f409490e1070586b2fe296930:/lib/rpc.py

diff --git a/lib/rpc.py b/lib/rpc.py
index a4298be..7d75a58 100644
--- a/lib/rpc.py
+++ b/lib/rpc.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -23,7 +23,7 @@
 
 """
 
-# pylint: disable-msg=C0103,R0201,R0904
+# pylint: disable=C0103,R0201,R0904
 # C0103: Invalid name, since call_ are not valid
 # R0201: Method could be a function, we keep all rpcs instance methods
 # as not to change them back and forth between static/instance methods
@@ -45,9 +45,11 @@ from ganeti import constants
 from ganeti import errors
 from ganeti import netutils
 from ganeti import ssconf
+from ganeti import runtime
+from ganeti import compat
 
 # pylint has a bug here, doesn't see this import
-import ganeti.http.client  # pylint: disable-msg=W0611
+import ganeti.http.client  # pylint: disable=W0611
 
 
 # Timeout for connecting to nodes (seconds)
@@ -76,6 +78,9 @@ _TMO_1DAY = 86400
 _TIMEOUTS = {
 }
 
+#: Special value to describe an offline host
+_OFFLINE = object()
+
 
 def Init():
   """Initializes the module-global HTTP client manager.
@@ -119,34 +124,6 @@ def _ConfigRpcCurl(curl):
   curl.setopt(pycurl.CONNECTTIMEOUT, _RPC_CONNECT_TIMEOUT)
 
 
-# Aliasing this module avoids the following warning by epydoc: "Warning: No
-# information available for ganeti.rpc._RpcThreadLocal's base threading.local"
-_threading = threading
-
-
-class _RpcThreadLocal(_threading.local):
-  def GetHttpClientPool(self):
-    """Returns a per-thread HTTP client pool.
-
-    @rtype: L{http.client.HttpClientPool}
-
-    """
-    try:
-      pool = self.hcp
-    except AttributeError:
-      pool = http.client.HttpClientPool(_ConfigRpcCurl)
-      self.hcp = pool
-
-    return pool
-
-
-# Remove module alias (see above)
-del _threading
-
-
-_thread_local = _RpcThreadLocal()
-
-
 def _RpcTimeout(secs):
   """Timeout decorator.
@@ -179,6 +156,26 @@ def RunWithRPC(fn):
   return wrapper
 
 
+def _Compress(data):
+  """Compresses a string for transport over RPC.
+
+  Small amounts of data are not compressed.
+
+  @type data: str
+  @param data: Data
+  @rtype: tuple
+  @return: Encoded data to send
+
+  """
+  # Small amounts of data are not compressed
+  if len(data) < 512:
+    return (constants.RPC_ENCODING_NONE, data)
+
+  # Compress with zlib and encode in base64
+  return (constants.RPC_ENCODING_ZLIB_BASE64,
+          base64.b64encode(zlib.compress(data, 3)))
+
+
 class RpcResult(object):
   """RPC Result class.
 
@@ -261,12 +258,12 @@ class RpcResult(object):
       args = (msg, ecode)
     else:
       args = (msg, )
-    raise ec(*args) # pylint: disable-msg=W0142
+    raise ec(*args) # pylint: disable=W0142
 
 
-def _AddressLookup(node_list,
-                   ssc=ssconf.SimpleStore,
-                   nslookup_fn=netutils.Hostname.GetIP):
+def _SsconfResolver(node_list,
+                    ssc=ssconf.SimpleStore,
+                    nslookup_fn=netutils.Hostname.GetIP):
   """Return addresses for given node names.
 
   @type node_list: list
@@ -275,127 +272,166 @@ def _AddressLookup(node_list,
   @param ssc: SimpleStore class that is used to obtain node->ip mappings
   @type nslookup_fn: callable
   @param nslookup_fn: function use to do NS lookup
-  @rtype: list of addresses and/or None's
-  @returns: List of corresponding addresses, if found
+  @rtype: list of tuple; (string, string)
+  @return: List of tuples containing node name and IP address
 
   """
   ss = ssc()
   iplist = ss.GetNodePrimaryIPList()
   family = ss.GetPrimaryIPFamily()
-  addresses = []
   ipmap = dict(entry.split() for entry in iplist)
-  for node in node_list:
-    address = ipmap.get(node)
-    if address is None:
-      address = nslookup_fn(node, family=family)
-    addresses.append(address)
 
-  return addresses
+  result = []
+  for node in node_list:
+    ip = ipmap.get(node)
+    if ip is None:
+      ip = nslookup_fn(node, family=family)
+    result.append((node, ip))
+
+  return result
 
 
-class Client:
-  """RPC Client class.
-
-  This class, given a (remote) method name, a list of parameters and a
-  list of nodes, will contact (in parallel) all nodes, and return a
-  dict of results (key: node name, value: result).
+class _StaticResolver:
+  def __init__(self, addresses):
+    """Initializes this class.
 
-  One current bug is that generic failure is still signaled by
-  'False' result, which is not good. This overloading of values can
-  cause bugs.
+    """
+    self._addresses = addresses
 
-  """
-  def __init__(self, procedure, body, port, address_lookup_fn=_AddressLookup):
-    assert procedure in _TIMEOUTS, ("New RPC call not declared in the"
-                                    " timeouts table")
-    self.procedure = procedure
-    self.body = body
-    self.port = port
-    self._request = {}
-    self._address_lookup_fn = address_lookup_fn
-
-  def ConnectList(self, node_list, address_list=None, read_timeout=None):
-    """Add a list of nodes to the target nodes.
-
-    @type node_list: list
-    @param node_list: the list of node names to connect
-    @type address_list: list or None
-    @keyword address_list: either None or a list with node addresses,
-        which must have the same length as the node list
-    @type read_timeout: int
-    @param read_timeout: overwrites default timeout for operation
+  def __call__(self, hosts):
+    """Returns static addresses for hosts.
 
     """
-    if address_list is None:
-      # Always use IP address instead of node name
-      address_list = self._address_lookup_fn(node_list)
+    assert len(hosts) == len(self._addresses)
+    return zip(hosts, self._addresses)
 
-    assert len(node_list) == len(address_list), \
-           "Name and address lists must have the same length"
 
-    for node, address in zip(node_list, address_list):
-      self.ConnectNode(node, address, read_timeout=read_timeout)
+def _CheckConfigNode(name, node):
+  """Checks if a node is online.
 
-  def ConnectNode(self, name, address=None, read_timeout=None):
-    """Add a node to the target list.
+  @type name: string
+  @param name: Node name
+  @type node: L{objects.Node} or None
+  @param node: Node object
 
-    @type name: str
-    @param name: the node name
-    @type address: str
-    @param address: the node address, if known
-    @type read_timeout: int
-    @param read_timeout: overwrites default timeout for operation
+  """
+  if node is None:
+    # Depend on DNS for name resolution
+    ip = name
+  elif node.offline:
+    ip = _OFFLINE
+  else:
+    ip = node.primary_ip
+  return (name, ip)
 
-    """
-    if address is None:
-      # Always use IP address instead of node name
-      address = self._address_lookup_fn([name])[0]
 
-    assert(address is not None)
+def _NodeConfigResolver(single_node_fn, all_nodes_fn, hosts):
+  """Calculate node addresses using configuration.
 
-    if read_timeout is None:
-      read_timeout = _TIMEOUTS[self.procedure]
+  """
+  # Special case for single-host lookups
+  if len(hosts) == 1:
+    (name, ) = hosts
+    return [_CheckConfigNode(name, single_node_fn(name))]
+  else:
+    all_nodes = all_nodes_fn()
+    return [_CheckConfigNode(name, all_nodes.get(name, None))
+            for name in hosts]
 
-    self._request[name] = \
-      http.client.HttpClientRequest(str(address), self.port,
-                                    http.HTTP_PUT, str("/%s" % self.procedure),
-                                    headers=_RPC_CLIENT_HEADERS,
-                                    post_data=str(self.body),
-                                    read_timeout=read_timeout)
 
-  def GetResults(self, http_pool=None):
-    """Call nodes and return results.
+class _RpcProcessor:
+  def __init__(self, resolver, port, lock_monitor_cb=None):
+    """Initializes this class.
 
-    @rtype: list
-    @return: List of RPC results
+    @param resolver: callable accepting a list of hostnames, returning a list
+      of tuples containing name and IP address (IP address can be the name or
+      the special value L{_OFFLINE} to mark offline machines)
+    @type port: int
+    @param port: TCP port
+    @param lock_monitor_cb: Callable for registering with lock monitor
 
     """
-    if not http_pool:
-      http_pool = _thread_local.GetHttpClientPool()
+    self._resolver = resolver
+    self._port = port
+    self._lock_monitor_cb = lock_monitor_cb
 
-    http_pool.ProcessRequests(self._request.values())
+  @staticmethod
+  def _PrepareRequests(hosts, port, procedure, body, read_timeout):
+    """Prepares requests by sorting offline hosts into separate list.
 
+    """
     results = {}
+    requests = {}
 
-    for name, req in self._request.iteritems():
-      if req.success and req.resp_status_code == http.HTTP_OK:
-        results[name] = RpcResult(data=serializer.LoadJson(req.resp_body),
-                                  node=name, call=self.procedure)
-        continue
+    for (name, ip) in hosts:
+      if ip is _OFFLINE:
+        # Node is marked as offline
+        results[name] = RpcResult(node=name, offline=True, call=procedure)
+      else:
+        requests[name] = \
+          http.client.HttpClientRequest(str(ip), port,
+                                        http.HTTP_PUT, str("/%s" % procedure),
+                                        headers=_RPC_CLIENT_HEADERS,
+                                        post_data=body,
+                                        read_timeout=read_timeout,
+                                        nicename="%s/%s" % (name, procedure),
+                                        curl_config_fn=_ConfigRpcCurl)
+
+    return (results, requests)
 
-      # TODO: Better error reporting
-      if req.error:
-        msg = req.error
+  @staticmethod
+  def _CombineResults(results, requests, procedure):
+    """Combines pre-computed results for offline hosts with actual call results.
+
+    """
+    for name, req in requests.items():
+      if req.success and req.resp_status_code == http.HTTP_OK:
+        host_result = RpcResult(data=serializer.LoadJson(req.resp_body),
+                                node=name, call=procedure)
       else:
-        msg = req.resp_body
+        # TODO: Better error reporting
+        if req.error:
+          msg = req.error
+        else:
+          msg = req.resp_body
 
-      logging.error("RPC error in %s from node %s: %s",
-                    self.procedure, name, msg)
-      results[name] = RpcResult(data=msg, failed=True, node=name,
-                                call=self.procedure)
+        logging.error("RPC error in %s on node %s: %s", procedure, name, msg)
+        host_result = RpcResult(data=msg, failed=True, node=name,
+                                call=procedure)
+
+      results[name] = host_result
 
     return results
 
+  def __call__(self, hosts, procedure, body, read_timeout=None,
+               _req_process_fn=http.client.ProcessRequests):
+    """Makes an RPC request to a number of nodes.
+
+    @type hosts: sequence
+    @param hosts: Hostnames
+    @type procedure: string
+    @param procedure: Request path
+    @type body: string
+    @param body: Request body
+    @type read_timeout: int or None
+    @param read_timeout: Read timeout for request
+
+    """
+    assert procedure in _TIMEOUTS, "RPC call not declared in the timeouts table"
+
+    if read_timeout is None:
+      read_timeout = _TIMEOUTS[procedure]
+
+    (results, requests) = \
+      self._PrepareRequests(self._resolver(hosts), self._port, procedure,
+                            str(body), read_timeout)
+
+    _req_process_fn(requests.values(), lock_monitor_cb=self._lock_monitor_cb)
+
+    assert not frozenset(results).intersection(requests)
+
+    return self._CombineResults(results, requests, procedure)
+
 
 def _EncodeImportExportIO(ieio, ieioargs):
   """Encodes import/export I/O information.
@@ -413,18 +449,22 @@ def _EncodeImportExportIO(ieio, ieioargs):
 
 
 class RpcRunner(object):
-  """RPC runner class"""
+  """RPC runner class.
 
-  def __init__(self, cfg):
-    """Initialized the rpc runner.
+  """
+  def __init__(self, context):
+    """Initialized the RPC runner.
 
-    @type cfg: C{config.ConfigWriter}
-    @param cfg: the configuration object that will be used to get data
-                about the cluster
+    @type context: C{masterd.GanetiContext}
+    @param context: Ganeti context
 
    """
-    self._cfg = cfg
-    self.port = netutils.GetDaemonPort(constants.NODED)
+    self._cfg = context.cfg
+    self._proc = _RpcProcessor(compat.partial(_NodeConfigResolver,
+                                              self._cfg.GetNodeInfo,
+                                              self._cfg.GetAllNodesInfo),
+                               netutils.GetDaemonPort(constants.NODED),
+                               lock_monitor_cb=context.glm.AddToLockMonitor)
 
   def _InstDict(self, instance, hvp=None, bep=None, osp=None):
     """Convert the given instance to a dict.
@@ -462,98 +502,37 @@ class RpcRunner(object):
                                  nic['nicparams'])
     return idict
 
-  def _ConnectList(self, client, node_list, call, read_timeout=None):
-    """Helper for computing node addresses.
-
-    @type client: L{ganeti.rpc.Client}
-    @param client: a C{Client} instance
-    @type node_list: list
-    @param node_list: the node list we should connect
-    @type call: string
-    @param call: the name of the remote procedure call, for filling in
-        correctly any eventual offline nodes' results
-    @type read_timeout: int
-    @param read_timeout: overwrites the default read timeout for the
-        given operation
-
-    """
-    all_nodes = self._cfg.GetAllNodesInfo()
-    name_list = []
-    addr_list = []
-    skip_dict = {}
-    for node in node_list:
-      if node in all_nodes:
-        if all_nodes[node].offline:
-          skip_dict[node] = RpcResult(node=node, offline=True, call=call)
-          continue
-        val = all_nodes[node].primary_ip
-      else:
-        val = None
-      addr_list.append(val)
-      name_list.append(node)
-    if name_list:
-      client.ConnectList(name_list, address_list=addr_list,
-                         read_timeout=read_timeout)
-    return skip_dict
-
-  def _ConnectNode(self, client, node, call, read_timeout=None):
-    """Helper for computing one node's address.
-
-    @type client: L{ganeti.rpc.Client}
-    @param client: a C{Client} instance
-    @type node: str
-    @param node: the node we should connect
-    @type call: string
-    @param call: the name of the remote procedure call, for filling in
-        correctly any eventual offline nodes' results
-    @type read_timeout: int
-    @param read_timeout: overwrites the default read timeout for the
-        given operation
-
-    """
-    node_info = self._cfg.GetNodeInfo(node)
-    if node_info is not None:
-      if node_info.offline:
-        return RpcResult(node=node, offline=True, call=call)
-      addr = node_info.primary_ip
-    else:
-      addr = None
-    client.ConnectNode(node, address=addr, read_timeout=read_timeout)
-
   def _MultiNodeCall(self, node_list, procedure, args, read_timeout=None):
     """Helper for making a multi-node call
 
     """
     body = serializer.DumpJson(args, indent=False)
-    c = Client(procedure, body, self.port)
-    skip_dict = self._ConnectList(c, node_list, procedure,
-                                  read_timeout=read_timeout)
-    skip_dict.update(c.GetResults())
-    return skip_dict
+    return self._proc(node_list, procedure, body, read_timeout=read_timeout)
 
-  @classmethod
-  def _StaticMultiNodeCall(cls, node_list, procedure, args,
+  @staticmethod
+  def _StaticMultiNodeCall(node_list, procedure, args,
                            address_list=None, read_timeout=None):
     """Helper for making a multi-node static call
 
     """
     body = serializer.DumpJson(args, indent=False)
-    c = Client(procedure, body, netutils.GetDaemonPort(constants.NODED))
-    c.ConnectList(node_list, address_list=address_list,
-                  read_timeout=read_timeout)
-    return c.GetResults()
+
+    if address_list is None:
+      resolver = _SsconfResolver
+    else:
+      # Caller provided an address list
+      resolver = _StaticResolver(address_list)
+
+    proc = _RpcProcessor(resolver,
+                         netutils.GetDaemonPort(constants.NODED))
+    return proc(node_list, procedure, body, read_timeout=read_timeout)
 
   def _SingleNodeCall(self, node, procedure, args, read_timeout=None):
     """Helper for making a single-node call
 
     """
     body = serializer.DumpJson(args, indent=False)
-    c = Client(procedure, body, self.port)
-    result = self._ConnectNode(c, node, procedure, read_timeout=read_timeout)
-    if result is None:
-      # we did connect, node is not offline
-      result = c.GetResults()[node]
-    return result
+    return self._proc([node], procedure, body, read_timeout=read_timeout)[node]
 
   @classmethod
   def _StaticSingleNodeCall(cls, node, procedure, args, read_timeout=None):
@@ -561,33 +540,22 @@ class RpcRunner(object):
 
     """
     body = serializer.DumpJson(args, indent=False)
-    c = Client(procedure, body, netutils.GetDaemonPort(constants.NODED))
-    c.ConnectNode(node, read_timeout=read_timeout)
-    return c.GetResults()[node]
+    proc = _RpcProcessor(_SsconfResolver,
+                         netutils.GetDaemonPort(constants.NODED))
+    return proc([node], procedure, body, read_timeout=read_timeout)[node]
 
-  @staticmethod
-  def _Compress(data):
-    """Compresses a string for transport over RPC.
+  #
+  # Begin RPC calls
+  #
 
-    Small amounts of data are not compressed.
+  @_RpcTimeout(_TMO_URGENT)
+  def call_bdev_sizes(self, node_list, devices):
+    """Gets the sizes of requested block devices present on a node
 
-    @type data: str
-    @param data: Data
-    @rtype: tuple
-    @return: Encoded data to send
+    This is a multi-node call.
 
     """
-    # Small amounts of data are not compressed
-    if len(data) < 512:
-      return (constants.RPC_ENCODING_NONE, data)
-
-    # Compress with zlib and encode in base64
-    return (constants.RPC_ENCODING_ZLIB_BASE64,
-            base64.b64encode(zlib.compress(data, 3)))
-
-  #
-  # Begin RPC calls
-  #
+    return self._MultiNodeCall(node_list, "bdev_sizes", [devices])
 
   @_RpcTimeout(_TMO_URGENT)
   def call_lv_list(self, node_list, vg_name):
@@ -651,14 +619,14 @@ class RpcRunner(object):
     return self._SingleNodeCall(node, "bridges_exist", [bridges_list])
 
   @_RpcTimeout(_TMO_NORMAL)
-  def call_instance_start(self, node, instance, hvp, bep):
+  def call_instance_start(self, node, instance, hvp, bep, startup_paused):
    """Starts an instance.
 
     This is a single-node call.
 
     """
     idict = self._InstDict(instance, hvp=hvp, bep=bep)
-    return self._SingleNodeCall(node, "instance_start", [idict])
+    return self._SingleNodeCall(node, "instance_start", [idict, startup_paused])
 
   @_RpcTimeout(_TMO_NORMAL)
   def call_instance_shutdown(self, node, instance, timeout):
@@ -705,7 +673,7 @@ class RpcRunner(object):
                                 [self._InstDict(instance), info, target])
 
   @_RpcTimeout(_TMO_NORMAL)
-  def call_finalize_migration(self, node, instance, info, success):
+  def call_instance_finalize_migration_dst(self, node, instance, info, success):
     """Finalize any target-node migration specific operation.
 
     This is called both in case of a successful migration and in case of error
@@ -723,7 +691,7 @@ class RpcRunner(object):
     @param success: whether the migration was a success or a failure
 
     """
-    return self._SingleNodeCall(node, "finalize_migration",
+    return self._SingleNodeCall(node, "instance_finalize_migration_dst",
                                 [self._InstDict(instance), info, success])
 
   @_RpcTimeout(_TMO_SLOW)
@@ -746,6 +714,43 @@ class RpcRunner(object):
     return self._SingleNodeCall(node, "instance_migrate",
                                 [self._InstDict(instance), target, live])
 
+  @_RpcTimeout(_TMO_SLOW)
+  def call_instance_finalize_migration_src(self, node, instance, success, live):
+    """Finalize the instance migration on the source node.
+
+    This is a single-node call.
+
+    @type instance: L{objects.Instance}
+    @param instance: the instance that was migrated
+    @type success: bool
+    @param success: whether the migration succeeded or not
+    @type live: bool
+    @param live: whether the user requested a live migration or not
+
+    """
+    return self._SingleNodeCall(node, "instance_finalize_migration_src",
+                                [self._InstDict(instance), success, live])
+
+  @_RpcTimeout(_TMO_SLOW)
+  def call_instance_get_migration_status(self, node, instance):
+    """Report migration status.
+
+    This is a single-node call that must be executed on the source node.
+
+    @type instance: L{objects.Instance}
+    @param instance: the instance that is being migrated
+    @rtype: L{objects.MigrationStatus}
+    @return: the status of the current migration (one of
+             L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+             progress info that can be retrieved from the hypervisor
+
+    """
+    result = self._SingleNodeCall(node, "instance_get_migration_status",
+                                  [self._InstDict(instance)])
+    if not result.fail_msg and result.payload is not None:
+      result.payload = objects.MigrationStatus.FromDict(result.payload)
+    return result
+
   @_RpcTimeout(_TMO_NORMAL)
   def call_instance_reboot(self, node, inst, reboot_type, shutdown_timeout):
     """Reboots an instance.
@@ -839,18 +844,6 @@ class RpcRunner(object):
     return self._MultiNodeCall(node_list, "instance_list", [hypervisor_list])
 
   @_RpcTimeout(_TMO_FAST)
-  def call_node_tcp_ping(self, node, source, target, port, timeout,
-                         live_port_needed):
-    """Do a TcpPing on the remote node
-
-    This is a single-node call.
-
-    """
-    return self._SingleNodeCall(node, "node_tcp_ping",
-                                [source, target, port, timeout,
-                                 live_port_needed])
-
-  @_RpcTimeout(_TMO_FAST)
   def call_node_has_ip_address(self, node, address):
     """Checks if a node has the given IP address.
@@ -909,24 +902,55 @@ class RpcRunner(object):
 
   @classmethod
   @_RpcTimeout(_TMO_FAST)
-  def call_node_start_master(cls, node, start_daemons, no_voting):
-    """Tells a node to activate itself as a master.
+  def call_node_start_master_daemons(cls, node, no_voting):
+    """Starts master daemons on a node.
+
+    This is a single-node call.
+
+    """
+    return cls._StaticSingleNodeCall(node, "node_start_master_daemons",
+                                     [no_voting])
+
+  @classmethod
+  @_RpcTimeout(_TMO_FAST)
+  def call_node_activate_master_ip(cls, node):
+    """Activates master IP on a node.
 
     This is a single-node call.
 
     """
-    return cls._StaticSingleNodeCall(node, "node_start_master",
-                                     [start_daemons, no_voting])
+    return cls._StaticSingleNodeCall(node, "node_activate_master_ip", [])
 
   @classmethod
   @_RpcTimeout(_TMO_FAST)
-  def call_node_stop_master(cls, node, stop_daemons):
-    """Tells a node to demote itself from master status.
+  def call_node_stop_master(cls, node):
+    """Deactivates master IP and stops master daemons on a node.
 
     This is a single-node call.
 
    """
-    return cls._StaticSingleNodeCall(node, "node_stop_master", [stop_daemons])
+    return cls._StaticSingleNodeCall(node, "node_stop_master", [])
+
+  @classmethod
+  @_RpcTimeout(_TMO_FAST)
+  def call_node_deactivate_master_ip(cls, node):
+    """Deactivates master IP on a node.
+
+    This is a single-node call.
+
+    """
+    return cls._StaticSingleNodeCall(node, "node_deactivate_master_ip", [])
+
+  @classmethod
+  @_RpcTimeout(_TMO_FAST)
+  def call_node_change_master_netmask(cls, node, netmask):
+    """Change master IP netmask.
+
+    This is a single-node call.
+
+    """
+    return cls._StaticSingleNodeCall(node, "node_change_master_netmask",
+                                     [netmask])
 
   @classmethod
   @_RpcTimeout(_TMO_URGENT)
@@ -989,14 +1013,24 @@ class RpcRunner(object):
                                 [(d.ToDict(), uid) for d, uid in devlist])
 
   @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_assemble(self, node, disk, owner, on_primary):
+  def call_blockdev_pause_resume_sync(self, node, disks, pause):
+    """Request a pause/resume of given block device.
+
+    This is a single-node call.
+
+    """
+    return self._SingleNodeCall(node, "blockdev_pause_resume_sync",
+                                [[bdev.ToDict() for bdev in disks], pause])
+
+  @_RpcTimeout(_TMO_NORMAL)
+  def call_blockdev_assemble(self, node, disk, owner, on_primary, idx):
     """Request assembling of a given block device.
 
     This is a single-node call.
 
    """
     return self._SingleNodeCall(node, "blockdev_assemble",
-                                [disk.ToDict(), owner, on_primary])
+                                [disk.ToDict(), owner, on_primary, idx])
 
   @_RpcTimeout(_TMO_NORMAL)
   def call_blockdev_shutdown(self, node, disk):
@@ -1086,7 +1120,7 @@ class RpcRunner(object):
     return self._SingleNodeCall(node, "blockdev_close", params)
 
   @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_getsizes(self, node, disks):
+  def call_blockdev_getsize(self, node, disks):
     """Returns the size of the given disks.
 
     This is a single-node call.
@@ -1156,10 +1190,11 @@ class RpcRunner(object):
 
    """
     file_contents = utils.ReadFile(file_name)
-    data = cls._Compress(file_contents)
+    data = _Compress(file_contents)
     st = os.stat(file_name)
-    params = [file_name, data, st.st_mode, st.st_uid, st.st_gid,
-              st.st_atime, st.st_mtime]
+    getents = runtime.GetEnts()
+    params = [file_name, data, st.st_mode, getents.LookupUid(st.st_uid),
+              getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
    return cls._StaticMultiNodeCall(node_list, "upload_file", params,
                                     address_list=address_list)
@@ -1242,14 +1277,14 @@ class RpcRunner(object):
     return self._SingleNodeCall(node, "iallocator_runner", [name, idata])
 
   @_RpcTimeout(_TMO_NORMAL)
-  def call_blockdev_grow(self, node, cf_bdev, amount):
+  def call_blockdev_grow(self, node, cf_bdev, amount, dryrun):
     """Request a snapshot of the given block device.
 
    This is a single-node call.
 
    """
     return self._SingleNodeCall(node, "blockdev_grow",
-                                [cf_bdev.ToDict(), amount])
+                                [cf_bdev.ToDict(), amount, dryrun])
 
   @_RpcTimeout(_TMO_1DAY)
   def call_blockdev_export(self, node, cf_bdev,
@@ -1401,7 +1436,7 @@ class RpcRunner(object):
                                 [old_file_storage_dir, new_file_storage_dir])
 
   @classmethod
-  @_RpcTimeout(_TMO_FAST)
+  @_RpcTimeout(_TMO_URGENT)
  def call_jobqueue_update(cls, node_list, address_list, file_name, content):
     """Update job queue.
 
@@ -1409,7 +1444,7 @@ class RpcRunner(object):
 
    """
     return cls._StaticMultiNodeCall(node_list, "jobqueue_update",
-                                    [file_name, cls._Compress(content)],
+                                    [file_name, _Compress(content)],
                                     address_list=address_list)
 
   @classmethod
@@ -1423,7 +1458,7 @@ class RpcRunner(object):
     return cls._StaticSingleNodeCall(node, "jobqueue_purge", [])
 
   @classmethod
-  @_RpcTimeout(_TMO_FAST)
+  @_RpcTimeout(_TMO_URGENT)
   def call_jobqueue_rename(cls, node_list, address_list, rename):
     """Rename a job queue file.
 
@@ -1477,7 +1512,8 @@ class RpcRunner(object):
     return self._SingleNodeCall(node, "x509_cert_remove", [name])
 
   @_RpcTimeout(_TMO_NORMAL)
-  def call_import_start(self, node, opts, instance, dest, dest_args):
+  def call_import_start(self, node, opts, instance, component,
+                        dest, dest_args):
    """Starts a listener for an import.
 
     This is a single-node call.
@@ -1486,16 +1522,18 @@ class RpcRunner(object):
     @param node: Node name
     @type instance: C{objects.Instance}
     @param instance: Instance object
+    @type component: string
+    @param component: which part of the instance is being imported
 
    """
     return self._SingleNodeCall(node, "import_start",
                                 [opts.ToDict(),
-                                 self._InstDict(instance), dest,
+                                 self._InstDict(instance), component, dest,
                                  _EncodeImportExportIO(dest, dest_args)])
 
   @_RpcTimeout(_TMO_NORMAL)
   def call_export_start(self, node, opts, host, port,
-                        instance, source, source_args):
+                        instance, component, source, source_args):
     """Starts an export daemon.
 
     This is a single-node call.
@@ -1504,11 +1542,14 @@ class RpcRunner(object):
     @param node: Node name
     @type instance: C{objects.Instance}
     @param instance: Instance object
+    @type component: string
+    @param component: which part of the instance is being imported
 
    """
     return self._SingleNodeCall(node, "export_start",
                                 [opts.ToDict(), host, port,
-                                 self._InstDict(instance), source,
+                                 self._InstDict(instance),
+                                 component, source,
                                  _EncodeImportExportIO(source, source_args)])
 
   @_RpcTimeout(_TMO_FAST)
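
Editor's note: the wire encoding implemented by the module-level _Compress helper (moved out of RpcRunner in the diff above) can be exercised on its own. The following is a minimal sketch, not part of the diff, assuming a Python 2 environment with Ganeti's lib/ importable; the payload strings are invented for the example.

  import base64
  import zlib

  from ganeti import constants
  from ganeti.rpc import _Compress   # private helper shown in the diff above

  # Payloads under 512 bytes travel unencoded...
  (enc, payload) = _Compress("x" * 100)
  assert enc == constants.RPC_ENCODING_NONE
  assert payload == "x" * 100

  # ...larger payloads are zlib-compressed and base64-encoded for transport.
  (enc, payload) = _Compress("x" * 4096)
  assert enc == constants.RPC_ENCODING_ZLIB_BASE64
  assert zlib.decompress(base64.b64decode(payload)) == "x" * 4096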
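Likewise, the resolver/_RpcProcessor split that replaces the old Client class can be sketched in isolation. Only the calling convention is taken from the code above (a resolver returns (name, address) pairs, with _OFFLINE marking nodes to skip); the node names, addresses and the stub standing in for http.client.ProcessRequests are invented for illustration, so no network I/O is performed.

  from ganeti import constants
  from ganeti import netutils
  from ganeti import serializer
  from ganeti.rpc import _OFFLINE, _RpcProcessor   # private names from the diff

  # Hypothetical name->address mapping; _OFFLINE makes _PrepareRequests skip
  # the node and substitute a pre-computed "offline" RpcResult instead.
  _EXAMPLE_ADDRESSES = {
    "node1.example.com": "192.0.2.10",
    "node2.example.com": _OFFLINE,
  }

  def _example_resolver(hosts):
    # Resolver contract: map each hostname to a (name, address) pair
    return [(name, _EXAMPLE_ADDRESSES[name]) for name in hosts]

  def _stub_process_fn(requests, lock_monitor_cb=None):
    # Stand-in for http.client.ProcessRequests so the example does no I/O;
    # every prepared request is simply marked as failed.
    for req in requests:
      req.success = False
      req.error = "not sent (example stub)"

  proc = _RpcProcessor(_example_resolver,
                       netutils.GetDaemonPort(constants.NODED))
  body = serializer.DumpJson(["xenvg"], indent=False)
  results = proc(["node1.example.com", "node2.example.com"], "lv_list", body,
                 _req_process_fn=_stub_process_fn)

  # node2 never reached request preparation: its entry is the offline result
  # built by _PrepareRequests, while node1 went through the stubbed HTTP layer
  # and comes back as a failed RpcResult.
  assert results["node2.example.com"].offline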