#
#
-# Copyright (C) 2006, 2007 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
"""
-# pylint: disable-msg=C0103,R0201,R0904
+# pylint: disable=C0103,R0201,R0904
# C0103: Invalid name, since call_ are not valid
# R0201: Method could be a function, we keep all rpcs instance methods
# as not to change them back and forth between static/instance methods
import logging
import zlib
import base64
+import pycurl
+import threading
from ganeti import utils
from ganeti import objects
from ganeti import serializer
from ganeti import constants
from ganeti import errors
+from ganeti import netutils
+from ganeti import ssconf
+from ganeti import runtime
+from ganeti import compat
# pylint has a bug here, doesn't see this import
-import ganeti.http.client # pylint: disable-msg=W0611
+import ganeti.http.client # pylint: disable=W0611
-# Module level variable
-_http_manager = None
+# Timeout for connecting to nodes (seconds)
+_RPC_CONNECT_TIMEOUT = 5
+
+_RPC_CLIENT_HEADERS = [
+ "Content-type: %s" % http.HTTP_APP_JSON,
+ "Expect:",
+ ]
# Various time constants for the timeout table
_TMO_URGENT = 60 # one minute
_TIMEOUTS = {
}
+#: Special value to describe an offline host
+_OFFLINE = object()
+
def Init():
"""Initializes the module-global HTTP client manager.
- Must be called before using any RPC function.
+ Must be called before using any RPC function and while exactly one thread is
+ running.
"""
- global _http_manager # pylint: disable-msg=W0603
-
- assert not _http_manager, "RPC module initialized more than once"
+ # curl_global_init(3) and curl_global_cleanup(3) must be called with only
+ # one thread running. This check is just a safety measure -- it doesn't
+ # cover all cases.
+ assert threading.activeCount() == 1, \
+ "Found more than one active thread when initializing pycURL"
- http.InitSsl()
+ logging.info("Using PycURL %s", pycurl.version)
- _http_manager = http.client.HttpClientManager()
+ pycurl.global_init(pycurl.GLOBAL_ALL)
def Shutdown():
"""Stops the module-global HTTP client manager.
- Must be called before quitting the program.
+ Must be called before quitting the program and while exactly one thread is
+ running.
"""
- global _http_manager # pylint: disable-msg=W0603
+ pycurl.global_cleanup()
- if _http_manager:
- _http_manager.Shutdown()
- _http_manager = None
+
+def _ConfigRpcCurl(curl):
+ noded_cert = str(constants.NODED_CERT_FILE)
+
+ curl.setopt(pycurl.FOLLOWLOCATION, False)
+ curl.setopt(pycurl.CAINFO, noded_cert)
+ curl.setopt(pycurl.SSL_VERIFYHOST, 0)
+ curl.setopt(pycurl.SSL_VERIFYPEER, True)
+ curl.setopt(pycurl.SSLCERTTYPE, "PEM")
+ curl.setopt(pycurl.SSLCERT, noded_cert)
+ curl.setopt(pycurl.SSLKEYTYPE, "PEM")
+ curl.setopt(pycurl.SSLKEY, noded_cert)
+ curl.setopt(pycurl.CONNECTTIMEOUT, _RPC_CONNECT_TIMEOUT)
def _RpcTimeout(secs):
return decorator
+def RunWithRPC(fn):
+ """RPC-wrapper decorator.
+
+ When applied to a function, it runs it with the RPC system
+ initialized, and it shuts down the system afterwards. This means the
+ function must be called without RPC being initialized.
+
+ """
+ def wrapper(*args, **kwargs):
+ Init()
+ try:
+ return fn(*args, **kwargs)
+ finally:
+ Shutdown()
+ return wrapper
+
+
+def _Compress(data):
+ """Compresses a string for transport over RPC.
+
+ Small amounts of data are not compressed.
+
+ @type data: str
+ @param data: Data
+ @rtype: tuple
+ @return: Encoded data to send
+
+ """
+ # Small amounts of data are not compressed
+ if len(data) < 512:
+ return (constants.RPC_ENCODING_NONE, data)
+
+ # Compress with zlib and encode in base64
+ return (constants.RPC_ENCODING_ZLIB_BASE64,
+ base64.b64encode(zlib.compress(data, 3)))
+
+
class RpcResult(object):
"""RPC Result class.
self.fail_msg = None
self.payload = data[1]
- assert hasattr(self, "call")
- assert hasattr(self, "data")
- assert hasattr(self, "fail_msg")
- assert hasattr(self, "node")
- assert hasattr(self, "offline")
- assert hasattr(self, "payload")
+ for attr_name in ["call", "data", "fail_msg",
+ "node", "offline", "payload"]:
+ assert hasattr(self, attr_name), "Missing attribute %s" % attr_name
@staticmethod
def _EnsureErr(val):
else:
ec = errors.OpExecError
if ecode is not None:
- args = (msg, prereq)
+ args = (msg, ecode)
else:
args = (msg, )
- raise ec(*args) # pylint: disable-msg=W0142
-
+ raise ec(*args) # pylint: disable=W0142
-class Client:
- """RPC Client class.
- This class, given a (remote) method name, a list of parameters and a
- list of nodes, will contact (in parallel) all nodes, and return a
- dict of results (key: node name, value: result).
+def _SsconfResolver(node_list,
+ ssc=ssconf.SimpleStore,
+ nslookup_fn=netutils.Hostname.GetIP):
+ """Return addresses for given node names.
- One current bug is that generic failure is still signaled by
- 'False' result, which is not good. This overloading of values can
- cause bugs.
+ @type node_list: list
+ @param node_list: List of node names
+ @type ssc: class
+ @param ssc: SimpleStore class that is used to obtain node->ip mappings
+ @type nslookup_fn: callable
+ @param nslookup_fn: function used to do NS lookup
+ @rtype: list of tuple; (string, string)
+ @return: List of tuples containing node name and IP address
"""
- def __init__(self, procedure, body, port):
- assert procedure in _TIMEOUTS, ("New RPC call not declared in the"
- " timeouts table")
- self.procedure = procedure
- self.body = body
- self.port = port
- self.nc = {}
+ ss = ssc()
+ iplist = ss.GetNodePrimaryIPList()
+ family = ss.GetPrimaryIPFamily()
+ ipmap = dict(entry.split() for entry in iplist)
- self._ssl_params = \
- http.HttpSslParams(ssl_key_path=constants.NODED_CERT_FILE,
- ssl_cert_path=constants.NODED_CERT_FILE)
+ result = []
+ for node in node_list:
+ ip = ipmap.get(node)
+ if ip is None:
+ ip = nslookup_fn(node, family=family)
+ result.append((node, ip))
- def ConnectList(self, node_list, address_list=None, read_timeout=None):
- """Add a list of nodes to the target nodes.
+ return result
- @type node_list: list
- @param node_list: the list of node names to connect
- @type address_list: list or None
- @keyword address_list: either None or a list with node addresses,
- which must have the same length as the node list
- @type read_timeout: int
- @param read_timeout: overwrites the default read timeout for the
- given operation
- """
- if address_list is None:
- address_list = [None for _ in node_list]
- else:
- assert len(node_list) == len(address_list), \
- "Name and address lists should have the same length"
- for node, address in zip(node_list, address_list):
- self.ConnectNode(node, address, read_timeout=read_timeout)
+class _StaticResolver:
+ def __init__(self, addresses):
+ """Initializes this class.
- def ConnectNode(self, name, address=None, read_timeout=None):
- """Add a node to the target list.
+ """
+ self._addresses = addresses
- @type name: str
- @param name: the node name
- @type address: str
- @keyword address: the node address, if known
+ def __call__(self, hosts):
+ """Returns static addresses for hosts.
"""
- if address is None:
- address = name
+ assert len(hosts) == len(self._addresses)
+ return zip(hosts, self._addresses)
- if read_timeout is None:
- read_timeout = _TIMEOUTS[self.procedure]
- self.nc[name] = \
- http.client.HttpClientRequest(address, self.port, http.HTTP_PUT,
- "/%s" % self.procedure,
- post_data=self.body,
- ssl_params=self._ssl_params,
- ssl_verify_peer=True,
- read_timeout=read_timeout)
+def _CheckConfigNode(name, node):
+ """Checks if a node is online.
+
+ @type name: string
+ @param name: Node name
+ @type node: L{objects.Node} or None
+ @param node: Node object
+
+ """
+ if node is None:
+ # Depend on DNS for name resolution
+ ip = name
+ elif node.offline:
+ ip = _OFFLINE
+ else:
+ ip = node.primary_ip
+ return (name, ip)
- def GetResults(self):
- """Call nodes and return results.
- @rtype: list
- @return: List of RPC results
+def _NodeConfigResolver(single_node_fn, all_nodes_fn, hosts):
+ """Calculate node addresses using configuration.
+
+ """
+ # Special case for single-host lookups
+ if len(hosts) == 1:
+ (name, ) = hosts
+ return [_CheckConfigNode(name, single_node_fn(name))]
+ else:
+ all_nodes = all_nodes_fn()
+ return [_CheckConfigNode(name, all_nodes.get(name, None))
+ for name in hosts]
+
+
+class _RpcProcessor:
+ def __init__(self, resolver, port, lock_monitor_cb=None):
+ """Initializes this class.
+
+ @param resolver: callable accepting a list of hostnames, returning a list
+ of tuples containing name and IP address (IP address can be the name or
+ the special value L{_OFFLINE} to mark offline machines)
+ @type port: int
+ @param port: TCP port
+ @param lock_monitor_cb: Callable for registering with lock monitor
"""
- assert _http_manager, "RPC module not initialized"
+ self._resolver = resolver
+ self._port = port
+ self._lock_monitor_cb = lock_monitor_cb
- _http_manager.ExecRequests(self.nc.values())
+ @staticmethod
+ def _PrepareRequests(hosts, port, procedure, body, read_timeout):
+ """Prepares requests by sorting offline hosts into separate list.
+ """
results = {}
+ requests = {}
- for name, req in self.nc.iteritems():
- if req.success and req.resp_status_code == http.HTTP_OK:
- results[name] = RpcResult(data=serializer.LoadJson(req.resp_body),
- node=name, call=self.procedure)
- continue
+ for (name, ip) in hosts:
+ if ip is _OFFLINE:
+ # Node is marked as offline
+ results[name] = RpcResult(node=name, offline=True, call=procedure)
+ else:
+ requests[name] = \
+ http.client.HttpClientRequest(str(ip), port,
+ http.HTTP_PUT, str("/%s" % procedure),
+ headers=_RPC_CLIENT_HEADERS,
+ post_data=body,
+ read_timeout=read_timeout,
+ nicename="%s/%s" % (name, procedure),
+ curl_config_fn=_ConfigRpcCurl)
+
+ return (results, requests)
- # TODO: Better error reporting
- if req.error:
- msg = req.error
+ @staticmethod
+ def _CombineResults(results, requests, procedure):
+ """Combines pre-computed results for offline hosts with actual call results.
+
+ """
+ for name, req in requests.items():
+ if req.success and req.resp_status_code == http.HTTP_OK:
+ host_result = RpcResult(data=serializer.LoadJson(req.resp_body),
+ node=name, call=procedure)
else:
- msg = req.resp_body
+ # TODO: Better error reporting
+ if req.error:
+ msg = req.error
+ else:
+ msg = req.resp_body
- logging.error("RPC error in %s from node %s: %s",
- self.procedure, name, msg)
- results[name] = RpcResult(data=msg, failed=True, node=name,
- call=self.procedure)
+ logging.error("RPC error in %s on node %s: %s", procedure, name, msg)
+ host_result = RpcResult(data=msg, failed=True, node=name,
+ call=procedure)
+
+ results[name] = host_result
return results
+ def __call__(self, hosts, procedure, body, read_timeout=None,
+ _req_process_fn=http.client.ProcessRequests):
+ """Makes an RPC request to a number of nodes.
+
+ @type hosts: sequence
+ @param hosts: Hostnames
+ @type procedure: string
+ @param procedure: Request path
+ @type body: string
+ @param body: Request body
+ @type read_timeout: int or None
+ @param read_timeout: Read timeout for request
+
+ """
+ assert procedure in _TIMEOUTS, "RPC call not declared in the timeouts table"
+
+ if read_timeout is None:
+ read_timeout = _TIMEOUTS[procedure]
+
+ (results, requests) = \
+ self._PrepareRequests(self._resolver(hosts), self._port, procedure,
+ str(body), read_timeout)
+
+ _req_process_fn(requests.values(), lock_monitor_cb=self._lock_monitor_cb)
+
+ assert not frozenset(results).intersection(requests)
+
+ return self._CombineResults(results, requests, procedure)
+
def _EncodeImportExportIO(ieio, ieioargs):
"""Encodes import/export I/O information.
class RpcRunner(object):
- """RPC runner class"""
+ """RPC runner class.
- def __init__(self, cfg):
- """Initialized the rpc runner.
+ """
+ def __init__(self, context):
+ """Initialized the RPC runner.
- @type cfg: C{config.ConfigWriter}
- @param cfg: the configuration object that will be used to get data
- about the cluster
+ @type context: C{masterd.GanetiContext}
+ @param context: Ganeti context
"""
- self._cfg = cfg
- self.port = utils.GetDaemonPort(constants.NODED)
+ self._cfg = context.cfg
+ self._proc = _RpcProcessor(compat.partial(_NodeConfigResolver,
+ self._cfg.GetNodeInfo,
+ self._cfg.GetAllNodesInfo),
+ netutils.GetDaemonPort(constants.NODED),
+ lock_monitor_cb=context.glm.AddToLockMonitor)
- def _InstDict(self, instance, hvp=None, bep=None):
+ def _InstDict(self, instance, hvp=None, bep=None, osp=None):
"""Convert the given instance to a dict.
This is done via the instance's ToDict() method and additionally
@param hvp: a dictionary with overridden hypervisor parameters
@type bep: dict or None
@param bep: a dictionary with overridden backend parameters
+ @type osp: dict or None
+ @param osp: a dictionary with overridden os parameters
@rtype: dict
@return: the instance dict, with the hvparams filled with the
cluster defaults
idict["beparams"] = cluster.FillBE(instance)
if bep is not None:
idict["beparams"].update(bep)
+ idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
+ if osp is not None:
+ idict["osparams"].update(osp)
for nic in idict["nics"]:
nic['nicparams'] = objects.FillDict(
cluster.nicparams[constants.PP_DEFAULT],
nic['nicparams'])
return idict
- def _ConnectList(self, client, node_list, call, read_timeout=None):
- """Helper for computing node addresses.
-
- @type client: L{ganeti.rpc.Client}
- @param client: a C{Client} instance
- @type node_list: list
- @param node_list: the node list we should connect
- @type call: string
- @param call: the name of the remote procedure call, for filling in
- correctly any eventual offline nodes' results
- @type read_timeout: int
- @param read_timeout: overwrites the default read timeout for the
- given operation
-
- """
- all_nodes = self._cfg.GetAllNodesInfo()
- name_list = []
- addr_list = []
- skip_dict = {}
- for node in node_list:
- if node in all_nodes:
- if all_nodes[node].offline:
- skip_dict[node] = RpcResult(node=node, offline=True, call=call)
- continue
- val = all_nodes[node].primary_ip
- else:
- val = None
- addr_list.append(val)
- name_list.append(node)
- if name_list:
- client.ConnectList(name_list, address_list=addr_list,
- read_timeout=read_timeout)
- return skip_dict
-
- def _ConnectNode(self, client, node, call, read_timeout=None):
- """Helper for computing one node's address.
-
- @type client: L{ganeti.rpc.Client}
- @param client: a C{Client} instance
- @type node: str
- @param node: the node we should connect
- @type call: string
- @param call: the name of the remote procedure call, for filling in
- correctly any eventual offline nodes' results
- @type read_timeout: int
- @param read_timeout: overwrites the default read timeout for the
- given operation
-
- """
- node_info = self._cfg.GetNodeInfo(node)
- if node_info is not None:
- if node_info.offline:
- return RpcResult(node=node, offline=True, call=call)
- addr = node_info.primary_ip
- else:
- addr = None
- client.ConnectNode(node, address=addr, read_timeout=read_timeout)
-
def _MultiNodeCall(self, node_list, procedure, args, read_timeout=None):
"""Helper for making a multi-node call
"""
body = serializer.DumpJson(args, indent=False)
- c = Client(procedure, body, self.port)
- skip_dict = self._ConnectList(c, node_list, procedure,
- read_timeout=read_timeout)
- skip_dict.update(c.GetResults())
- return skip_dict
+ return self._proc(node_list, procedure, body, read_timeout=read_timeout)
- @classmethod
- def _StaticMultiNodeCall(cls, node_list, procedure, args,
+ @staticmethod
+ def _StaticMultiNodeCall(node_list, procedure, args,
address_list=None, read_timeout=None):
"""Helper for making a multi-node static call
"""
body = serializer.DumpJson(args, indent=False)
- c = Client(procedure, body, utils.GetDaemonPort(constants.NODED))
- c.ConnectList(node_list, address_list=address_list,
- read_timeout=read_timeout)
- return c.GetResults()
+
+ if address_list is None:
+ resolver = _SsconfResolver
+ else:
+ # Caller provided an address list
+ resolver = _StaticResolver(address_list)
+
+ proc = _RpcProcessor(resolver,
+ netutils.GetDaemonPort(constants.NODED))
+ return proc(node_list, procedure, body, read_timeout=read_timeout)
def _SingleNodeCall(self, node, procedure, args, read_timeout=None):
"""Helper for making a single-node call
"""
body = serializer.DumpJson(args, indent=False)
- c = Client(procedure, body, self.port)
- result = self._ConnectNode(c, node, procedure, read_timeout=read_timeout)
- if result is None:
- # we did connect, node is not offline
- result = c.GetResults()[node]
- return result
+ return self._proc([node], procedure, body, read_timeout=read_timeout)[node]
@classmethod
def _StaticSingleNodeCall(cls, node, procedure, args, read_timeout=None):
"""
body = serializer.DumpJson(args, indent=False)
- c = Client(procedure, body, utils.GetDaemonPort(constants.NODED))
- c.ConnectNode(node, read_timeout=read_timeout)
- return c.GetResults()[node]
+ proc = _RpcProcessor(_SsconfResolver,
+ netutils.GetDaemonPort(constants.NODED))
+ return proc([node], procedure, body, read_timeout=read_timeout)[node]
- @staticmethod
- def _Compress(data):
- """Compresses a string for transport over RPC.
+ #
+ # Begin RPC calls
+ #
- Small amounts of data are not compressed.
+ @_RpcTimeout(_TMO_URGENT)
+ def call_bdev_sizes(self, node_list, devices):
+ """Gets the sizes of requested block devices present on a node
- @type data: str
- @param data: Data
- @rtype: tuple
- @return: Encoded data to send
+ This is a multi-node call.
"""
- # Small amounts of data are not compressed
- if len(data) < 512:
- return (constants.RPC_ENCODING_NONE, data)
-
- # Compress with zlib and encode in base64
- return (constants.RPC_ENCODING_ZLIB_BASE64,
- base64.b64encode(zlib.compress(data, 3)))
-
- #
- # Begin RPC calls
- #
+ return self._MultiNodeCall(node_list, "bdev_sizes", [devices])
@_RpcTimeout(_TMO_URGENT)
def call_lv_list(self, node_list, vg_name):
return self._SingleNodeCall(node, "bridges_exist", [bridges_list])
@_RpcTimeout(_TMO_NORMAL)
- def call_instance_start(self, node, instance, hvp, bep):
+ def call_instance_start(self, node, instance, hvp, bep, startup_paused):
"""Starts an instance.
This is a single-node call.
"""
idict = self._InstDict(instance, hvp=hvp, bep=bep)
- return self._SingleNodeCall(node, "instance_start", [idict])
+ return self._SingleNodeCall(node, "instance_start", [idict, startup_paused])
@_RpcTimeout(_TMO_NORMAL)
def call_instance_shutdown(self, node, instance, timeout):
[self._InstDict(instance), info, target])
@_RpcTimeout(_TMO_NORMAL)
- def call_finalize_migration(self, node, instance, info, success):
+ def call_instance_finalize_migration_dst(self, node, instance, info, success):
"""Finalize any target-node migration specific operation.
This is called both in case of a successful migration and in case of error
@param success: whether the migration was a success or a failure
"""
- return self._SingleNodeCall(node, "finalize_migration",
+ return self._SingleNodeCall(node, "instance_finalize_migration_dst",
[self._InstDict(instance), info, success])
@_RpcTimeout(_TMO_SLOW)
return self._SingleNodeCall(node, "instance_migrate",
[self._InstDict(instance), target, live])
+ @_RpcTimeout(_TMO_SLOW)
+ def call_instance_finalize_migration_src(self, node, instance, success, live):
+ """Finalize the instance migration on the source node.
+
+ This is a single-node call.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that was migrated
+ @type success: bool
+ @param success: whether the migration succeeded or not
+ @type live: bool
+ @param live: whether the user requested a live migration or not
+
+ """
+ return self._SingleNodeCall(node, "instance_finalize_migration_src",
+ [self._InstDict(instance), success, live])
+
+ @_RpcTimeout(_TMO_SLOW)
+ def call_instance_get_migration_status(self, node, instance):
+ """Report migration status.
+
+ This is a single-node call that must be executed on the source node.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that is being migrated
+ @rtype: L{objects.MigrationStatus}
+ @return: the status of the current migration (one of
+ L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+ progress info that can be retrieved from the hypervisor
+
+ """
+ result = self._SingleNodeCall(node, "instance_get_migration_status",
+ [self._InstDict(instance)])
+ if not result.fail_msg and result.payload is not None:
+ result.payload = objects.MigrationStatus.FromDict(result.payload)
+ return result
+
@_RpcTimeout(_TMO_NORMAL)
def call_instance_reboot(self, node, inst, reboot_type, shutdown_timeout):
"""Reboots an instance.
shutdown_timeout])
@_RpcTimeout(_TMO_1DAY)
- def call_instance_os_add(self, node, inst, reinstall, debug):
+ def call_instance_os_add(self, node, inst, reinstall, debug, osparams=None):
"""Installs an OS on the given instance.
This is a single-node call.
"""
return self._SingleNodeCall(node, "instance_os_add",
- [self._InstDict(inst), reinstall, debug])
+ [self._InstDict(inst, osp=osparams),
+ reinstall, debug])
@_RpcTimeout(_TMO_SLOW)
def call_instance_run_rename(self, node, inst, old_name, debug):
return self._MultiNodeCall(node_list, "instance_list", [hypervisor_list])
@_RpcTimeout(_TMO_FAST)
- def call_node_tcp_ping(self, node, source, target, port, timeout,
- live_port_needed):
- """Do a TcpPing on the remote node
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "node_tcp_ping",
- [source, target, port, timeout,
- live_port_needed])
-
- @_RpcTimeout(_TMO_FAST)
def call_node_has_ip_address(self, node, address):
"""Checks if a node has the given IP address.
[vg_name, hypervisor_type])
@_RpcTimeout(_TMO_NORMAL)
- def call_node_add(self, node, dsa, dsapub, rsa, rsapub, ssh, sshpub):
- """Add a node to the cluster.
+ def call_etc_hosts_modify(self, node, mode, name, ip):
+ """Modify hosts file with name
- This is a single-node call.
+ @type node: string
+ @param node: The node to call
+ @type mode: string
+ @param mode: The mode to operate. Currently "add" or "remove"
+ @type name: string
+ @param name: The host name to be modified
+ @type ip: string
+ @param ip: The ip of the entry (just valid if mode is "add")
"""
- return self._SingleNodeCall(node, "node_add",
- [dsa, dsapub, rsa, rsapub, ssh, sshpub])
+ return self._SingleNodeCall(node, "etc_hosts_modify", [mode, name, ip])
@_RpcTimeout(_TMO_NORMAL)
def call_node_verify(self, node_list, checkdict, cluster_name):
@classmethod
@_RpcTimeout(_TMO_FAST)
- def call_node_start_master(cls, node, start_daemons, no_voting):
- """Tells a node to activate itself as a master.
+ def call_node_start_master_daemons(cls, node, no_voting):
+ """Starts master daemons on a node.
This is a single-node call.
"""
- return cls._StaticSingleNodeCall(node, "node_start_master",
- [start_daemons, no_voting])
+ return cls._StaticSingleNodeCall(node, "node_start_master_daemons",
+ [no_voting])
@classmethod
@_RpcTimeout(_TMO_FAST)
- def call_node_stop_master(cls, node, stop_daemons):
- """Tells a node to demote itself from master status.
+ def call_node_activate_master_ip(cls, node):
+ """Activates master IP on a node.
This is a single-node call.
"""
- return cls._StaticSingleNodeCall(node, "node_stop_master", [stop_daemons])
+ return cls._StaticSingleNodeCall(node, "node_activate_master_ip", [])
+
+ @classmethod
+ @_RpcTimeout(_TMO_FAST)
+ def call_node_stop_master(cls, node):
+ """Deactivates master IP and stops master daemons on a node.
+
+ This is a single-node call.
+
+ """
+ return cls._StaticSingleNodeCall(node, "node_stop_master", [])
+
+ @classmethod
+ @_RpcTimeout(_TMO_FAST)
+ def call_node_deactivate_master_ip(cls, node):
+ """Deactivates master IP on a node.
+
+ This is a single-node call.
+
+ """
+ return cls._StaticSingleNodeCall(node, "node_deactivate_master_ip", [])
+
+ @classmethod
+ @_RpcTimeout(_TMO_FAST)
+ def call_node_change_master_netmask(cls, node, netmask):
+ """Change master IP netmask.
+
+ This is a single-node call.
+
+ """
+ return cls._StaticSingleNodeCall(node, "node_change_master_netmask",
+ [netmask])
@classmethod
@_RpcTimeout(_TMO_URGENT)
return self._SingleNodeCall(node, "blockdev_create",
[bdev.ToDict(), size, owner, on_primary, info])
+ @_RpcTimeout(_TMO_SLOW)
+ def call_blockdev_wipe(self, node, bdev, offset, size):
+ """Request wipe at given offset with given size of a block device.
+
+ This is a single-node call.
+
+ """
+ return self._SingleNodeCall(node, "blockdev_wipe",
+ [bdev.ToDict(), offset, size])
+
@_RpcTimeout(_TMO_NORMAL)
def call_blockdev_remove(self, node, bdev):
"""Request removal of a given block device.
[(d.ToDict(), uid) for d, uid in devlist])
@_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_assemble(self, node, disk, owner, on_primary):
+ def call_blockdev_pause_resume_sync(self, node, disks, pause):
+ """Request a pause/resume of given block device.
+
+ This is a single-node call.
+
+ """
+ return self._SingleNodeCall(node, "blockdev_pause_resume_sync",
+ [[bdev.ToDict() for bdev in disks], pause])
+
+ @_RpcTimeout(_TMO_NORMAL)
+ def call_blockdev_assemble(self, node, disk, owner, on_primary, idx):
"""Request assembling of a given block device.
This is a single-node call.
"""
return self._SingleNodeCall(node, "blockdev_assemble",
- [disk.ToDict(), owner, on_primary])
+ [disk.ToDict(), owner, on_primary, idx])
@_RpcTimeout(_TMO_NORMAL)
def call_blockdev_shutdown(self, node, disk):
return result
@_RpcTimeout(_TMO_NORMAL)
+ def call_blockdev_getmirrorstatus_multi(self, node_list, node_disks):
+ """Request status of (mirroring) devices from multiple nodes.
+
+ This is a multi-node call.
+
+ """
+ result = self._MultiNodeCall(node_list, "blockdev_getmirrorstatus_multi",
+ [dict((name, [dsk.ToDict() for dsk in disks])
+ for name, disks in node_disks.items())])
+ for nres in result.values():
+ if nres.fail_msg:
+ continue
+
+ for idx, (success, status) in enumerate(nres.payload):
+ if success:
+ nres.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
+
+ return result
+
+ @_RpcTimeout(_TMO_NORMAL)
def call_blockdev_find(self, node, disk):
"""Request identification of a given block device.
return self._SingleNodeCall(node, "blockdev_close", params)
@_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_getsizes(self, node, disks):
+ def call_blockdev_getsize(self, node, disks):
"""Returns the size of the given disks.
This is a single-node call.
return self._MultiNodeCall(node_list, "drbd_wait_sync",
[nodes_ip, [cf.ToDict() for cf in disks]])
+ @_RpcTimeout(_TMO_URGENT)
+ def call_drbd_helper(self, node_list):
+ """Gets drbd helper.
+
+ This is a multi-node call.
+
+ """
+ return self._MultiNodeCall(node_list, "drbd_helper", [])
+
@classmethod
@_RpcTimeout(_TMO_NORMAL)
def call_upload_file(cls, node_list, file_name, address_list=None):
"""
file_contents = utils.ReadFile(file_name)
- data = cls._Compress(file_contents)
+ data = _Compress(file_contents)
st = os.stat(file_name)
- params = [file_name, data, st.st_mode, st.st_uid, st.st_gid,
- st.st_atime, st.st_mtime]
+ getents = runtime.GetEnts()
+ params = [file_name, data, st.st_mode, getents.LookupUid(st.st_uid),
+ getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
return cls._StaticMultiNodeCall(node_list, "upload_file", params,
address_list=address_list)
"""
return cls._StaticMultiNodeCall(node_list, "write_ssconf_files", [values])
+ @_RpcTimeout(_TMO_NORMAL)
+ def call_run_oob(self, node, oob_program, command, remote_node, timeout):
+ """Runs OOB.
+
+ This is a single-node call.
+
+ """
+ return self._SingleNodeCall(node, "run_oob", [oob_program, command,
+ remote_node, timeout])
+
@_RpcTimeout(_TMO_FAST)
def call_os_diagnose(self, node_list):
"""Request a diagnose of OS definitions.
result.payload = objects.OS.FromDict(result.payload)
return result
+ @_RpcTimeout(_TMO_FAST)
+ def call_os_validate(self, required, nodes, name, checks, params):
+ """Run a validation routine for a given OS.
+
+ This is a multi-node call.
+
+ """
+ return self._MultiNodeCall(nodes, "os_validate",
+ [required, name, checks, params])
+
@_RpcTimeout(_TMO_NORMAL)
def call_hooks_runner(self, node_list, hpath, phase, env):
"""Call the hooks runner.
return self._SingleNodeCall(node, "iallocator_runner", [name, idata])
@_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_grow(self, node, cf_bdev, amount):
+ def call_blockdev_grow(self, node, cf_bdev, amount, dryrun):
"""Request a snapshot of the given block device.
This is a single-node call.
"""
return self._SingleNodeCall(node, "blockdev_grow",
- [cf_bdev.ToDict(), amount])
+ [cf_bdev.ToDict(), amount, dryrun])
@_RpcTimeout(_TMO_1DAY)
def call_blockdev_export(self, node, cf_bdev,
[old_file_storage_dir, new_file_storage_dir])
@classmethod
- @_RpcTimeout(_TMO_FAST)
+ @_RpcTimeout(_TMO_URGENT)
def call_jobqueue_update(cls, node_list, address_list, file_name, content):
"""Update job queue.
"""
return cls._StaticMultiNodeCall(node_list, "jobqueue_update",
- [file_name, cls._Compress(content)],
+ [file_name, _Compress(content)],
address_list=address_list)
@classmethod
return cls._StaticSingleNodeCall(node, "jobqueue_purge", [])
@classmethod
- @_RpcTimeout(_TMO_FAST)
+ @_RpcTimeout(_TMO_URGENT)
def call_jobqueue_rename(cls, node_list, address_list, rename):
"""Rename a job queue file.
return cls._StaticMultiNodeCall(node_list, "jobqueue_rename", rename,
address_list=address_list)
- @classmethod
- @_RpcTimeout(_TMO_FAST)
- def call_jobqueue_set_drain(cls, node_list, drain_flag):
- """Set the drain flag on the queue.
-
- This is a multi-node call.
-
- @type node_list: list
- @param node_list: the list of nodes to query
- @type drain_flag: bool
- @param drain_flag: if True, will set the drain flag, otherwise reset it.
-
- """
- return cls._StaticMultiNodeCall(node_list, "jobqueue_set_drain",
- [drain_flag])
-
@_RpcTimeout(_TMO_NORMAL)
def call_hypervisor_validate_params(self, node_list, hvname, hvparams):
"""Validate the hypervisor params.
return self._SingleNodeCall(node, "x509_cert_remove", [name])
@_RpcTimeout(_TMO_NORMAL)
- def call_import_start(self, node, opts, instance, dest, dest_args):
+ def call_import_start(self, node, opts, instance, component,
+ dest, dest_args):
"""Starts a listener for an import.
This is a single-node call.
@param node: Node name
@type instance: C{objects.Instance}
@param instance: Instance object
+ @type component: string
+ @param component: which part of the instance is being imported
"""
return self._SingleNodeCall(node, "import_start",
[opts.ToDict(),
- self._InstDict(instance), dest,
+ self._InstDict(instance), component, dest,
_EncodeImportExportIO(dest, dest_args)])
@_RpcTimeout(_TMO_NORMAL)
def call_export_start(self, node, opts, host, port,
- instance, source, source_args):
+ instance, component, source, source_args):
"""Starts an export daemon.
This is a single-node call.
@param node: Node name
@type instance: C{objects.Instance}
@param instance: Instance object
+ @type component: string
+ @param component: which part of the instance is being exported
"""
return self._SingleNodeCall(node, "export_start",
[opts.ToDict(), host, port,
- self._InstDict(instance), source,
+ self._InstDict(instance),
+ component, source,
_EncodeImportExportIO(source, source_args)])
@_RpcTimeout(_TMO_FAST)