#
#
-# Copyright (C) 2006, 2007, 2008, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
import re
import logging
import time
+import tempfile
from ganeti import rpc
from ganeti import ssh
from ganeti import hypervisor
from ganeti import bdev
from ganeti import netutils
-from ganeti import backend
from ganeti import luxi
from ganeti import jstore
+from ganeti import pathutils
# ec_id for InitConfig's temporary reservation manager
permitted hosts and adds the hostkey to its own known hosts.
"""
- priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
+ priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)
for name in priv_key, pub_key:
if os.path.exists(name):
backup=True)
-def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_confd_hmac_key,
- new_cds, rapi_cert_pem=None, cds=None,
- nodecert_file=constants.NODED_CERT_FILE,
- rapicert_file=constants.RAPI_CERT_FILE,
- hmackey_file=constants.CONFD_HMAC_KEY,
- cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
+def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
+ new_confd_hmac_key, new_cds,
+ rapi_cert_pem=None, spice_cert_pem=None,
+ spice_cacert_pem=None, cds=None,
+ nodecert_file=pathutils.NODED_CERT_FILE,
+ rapicert_file=pathutils.RAPI_CERT_FILE,
+ spicecert_file=pathutils.SPICE_CERT_FILE,
+ spicecacert_file=pathutils.SPICE_CACERT_FILE,
+ hmackey_file=pathutils.CONFD_HMAC_KEY,
+ cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
"""Updates the cluster certificates, keys and secrets.
@type new_cluster_cert: bool
@param new_cluster_cert: Whether to generate a new cluster certificate
@type new_rapi_cert: bool
@param new_rapi_cert: Whether to generate a new RAPI certificate
+ @type new_spice_cert: bool
+ @param new_spice_cert: Whether to generate a new SPICE certificate
@type new_confd_hmac_key: bool
@param new_confd_hmac_key: Whether to generate a new HMAC key
@type new_cds: bool
@param new_cds: Whether to generate a new cluster domain secret
@type rapi_cert_pem: string
@param rapi_cert_pem: New RAPI certificate in PEM format
+ @type spice_cert_pem: string
+ @param spice_cert_pem: New SPICE certificate in PEM format
+ @type spice_cacert_pem: string
+ @param spice_cacert_pem: Certificate of the CA that signed the SPICE
+ certificate, in PEM format
@type cds: string
@param cds: New cluster domain secret
@type nodecert_file: string
@param nodecert_file: optional override of the node cert file path
@type rapicert_file: string
@param rapicert_file: optional override of the rapi cert file path
+ @type spicecert_file: string
+ @param spicecert_file: optional override of the spice cert file path
+ @type spicecacert_file: string
+ @param spicecacert_file: optional override of the spice CA cert file path
@type hmackey_file: string
@param hmackey_file: optional override of the hmac key file path
logging.debug("Generating new RAPI certificate at %s", rapicert_file)
utils.GenerateSelfSignedSslCert(rapicert_file)
+ # SPICE
+ spice_cert_exists = os.path.exists(spicecert_file)
+ spice_cacert_exists = os.path.exists(spicecacert_file)
+ if spice_cert_pem:
+    # spice_cert_pem implies that spice_cacert_pem was also provided
+ logging.debug("Writing SPICE certificate at %s", spicecert_file)
+ utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
+ logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
+ utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
+ elif new_spice_cert or not spice_cert_exists:
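+    # Back up any existing SPICE certificate files before regenerating them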
+ if spice_cert_exists:
+ utils.CreateBackup(spicecert_file)
+ if spice_cacert_exists:
+ utils.CreateBackup(spicecacert_file)
+
+ logging.debug("Generating new self-signed SPICE certificate at %s",
+ spicecert_file)
+ (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file)
+
+ # Self-signed certificate -> the public certificate is also the CA public
+ # certificate
+    logging.debug("Writing the public certificate to %s",
+                  spicecacert_file)
+    utils.WriteFile(spicecacert_file, mode=0400, data=cert_pem)
+
# Cluster domain secret
if cds:
logging.debug("Writing cluster domain secret to %s", cds_file)
"""
# Generate cluster secrets
- GenerateClusterCrypto(True, False, False, False)
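+  # Force only the cluster certificate; other files are created if missing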
+ GenerateClusterCrypto(True, False, False, False, False)
- result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
+ result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
if result.failed:
raise errors.OpExecError("Could not start the node daemon, command %s"
" had exitcode %s and error %s" %
"""
def _CheckNodeDaemon():
- result = rpc.RpcRunner.call_version([node_name])[node_name]
+ # Pylint bug <http://www.logilab.org/ticket/35642>
+ # pylint: disable=E1101
+ result = rpc.BootstrapRunner().call_version([node_name])[node_name]
if result.fail_msg:
raise utils.RetryAgain()
" %s seconds" % _DAEMON_READY_TIMEOUT)
+def _WaitForSshDaemon(hostname, port, family):
+ """Wait for SSH daemon to become responsive.
+
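+  @type hostname: string
+  @param hostname: hostname of the node to connect to
+  @type port: int
+  @param port: port number of the node's SSH daemon
+  @type family: int
+  @param family: address family used to resolve the hostname
+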
+ """
+ hostip = netutils.GetHostname(name=hostname, family=family).ip
+
+ def _CheckSshDaemon():
+ if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
+ logging.debug("SSH daemon on %s:%s (IP address %s) has become"
+ " responsive", hostname, port, hostip)
+ else:
+ raise utils.RetryAgain()
+
+ try:
+ utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
+ except utils.RetryTimeout:
+ raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
+ " become responsive within %s seconds" %
+ (hostname, port, hostip, _DAEMON_READY_TIMEOUT))
+
+
+def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
+ use_cluster_key, ask_key, strict_host_check, data):
+  """Runs a setup command on a remote machine, passing JSON data on stdin.
+
+ @type cluster_name: string
+ @param cluster_name: Cluster name
+ @type node: string
+ @param node: Node name
+ @type basecmd: string
+ @param basecmd: Base command (path on the remote machine)
+ @type debug: bool
+ @param debug: Enable debug output
+ @type verbose: bool
+ @param verbose: Enable verbose output
+ @type use_cluster_key: bool
+ @param use_cluster_key: See L{ssh.SshRunner.BuildCmd}
+ @type ask_key: bool
+ @param ask_key: See L{ssh.SshRunner.BuildCmd}
+ @type strict_host_check: bool
+ @param strict_host_check: See L{ssh.SshRunner.BuildCmd}
+  @param data: JSON-serializable input data for the script (passed to stdin)
+
+ """
+ cmd = [basecmd]
+
+ # Pass --debug/--verbose to the external script if set on our invocation
+ if debug:
+ cmd.append("--debug")
+
+ if verbose:
+ cmd.append("--verbose")
+
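+  # Build the SSH command using the cluster's primary IP family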
+ family = ssconf.SimpleStore().GetPrimaryIPFamily()
+ srun = ssh.SshRunner(cluster_name,
+ ipv6=(family == netutils.IP6Address.family))
+ scmd = srun.BuildCmd(node, constants.SSH_LOGIN_USER,
+ utils.ShellQuoteArgs(cmd),
+ batch=False, ask_key=ask_key, quiet=False,
+ strict_host_check=strict_host_check,
+ use_cluster_key=use_cluster_key)
+
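+  # Serialize the input data to a temporary file and feed it to stdin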
+ tempfh = tempfile.TemporaryFile()
+ try:
+ tempfh.write(serializer.DumpJson(data))
+ tempfh.seek(0)
+
+ result = utils.RunCmd(scmd, interactive=True, input_fd=tempfh)
+ finally:
+ tempfh.close()
+
+ if result.failed:
+ raise errors.OpExecError("Command '%s' failed: %s" %
+ (result.cmd, result.fail_reason))
+
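+  # The setup command may have restarted sshd; wait until it answers again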
+ _WaitForSshDaemon(node, netutils.GetDaemonPort(constants.SSH), family)
+
+
def _InitFileStorage(file_storage_dir):
"""Initialize if needed the file storage.
time) or the normalized path to the storage directory
"""
- if not constants.ENABLE_FILE_STORAGE:
- return ""
-
file_storage_dir = os.path.normpath(file_storage_dir)
if not os.path.isabs(file_storage_dir):
- raise errors.OpPrereqError("The file storage directory you passed is"
- " not an absolute path.", errors.ECODE_INVAL)
+ raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
+ " path" % file_storage_dir, errors.ECODE_INVAL)
if not os.path.exists(file_storage_dir):
try:
return file_storage_dir
-def _InitSharedFileStorage(shared_file_storage_dir):
- """Initialize if needed the shared file storage.
-
- @param shared_file_storage_dir: the user-supplied value
- @return: either empty string (if file storage was disabled at build
- time) or the normalized path to the storage directory
-
- """
- if not constants.ENABLE_SHARED_FILE_STORAGE:
- return ""
-
- shared_file_storage_dir = os.path.normpath(shared_file_storage_dir)
-
- if not os.path.isabs(shared_file_storage_dir):
- raise errors.OpPrereqError("The shared file storage directory you"
- " passed is not an absolute path.",
- errors.ECODE_INVAL)
-
- if not os.path.exists(shared_file_storage_dir):
- try:
- os.makedirs(shared_file_storage_dir, 0750)
- except OSError, err:
- raise errors.OpPrereqError("Cannot create file storage directory"
- " '%s': %s" % (shared_file_storage_dir, err),
- errors.ECODE_ENVIRON)
-
- if not os.path.isdir(shared_file_storage_dir):
- raise errors.OpPrereqError("The file storage directory '%s' is not"
- " a directory." % shared_file_storage_dir,
- errors.ECODE_ENVIRON)
- return shared_file_storage_dir
-
-
-def InitCluster(cluster_name, mac_prefix, # pylint: disable-msg=R0913
- master_netdev, file_storage_dir, shared_file_storage_dir,
- candidate_pool_size, secondary_ip=None, vg_name=None,
- beparams=None, nicparams=None, ndparams=None, hvparams=None,
- enabled_hypervisors=None, modify_etc_hosts=True,
- modify_ssh_setup=True, maintain_node_health=False,
- drbd_helper=None, uid_pool=None, default_iallocator=None,
- primary_ip_version=None, prealloc_wipe_disks=False):
+def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
+ master_netmask, master_netdev, file_storage_dir,
+ shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
+ vg_name=None, beparams=None, nicparams=None, ndparams=None,
+ hvparams=None, diskparams=None, enabled_hypervisors=None,
+ modify_etc_hosts=True, modify_ssh_setup=True,
+ maintain_node_health=False, drbd_helper=None, uid_pool=None,
+ default_iallocator=None, primary_ip_version=None, ipolicy=None,
+ prealloc_wipe_disks=False, use_external_mip_script=False,
+ hv_state=None, disk_state=None, enabled_disk_templates=None):
"""Initialise the cluster.
@type candidate_pool_size: int
@param candidate_pool_size: master candidate pool size
+ @type enabled_disk_templates: list of string
+  @param enabled_disk_templates: list of disk templates to be used in this
+ cluster
"""
# TODO: complete the docstring
" entries: %s" % invalid_hvs,
errors.ECODE_INVAL)
+ if not enabled_disk_templates:
+ raise errors.OpPrereqError("Enabled disk templates list must contain at"
+ " least one member", errors.ECODE_INVAL)
+ invalid_disk_templates = \
+ set(enabled_disk_templates) - constants.DISK_TEMPLATES
+ if invalid_disk_templates:
+ raise errors.OpPrereqError("Enabled disk templates list contains invalid"
+ " entries: %s" % invalid_disk_templates,
+ errors.ECODE_INVAL)
- ipcls = None
- if primary_ip_version == constants.IP4_VERSION:
- ipcls = netutils.IP4Address
- elif primary_ip_version == constants.IP6_VERSION:
- ipcls = netutils.IP6Address
- else:
+ try:
+ ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
+ except errors.ProgrammerError:
raise errors.OpPrereqError("Invalid primary ip version: %d." %
- primary_ip_version)
+ primary_ip_version, errors.ECODE_INVAL)
hostname = netutils.GetHostname(family=ipcls.family)
if not ipcls.IsValid(hostname.ip):
raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
- " address." % (hostname.ip, primary_ip_version))
+ " address." % (hostname.ip, primary_ip_version),
+ errors.ECODE_INVAL)
if ipcls.IsLoopback(hostname.ip):
raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
" address. Please fix DNS or %s." %
- (hostname.ip, constants.ETC_HOSTS),
+ (hostname.ip, pathutils.ETC_HOSTS),
errors.ECODE_ENVIRON)
if not ipcls.Own(hostname.ip):
" but it does not belong to this host." %
secondary_ip, errors.ECODE_ENVIRON)
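+  # Use the family's full prefix length if no master netmask was given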
+ if master_netmask is not None:
+ if not ipcls.ValidateNetmask(master_netmask):
+      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s" %
+ (master_netmask, primary_ip_version),
+ errors.ECODE_INVAL)
+ else:
+ master_netmask = ipcls.iplen
+
if vg_name is not None:
# Check if volume group is valid
vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
curr_helper),
errors.ECODE_INVAL)
- file_storage_dir = _InitFileStorage(file_storage_dir)
- shared_file_storage_dir = _InitSharedFileStorage(shared_file_storage_dir)
+ logging.debug("Stopping daemons (if any are running)")
+ result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
+ if result.failed:
+ raise errors.OpExecError("Could not stop daemons, command %s"
+ " had exitcode %s and error '%s'" %
+ (result.cmd, result.exit_code, result.output))
+
+ if constants.ENABLE_FILE_STORAGE:
+ file_storage_dir = _InitFileStorage(file_storage_dir)
+ else:
+ file_storage_dir = ""
+
+ if constants.ENABLE_SHARED_FILE_STORAGE:
+ shared_file_storage_dir = _InitFileStorage(shared_file_storage_dir)
+ else:
+ shared_file_storage_dir = ""
if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
(master_netdev,
result.output.strip()), errors.ECODE_INVAL)
- dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
+ dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
utils.EnsureDirs(dirs)
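+  # Upgrade legacy backend parameters to their current format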
+ objects.UpgradeBeParams(beparams)
utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
+
objects.NIC.CheckParameterSyntax(nicparams)
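+  # Complete the supplied instance policy with built-in defaults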
+ full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
+
if ndparams is not None:
utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
else:
ndparams = dict(constants.NDC_DEFAULTS)
+ # This is ugly, as we modify the dict itself
+ # FIXME: Make utils.ForceDictType pure functional or write a wrapper
+ # around it
+ if hv_state:
+ for hvname, hvs_data in hv_state.items():
+ utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
+ hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
+ else:
+ hv_state = dict((hvname, constants.HVST_DEFAULTS)
+ for hvname in enabled_hypervisors)
+
+ # FIXME: disk_state has no default values yet
+ if disk_state:
+ for storage, ds_data in disk_state.items():
+ if storage not in constants.DS_VALID_TYPES:
+ raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
+ storage, errors.ECODE_INVAL)
+ for ds_name, state in ds_data.items():
+ utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
+ ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)
+
# hvparams is a mapping of hypervisor->hvparams dict
for hv_name, hv_params in hvparams.iteritems():
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class = hypervisor.GetHypervisor(hv_name)
hv_class.CheckParameterSyntax(hv_params)
+ # diskparams is a mapping of disk-template->diskparams dict
+ for template, dt_params in diskparams.items():
+ param_keys = set(dt_params.keys())
+ default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
+ if not (param_keys <= default_param_keys):
+ unknown_params = param_keys - default_param_keys
+ raise errors.OpPrereqError("Invalid parameters for disk template %s:"
+ " %s" % (template,
+ utils.CommaJoin(unknown_params)),
+ errors.ECODE_INVAL)
+ utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+ if template == constants.DT_DRBD8 and vg_name is not None:
+ # The default METAVG value is equal to the VG name set at init time,
+ # if provided
+ dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name
+
+ try:
+ utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
+ except errors.OpPrereqError, err:
+    raise errors.OpPrereqError("While verifying diskparam options: %s" % err,
+ errors.ECODE_INVAL)
+
# set up ssh config and /etc/hosts
- sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
- sshkey = sshline.split(" ")[1]
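+  # Collect the available SSH host keys; at least one of RSA/DSA must exist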
+ rsa_sshkey = ""
+ dsa_sshkey = ""
+ if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
+ sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
+ rsa_sshkey = sshline.split(" ")[1]
+ if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
+ sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
+ dsa_sshkey = sshline.split(" ")[1]
+ if not rsa_sshkey and not dsa_sshkey:
+ raise errors.OpPrereqError("Failed to find SSH public keys",
+ errors.ECODE_ENVIRON)
if modify_etc_hosts:
utils.AddHostToEtcHosts(hostname.name, hostname.ip)
# init of cluster config file
cluster_config = objects.Cluster(
serial_no=1,
- rsahostkeypub=sshkey,
+ rsahostkeypub=rsa_sshkey,
+ dsahostkeypub=dsa_sshkey,
highest_used_port=(constants.FIRST_DRBD_PORT - 1),
mac_prefix=mac_prefix,
volume_group_name=vg_name,
tcpudp_port_pool=set(),
master_node=hostname.name,
master_ip=clustername.ip,
+ master_netmask=master_netmask,
master_netdev=master_netdev,
cluster_name=clustername.name,
file_storage_dir=file_storage_dir,
nicparams={constants.PP_DEFAULT: nicparams},
ndparams=ndparams,
hvparams=hvparams,
+ diskparams=diskparams,
candidate_pool_size=candidate_pool_size,
modify_etc_hosts=modify_etc_hosts,
modify_ssh_setup=modify_ssh_setup,
default_iallocator=default_iallocator,
primary_ip_family=ipcls.family,
prealloc_wipe_disks=prealloc_wipe_disks,
+ use_external_mip_script=use_external_mip_script,
+ ipolicy=full_ipolicy,
+ hv_state_static=hv_state,
+ disk_state_static=disk_state,
+ enabled_disk_templates=enabled_disk_templates,
)
master_node_config = objects.Node(name=hostname.name,
primary_ip=hostname.ip,
)
InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
cfg = config.ConfigWriter(offline=True)
- ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
+ ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
cfg.Update(cfg.GetClusterInfo(), logging.error)
- backend.WriteSsconfFiles(cfg.GetSsconfValues())
+ ssconf.WriteSsconfFiles(cfg.GetSsconfValues())
# set up the inter-node password and certificate
_InitGanetiServerSetup(hostname.name)
logging.debug("Starting daemons")
- result = utils.RunCmd([constants.DAEMON_UTIL, "start-all"])
+ result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
if result.failed:
raise errors.OpExecError("Could not start daemons, command %s"
" had exitcode %s and error %s" %
def InitConfig(version, cluster_config, master_node_config,
- cfg_file=constants.CLUSTER_CONF_FILE):
+ cfg_file=pathutils.CLUSTER_CONF_FILE):
"""Create the initial cluster configuration.
It will contain the current node, which will also be the master
uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
name=constants.INITIAL_NODE_GROUP_NAME,
members=[master_node_config.name],
+ diskparams={},
)
nodegroups = {
default_nodegroup.uuid: default_nodegroup,
nodegroups=nodegroups,
nodes=nodes,
instances={},
+ networks={},
serial_no=1,
ctime=now, mtime=now)
utils.WriteFile(cfg_file,
"""
cfg = config.ConfigWriter()
modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
- result = rpc.RpcRunner.call_node_stop_master(master, True)
+ runner = rpc.BootstrapRunner()
+
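+  # Deactivate the master IP before stopping the master daemon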
+ master_params = cfg.GetMasterNetworkParameters()
+ master_params.name = master
+ ems = cfg.GetUseExternalMipScript()
+ result = runner.call_node_deactivate_master_ip(master_params.name,
+ master_params, ems)
+
+ msg = result.fail_msg
+ if msg:
+ logging.warning("Could not disable the master IP: %s", msg)
+
+ result = runner.call_node_stop_master(master)
msg = result.fail_msg
if msg:
logging.warning("Could not disable the master role: %s", msg)
- result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
+
+ result = runner.call_node_leave_cluster(master, modify_ssh_setup)
msg = result.fail_msg
if msg:
logging.warning("Could not shutdown the node daemon and cleanup"
" the node: %s", msg)
-def SetupNodeDaemon(cluster_name, node, ssh_key_check):
+def SetupNodeDaemon(opts, cluster_name, node):
"""Add a node to the cluster.
This function must be called before the actual opcode, and will ssh
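+  @param opts: the command line options; debug, verbose and ssh_key_check
+    are used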
@param cluster_name: the cluster name
@param node: the name of the new node
- @param ssh_key_check: whether to do a strict key check
"""
- family = ssconf.SimpleStore().GetPrimaryIPFamily()
- sshrunner = ssh.SshRunner(cluster_name,
- ipv6=(family == netutils.IP6Address.family))
-
- bind_address = constants.IP4_ADDRESS_ANY
- if family == netutils.IP6Address.family:
- bind_address = constants.IP6_ADDRESS_ANY
-
- # set up inter-node password and certificate and restarts the node daemon
- # and then connect with ssh to set password and start ganeti-noded
- # note that all the below variables are sanitized at this point,
- # either by being constants or by the checks above
- sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
- sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
- sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
- mycommand = ("%s stop-all; %s start %s -b %s" %
- (constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
- utils.ShellQuote(bind_address)))
-
- result = sshrunner.Run(node, 'root', mycommand, batch=False,
- ask_key=ssh_key_check,
- use_cluster_key=True,
- strict_host_check=ssh_key_check)
- if result.failed:
- raise errors.OpExecError("Remote command on node %s, error: %s,"
- " output: %s" %
- (node, result.fail_reason, result.output))
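+  # Input for the node daemon setup script, passed to it as JSON on stdin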
+ data = {
+ constants.NDS_CLUSTER_NAME: cluster_name,
+ constants.NDS_NODE_DAEMON_CERTIFICATE:
+ utils.ReadFile(pathutils.NODED_CERT_FILE),
+ constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
+ constants.NDS_START_NODE_DAEMON: True,
+ }
+
+ RunNodeSetupCmd(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
+ opts.debug, opts.verbose,
+ True, opts.ssh_key_check, opts.ssh_key_check, data)
_WaitForNodeDaemon(node)
" as master candidates. Only these nodes"
" can become masters. Current list of"
" master candidates is:\n"
- "%s" % ('\n'.join(mc_no_master)),
+ "%s" % ("\n".join(mc_no_master)),
errors.ECODE_STATE)
if not no_voting:
logging.info("Stopping the master daemon on node %s", old_master)
- result = rpc.RpcRunner.call_node_stop_master(old_master, True)
+ runner = rpc.BootstrapRunner()
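+  # Deactivate the master IP on the old master before stopping its daemons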
+ master_params = cfg.GetMasterNetworkParameters()
+ master_params.name = old_master
+ ems = cfg.GetUseExternalMipScript()
+ result = runner.call_node_deactivate_master_ip(master_params.name,
+ master_params, ems)
+
+ msg = result.fail_msg
+ if msg:
+ logging.warning("Could not disable the master IP: %s", msg)
+
+ result = runner.call_node_stop_master(old_master)
msg = result.fail_msg
if msg:
logging.error("Could not disable the master role on the old master"
- " %s, please disable manually: %s", old_master, msg)
+ " %s, please disable manually: %s", old_master, msg)
logging.info("Checking master IP non-reachability...")
master_ip = sstore.GetMasterIP()
total_timeout = 30
+
# Here we have a phase where no master should be running
def _check_ip():
if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
logging.info("Starting the master daemons on the new master")
- result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
+ result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
+ no_voting)
msg = result.fail_msg
if msg:
logging.error("Could not start the master role on the new master"
if not node_list:
# no nodes left (eventually after removing myself)
return []
- results = rpc.RpcRunner.call_master_info(node_list)
+ results = rpc.BootstrapRunner().call_master_info(node_list)
if not isinstance(results, dict):
# this should not happen (unless internal error in rpc)
logging.critical("Can't complete rpc call, aborting master startup")
if msg:
logging.warning("Error contacting node %s: %s", node, msg)
fail = True
- # for now we accept both length 3 and 4 (data[3] is primary ip version)
+  # for now we accept lengths 3, 4 and 5 (data[3] is the primary ip version
+ # and data[4] is the master netmask)
elif not isinstance(data, (tuple, list)) or len(data) < 3:
logging.warning("Invalid data received from node %s: %s", node, data)
fail = True