import os
import os.path
-import sha
import re
import logging
+import tempfile
from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
+from ganeti import hypervisor
-def _InitSSHSetup(node):
+def _InitSSHSetup():
"""Setup the SSH configuration for the cluster.
-
This generates a DSA keypair for root, adds the public key to the
authorized keys file and adds the host key to its own known hosts.
- Args:
- node: the name of this host as a fqdn
-
"""
priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
f.close()
+def _GenerateSelfSignedSslCert(file_name, validity=(365 * 5)):
+ """Generates a self-signed SSL certificate.
+
+ @type file_name: str
+ @param file_name: Path to output file
+ @type validity: int
+ @param validity: Validity for certificate in days
+
+ """
+  (fd, tmp_file_name) = tempfile.mkstemp(dir=os.path.dirname(file_name))
+  # close the fd returned by mkstemp right away: openssl writes to the
+  # file by name, and the descriptor would otherwise be leaked
+  os.close(fd)
+  try:
+ # Set permissions before writing key
+ os.chmod(tmp_file_name, 0600)
+
+ result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
+ "-days", str(validity), "-nodes", "-x509",
+ "-keyout", tmp_file_name, "-out", tmp_file_name,
+ "-batch"])
+ if result.failed:
+ raise errors.OpExecError("Could not generate SSL certificate, command"
+ " %s had exitcode %s and error message %s" %
+ (result.cmd, result.exit_code, result.output))
+
+ # Make read-only
+ os.chmod(tmp_file_name, 0400)
+
+ os.rename(tmp_file_name, file_name)
+ finally:
+ utils.RemoveFile(tmp_file_name)
+
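+# A minimal usage sketch for the helper above (the path is illustrative;
+# the real callers below pass constants.SSL_CERT_FILE and
+# constants.RAPI_CERT_FILE):
+#
+#   _GenerateSelfSignedSslCert("/var/lib/ganeti/server.pem")
+#
+# Since "-keyout" and "-out" point at the same path, the private key and
+# the certificate end up concatenated in a single PEM file.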
+
def _InitGanetiServerSetup():
"""Setup the necessary configuration for the initial node daemon.
the cluster and also generates the SSL certificate.
"""
- # Create pseudo random password
- randpass = utils.GenerateSecret()
-
- # and write it into the config file
- utils.WriteFile(constants.CLUSTER_PASSWORD_FILE,
- data="%s\n" % randpass, mode=0400)
+ _GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)
- result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
- "-days", str(365*5), "-nodes", "-x509",
- "-keyout", constants.SSL_CERT_FILE,
- "-out", constants.SSL_CERT_FILE, "-batch"])
- if result.failed:
- raise errors.OpExecError("could not generate server ssl cert, command"
- " %s had exitcode %s and error message %s" %
- (result.cmd, result.exit_code, result.output))
-
- os.chmod(constants.SSL_CERT_FILE, 0400)
+ # Don't overwrite existing file
+ if not os.path.exists(constants.RAPI_CERT_FILE):
+ _GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)
result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
(result.cmd, result.exit_code, result.output))
-def InitCluster(cluster_name, hypervisor_type, mac_prefix, def_bridge,
- master_netdev, file_storage_dir,
- secondary_ip=None,
- vg_name=None):
+def InitCluster(cluster_name, mac_prefix, def_bridge,
+ master_netdev, file_storage_dir, candidate_pool_size,
+ secondary_ip=None, vg_name=None, beparams=None, hvparams=None,
+ enabled_hypervisors=None, default_hypervisor=None,
+ modify_etc_hosts=True):
"""Initialise the cluster.
+ @type candidate_pool_size: int
+ @param candidate_pool_size: master candidate pool size
+
"""
+ # TODO: complete the docstring
if config.ConfigWriter.IsCluster():
raise errors.OpPrereqError("Cluster is already initialised")
- if hypervisor_type == constants.HT_XEN_HVM31:
- if not os.path.exists(constants.VNC_PASSWORD_FILE):
- raise errors.OpPrereqError("Please prepare the cluster VNC"
- "password file %s" %
- constants.VNC_PASSWORD_FILE)
-
hostname = utils.HostInfo()
if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
" range (%s). Please fix DNS or %s." %
(hostname.ip, constants.ETC_HOSTS))
- if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
- source=constants.LOCALHOST_IP_ADDRESS):
+ if not utils.OwnIpAddress(hostname.ip):
raise errors.OpPrereqError("Inconsistency: this host's name resolves"
" to %s,\nbut this ip address does not"
" belong to this host."
if not utils.IsValidIP(secondary_ip):
raise errors.OpPrereqError("Invalid secondary ip given")
if (secondary_ip != hostname.ip and
- (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
- source=constants.LOCALHOST_IP_ADDRESS))):
+ not utils.OwnIpAddress(secondary_ip)):
raise errors.OpPrereqError("You gave %s as secondary IP,"
" but it does not belong to this host." %
secondary_ip)
  if not re.match("^[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}$", mac_prefix):
raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)
- if hypervisor_type not in constants.HYPER_TYPES:
- raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
- hypervisor_type)
-
result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
if result.failed:
raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
raise errors.OpPrereqError("Init.d script '%s' missing or not"
" executable." % constants.NODE_INITD_SCRIPT)
+ dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
+ utils.EnsureDirs(dirs)
+
+ utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
+ # hvparams is a mapping of hypervisor->hvparams dict
+ for hv_name, hv_params in hvparams.iteritems():
+ utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+ hv_class = hypervisor.GetHypervisor(hv_name)
+ hv_class.CheckParameterSyntax(hv_params)
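+  # For illustration, a well-formed hvparams argument could look like
+  # this (hypervisor name and parameter value are examples only):
+  #   {constants.HT_XEN_PVM: {constants.HV_KERNEL_PATH:
+  #                           "/boot/vmlinuz-2.6-xenU"}}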
+
-  # set up the inter-node password and certificate
+  # set up the SSL certificates
_InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
f.close()
sshkey = sshline.split(" ")[1]
- utils.AddHostToEtcHosts(hostname.name)
- _InitSSHSetup(hostname.name)
+ if modify_etc_hosts:
+ utils.AddHostToEtcHosts(hostname.name)
+
+ _InitSSHSetup()
# init of cluster config file
cluster_config = objects.Cluster(
volume_group_name=vg_name,
default_bridge=def_bridge,
tcpudp_port_pool=set(),
- hypervisor=hypervisor_type,
master_node=hostname.name,
master_ip=clustername.ip,
master_netdev=master_netdev,
cluster_name=clustername.name,
file_storage_dir=file_storage_dir,
+ enabled_hypervisors=enabled_hypervisors,
+ default_hypervisor=default_hypervisor,
+ beparams={constants.BEGR_DEFAULT: beparams},
+ hvparams=hvparams,
+ candidate_pool_size=candidate_pool_size,
+    modify_etc_hosts=modify_etc_hosts,
)
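+  # note: the beparams passed in become the cluster's "default" backend
+  # parameter group (constants.BEGR_DEFAULT), which instances inherit
+  # from unless they override individual values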
master_node_config = objects.Node(name=hostname.name,
primary_ip=hostname.ip,
- secondary_ip=secondary_ip)
+ secondary_ip=secondary_ip,
+ serial_no=1,
+ master_candidate=True,
+ offline=False, drained=False,
+ )
+
+ sscfg = InitConfig(constants.CONFIG_VERSION,
+ cluster_config, master_node_config)
+ ssh.WriteKnownHostsFile(sscfg, constants.SSH_KNOWN_HOSTS_FILE)
cfg = config.ConfigWriter()
- cfg.InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
-
- ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
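+  # writing the configuration back via ConfigWriter also regenerates
+  # the ssconf files with the new cluster data (the same pattern is
+  # used in MasterFailover below)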
+ cfg.Update(cfg.GetClusterInfo())
# start the master ip
# TODO: Review rpc call from bootstrap
- rpc.call_node_start_master(hostname.name, True)
+ rpc.RpcRunner.call_node_start_master(hostname.name, True)
+
+
+def InitConfig(version, cluster_config, master_node_config,
+ cfg_file=constants.CLUSTER_CONF_FILE):
+ """Create the initial cluster configuration.
+
+ It will contain the current node, which will also be the master
+ node, and no instances.
+
+ @type version: int
+ @param version: configuration version
+ @type cluster_config: L{objects.Cluster}
+ @param cluster_config: cluster configuration
+ @type master_node_config: L{objects.Node}
+ @param master_node_config: master node configuration
+ @type cfg_file: string
+ @param cfg_file: configuration file path
+
+ @rtype: L{ssconf.SimpleConfigWriter}
+ @return: initialized config instance
+
+ """
+ nodes = {
+ master_node_config.name: master_node_config,
+ }
+
+ config_data = objects.ConfigData(version=version,
+ cluster=cluster_config,
+ nodes=nodes,
+ instances={},
+ serial_no=1)
+ cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
+ cfg.Save()
+
+ return cfg
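+
+# Usage sketch for InitConfig (illustrative; InitCluster above is the
+# real caller). Writing the initial configuration to a custom location,
+# e.g. for a test, could look like:
+#
+#   InitConfig(constants.CONFIG_VERSION, cluster_config,
+#              master_node_config, cfg_file="/tmp/test-config.data")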
def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
begun in cmdlib.LUDestroyOpcode.
"""
- if not rpc.call_node_stop_master(master, True):
+ result = rpc.RpcRunner.call_node_stop_master(master, True)
+ if result.failed or not result.data:
logging.warning("Could not disable the master role")
- if not rpc.call_node_leave_cluster(master):
+ result = rpc.RpcRunner.call_node_leave_cluster(master)
+ if result.failed or not result.data:
logging.warning("Could not shutdown the node daemon and cleanup the node")
-def SetupNodeDaemon(node, ssh_key_check):
+def SetupNodeDaemon(cluster_name, node, ssh_key_check):
"""Add a node to the cluster.
This function must be called before the actual opcode, and will ssh
to the remote node, copy the needed files, and start ganeti-noded,
allowing the master to do the rest via normal rpc calls.
- Args:
- node: fully qualified domain name for the new node
+  @type cluster_name: string
+  @param cluster_name: the cluster name
+  @type node: string
+  @param node: the name of the new node
+  @type ssh_key_check: bool
+  @param ssh_key_check: whether to do a strict key check
+
"""
- cfg = ssconf.SimpleConfigReader()
- sshrunner = ssh.SshRunner(cfg)
- gntpass = utils.GetNodeDaemonPassword()
- if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
- raise errors.OpExecError("ganeti password corruption detected")
- f = open(constants.SSL_CERT_FILE)
- try:
- gntpem = f.read(8192)
- finally:
- f.close()
+ sshrunner = ssh.SshRunner(cluster_name)
+
+ noded_cert = utils.ReadFile(constants.SSL_CERT_FILE)
+ rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
+
# in the base64 pem encoding, neither '!' nor '.' are valid chars,
# so we use this to detect an invalid certificate; as long as the
# cert doesn't contain this, the here-document will be correctly
# parsed by the shell sequence below
- if re.search('^!EOF\.', gntpem, re.MULTILINE):
+ if (re.search('^!EOF\.', noded_cert, re.MULTILINE) or
+ re.search('^!EOF\.', rapi_cert, re.MULTILINE)):
raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
- if not gntpem.endswith("\n"):
- raise errors.OpExecError("PEM must end with newline")
+
+ if not noded_cert.endswith("\n"):
+ noded_cert += "\n"
+ if not rapi_cert.endswith("\n"):
+ rapi_cert += "\n"
-  # set up inter-node password and certificate and restarts the node daemon
-  # and then connect with ssh to set password and start ganeti-noded
+  # set up the certificates and restart the node daemon: we connect
+  # with ssh to copy the certificate files and restart ganeti-noded
# note that all the below variables are sanitized at this point,
# either by being constants or by the checks above
mycommand = ("umask 077 && "
- "echo '%s' > '%s' && "
"cat > '%s' << '!EOF.' && \n"
- "%s!EOF.\n%s restart" %
- (gntpass, constants.CLUSTER_PASSWORD_FILE,
- constants.SSL_CERT_FILE, gntpem,
+ "%s!EOF.\n"
+ "cat > '%s' << '!EOF.' && \n"
+ "%s!EOF.\n"
+ "chmod 0400 %s %s && "
+ "%s restart" %
+ (constants.SSL_CERT_FILE, noded_cert,
+ constants.RAPI_CERT_FILE, rapi_cert,
+ constants.SSL_CERT_FILE, constants.RAPI_CERT_FILE,
constants.NODE_INITD_SCRIPT))
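+  # for illustration, the assembled remote command looks roughly like
+  # this (paths depend on the constants and are abbreviated here):
+  #   umask 077 &&
+  #   cat > '/.../server.pem' << '!EOF.' &&
+  #   <noded PEM data>!EOF.
+  #   cat > '/.../rapi.pem' << '!EOF.' &&
+  #   <rapi PEM data>!EOF.
+  #   chmod 0400 /.../server.pem /.../rapi.pem && /.../ganeti restart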
result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
" output: %s" %
(node, result.fail_reason, result.output))
- return 0
-
def MasterFailover():
"""Failover the master node.
new master.
"""
- cfg = ssconf.SimpleConfigWriter()
+ sstore = ssconf.SimpleStore()
- new_master = utils.HostInfo().name
- old_master = cfg.GetMasterNode()
+ old_master, new_master = ssconf.GetMasterAndMyself(sstore)
+ node_list = sstore.GetNodeList()
+ mc_list = sstore.GetMasterCandidates()
if old_master == new_master:
raise errors.OpPrereqError("This commands must be run on the node"
" where you want the new master to be."
" %s is already the master" %
old_master)
+
+ if new_master not in mc_list:
+ mc_no_master = [name for name in mc_list if name != old_master]
+ raise errors.OpPrereqError("This node is not among the nodes marked"
+ " as master candidates. Only these nodes"
+ " can become masters. Current list of"
+ " master candidates is:\n"
+ "%s" % ('\n'.join(mc_no_master)))
+
+ vote_list = GatherMasterVotes(node_list)
+
+ if vote_list:
+ voted_master = vote_list[0][0]
+ if voted_master is None:
+ raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
+ " respond.")
+ elif voted_master != old_master:
+ raise errors.OpPrereqError("I have wrong configuration, I believe the"
+ " master is %s but the other nodes voted for"
+ " %s. Please resync the configuration of"
+ " this node." % (old_master, voted_master))
# end checks
rcode = 0
- logging.info("setting master to %s, old master: %s", new_master, old_master)
+ logging.info("Setting master to %s, old master: %s", new_master, old_master)
- if not rpc.call_node_stop_master(old_master, True):
- logging.error("could disable the master role on the old master"
+ result = rpc.RpcRunner.call_node_stop_master(old_master, True)
+ if result.failed or not result.data:
+ logging.error("Could not disable the master role on the old master"
" %s, please disable manually", old_master)
- cfg.SetMasterNode(new_master)
- cfg.Save()
-
# Here we have a phase where no master should be running
- if not rpc.call_upload_file(cfg.GetNodeList(),
- constants.CLUSTER_CONF_FILE):
- logging.error("could not distribute the new simple store master file"
- " to the other nodes, please check.")
+ # instantiate a real config writer, as we now know we have the
+ # configuration data
+ cfg = config.ConfigWriter()
- if not rpc.call_node_start_master(new_master, True):
- logging.error("could not start the master role on the new master"
+ cluster_info = cfg.GetClusterInfo()
+ cluster_info.master_node = new_master
+ # this will also regenerate the ssconf files, since we updated the
+ # cluster info
+ cfg.Update(cluster_info)
+
+ result = rpc.RpcRunner.call_node_start_master(new_master, True)
+ if result.failed or not result.data:
+ logging.error("Could not start the master role on the new master"
" %s, please check", new_master)
rcode = 1
return rcode
+
+
+def GetMaster():
+ """Returns the current master node.
+
+  This is a separate function in bootstrap since it's needed by
+  gnt-cluster, and instead of importing ssconf directly there, it's
+  better to abstract it here, in bootstrap, where other functions
+  already use ssconf.
+
+ """
+ sstore = ssconf.SimpleStore()
+
+ old_master, _ = ssconf.GetMasterAndMyself(sstore)
+
+ return old_master
+
+
+def GatherMasterVotes(node_list):
+ """Check the agreement on who is the master.
+
+ This function will return a list of (node, number of votes), ordered
+ by the number of votes. Errors will be denoted by the key 'None'.
+
+ Note that the sum of votes is the number of nodes this machine
+ knows, whereas the number of entries in the list could be different
+ (if some nodes vote for another master).
+
+  We remove ourselves from the list since, bugs aside, backend and
+  bootstrap use the same source of configuration information, so we
+  will always vote for ourselves.
+
+ @type node_list: list
+ @param node_list: the list of nodes to query for master info; the current
+    node will be removed if it is in the list
+ @rtype: list
+ @return: list of (node, votes)
+
+ """
+ myself = utils.HostInfo().name
+ try:
+ node_list.remove(myself)
+ except ValueError:
+ pass
+ if not node_list:
+    # no other nodes left (possibly after removing ourselves)
+ return []
+ results = rpc.RpcRunner.call_master_info(node_list)
+ if not isinstance(results, dict):
+ # this should not happen (unless internal error in rpc)
+ logging.critical("Can't complete rpc call, aborting master startup")
+ return [(None, len(node_list))]
+ votes = {}
+ for node in results:
+ nres = results[node]
+ data = nres.data
+ if nres.failed or not isinstance(data, (tuple, list)) or len(data) < 3:
+ # here the rpc layer should have already logged errors
+ if None not in votes:
+ votes[None] = 0
+ votes[None] += 1
+ continue
+ master_node = data[2]
+ if master_node not in votes:
+ votes[master_node] = 0
+ votes[master_node] += 1
+
+  vote_list = votes.items()
+ # sort first on number of votes then on name, since we want None
+ # sorted later if we have the half of the nodes not responding, and
+ # half voting all for the same master
+ vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
+
+ return vote_list
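+
+# Example (hypothetical hostnames): on a five-node cluster where this
+# node queries the four others, three of them report node1.example.com
+# as master and one is unreachable, the function returns
+#   [("node1.example.com", 3), (None, 1)]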