import os
import sha
import re
import logging
+import tempfile
from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
-def _InitSSHSetup(node):
+def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

-  Args:
-    node: the name of this host as a fqdn
-
  """
priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
f.close()
+def _GenerateSelfSignedSslCert(file_name, validity=(365 * 5)):
+ """Generates a self-signed SSL certificate.
+
+ @type file_name: str
+ @param file_name: Path to output file
+ @type validity: int
+ @param validity: Validity for certificate in days
+
+ """
+  (fd, tmp_file_name) = tempfile.mkstemp(dir=os.path.dirname(file_name))
+  # mkstemp returns an open file descriptor that is not needed here;
+  # close it immediately so it is not leaked
+  os.close(fd)
+ try:
+ # Set permissions before writing key
+ os.chmod(tmp_file_name, 0600)
+
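+    # "-keyout" and "-out" point at the same path, so the private key
+    # and the certificate end up in a single file; "-batch" makes
+    # openssl run non-interactively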
+ result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
+ "-days", str(validity), "-nodes", "-x509",
+ "-keyout", tmp_file_name, "-out", tmp_file_name,
+ "-batch"])
+ if result.failed:
+ raise errors.OpExecError("Could not generate SSL certificate, command"
+ " %s had exitcode %s and error message %s" %
+ (result.cmd, result.exit_code, result.output))
+
+ # Make read-only
+ os.chmod(tmp_file_name, 0400)
+
+ os.rename(tmp_file_name, file_name)
+ finally:
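+    # on success the temporary file has already been renamed into
+    # place, so this only cleans up after a failure (RemoveFile is
+    # expected to ignore a missing file)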
+ utils.RemoveFile(tmp_file_name)
+
+
def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

-  This creates the nodepass file containing the shared password for
-  the cluster and also generates the SSL certificate.
+  This generates the SSL certificates for the cluster.

  """
- # Create pseudo random password
- randpass = utils.GenerateSecret()
-
- # and write it into the config file
- utils.WriteFile(constants.CLUSTER_PASSWORD_FILE,
- data="%s\n" % randpass, mode=0400)
-
- result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
- "-days", str(365*5), "-nodes", "-x509",
- "-keyout", constants.SSL_CERT_FILE,
- "-out", constants.SSL_CERT_FILE, "-batch"])
- if result.failed:
- raise errors.OpExecError("could not generate server ssl cert, command"
- " %s had exitcode %s and error message %s" %
- (result.cmd, result.exit_code, result.output))
+ _GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)
- os.chmod(constants.SSL_CERT_FILE, 0400)
+ # Don't overwrite existing file
+ if not os.path.exists(constants.RAPI_CERT_FILE):
+ _GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)
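+  # restart the node daemon so it picks up the newly generated
+  # certificates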
result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
def InitCluster(cluster_name, mac_prefix, def_bridge,
- master_netdev, file_storage_dir,
- secondary_ip=None,
- vg_name=None, beparams=None, hvparams=None,
+ master_netdev, file_storage_dir, candidate_pool_size,
+ secondary_ip=None, vg_name=None, beparams=None, hvparams=None,
enabled_hypervisors=None, default_hypervisor=None):
"""Initialise the cluster.
+ @type candidate_pool_size: int
+ @param candidate_pool_size: master candidate pool size
+
"""
+ # TODO: complete the docstring
if config.ConfigWriter.IsCluster():
raise errors.OpPrereqError("Cluster is already initialised")
raise errors.OpPrereqError("Init.d script '%s' missing or not"
" executable." % constants.NODE_INITD_SCRIPT)
+ utils.CheckBEParams(beparams)
+
-  # set up the inter-node password and certificate
+  # set up the SSL certificates
_InitGanetiServerSetup()
sshkey = sshline.split(" ")[1]
utils.AddHostToEtcHosts(hostname.name)
- _InitSSHSetup(hostname.name)
+ _InitSSHSetup()
# init of cluster config file
cluster_config = objects.Cluster(
default_hypervisor=default_hypervisor,
beparams={constants.BEGR_DEFAULT: beparams},
hvparams=hvparams,
+ candidate_pool_size=candidate_pool_size,
)
master_node_config = objects.Node(name=hostname.name,
primary_ip=hostname.ip,
- secondary_ip=secondary_ip)
-
- cfg = InitConfig(constants.CONFIG_VERSION,
- cluster_config, master_node_config)
- ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
+ secondary_ip=secondary_ip,
+ serial_no=1,
+ master_candidate=True,
+ offline=False, drained=False,
+ )
+
+ sscfg = InitConfig(constants.CONFIG_VERSION,
+ cluster_config, master_node_config)
+ ssh.WriteKnownHostsFile(sscfg, constants.SSH_KNOWN_HOSTS_FILE)
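+  # re-save the configuration through a real ConfigWriter; as in
+  # MasterFailover below, this also regenerates the ssconf files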
+ cfg = config.ConfigWriter()
+ cfg.Update(cfg.GetClusterInfo())
# start the master ip
# TODO: Review rpc call from bootstrap
node, and no instances.
@type version: int
- @param version: Configuration version
- @type cluster_config: objects.Cluster
- @param cluster_config: Cluster configuration
- @type master_node_config: objects.Node
- @param master_node_config: Master node configuration
- @type file_name: string
- @param file_name: Configuration file path
+ @param version: configuration version
+ @type cluster_config: L{objects.Cluster}
+ @param cluster_config: cluster configuration
+ @type master_node_config: L{objects.Node}
+ @param master_node_config: master node configuration
+ @type cfg_file: string
+ @param cfg_file: configuration file path
- @rtype: ssconf.SimpleConfigWriter
- @returns: Initialized config instance
+ @rtype: L{ssconf.SimpleConfigWriter}
+ @returns: initialized config instance
"""
nodes = {
begun in cmdlib.LUDestroyOpcode.
"""
- if not rpc.RpcRunner.call_node_stop_master(master, True):
+ result = rpc.RpcRunner.call_node_stop_master(master, True)
+ if result.failed or not result.data:
logging.warning("Could not disable the master role")
- if not rpc.RpcRunner.call_node_leave_cluster(master):
+ result = rpc.RpcRunner.call_node_leave_cluster(master)
+ if result.failed or not result.data:
logging.warning("Could not shutdown the node daemon and cleanup the node")
-def SetupNodeDaemon(node, ssh_key_check):
+def SetupNodeDaemon(cluster_name, node, ssh_key_check):
"""Add a node to the cluster.
This function must be called before the actual opcode, and will ssh
to the remote node, copy the needed files, and start ganeti-noded,
allowing the master to do the rest via normal rpc calls.
- Args:
- node: fully qualified domain name for the new node
+ @param cluster_name: the cluster name
+ @param node: the name of the new node
+ @param ssh_key_check: whether to do a strict key check
"""
- cfg = ssconf.SimpleConfigReader()
- sshrunner = ssh.SshRunner(cfg.GetClusterName())
- gntpass = utils.GetNodeDaemonPassword()
- if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
- raise errors.OpExecError("ganeti password corruption detected")
- f = open(constants.SSL_CERT_FILE)
- try:
- gntpem = f.read(8192)
- finally:
- f.close()
+ sshrunner = ssh.SshRunner(cluster_name)
+
+ noded_cert = utils.ReadFile(constants.SSL_CERT_FILE)
+ rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
+
# in the base64 pem encoding, neither '!' nor '.' are valid chars,
# so we use this to detect an invalid certificate; as long as the
# cert doesn't contain this, the here-document will be correctly
# parsed by the shell sequence below
-  if re.search('^!EOF\.', gntpem, re.MULTILINE):
+  if (re.search(r'^!EOF\.', noded_cert, re.MULTILINE) or
+      re.search(r'^!EOF\.', rapi_cert, re.MULTILINE)):
raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
- if not gntpem.endswith("\n"):
- raise errors.OpExecError("PEM must end with newline")
+
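+  # the certificates must end with a newline, otherwise the '!EOF.'
+  # terminator of the here-documents below would not start on its own
+  # line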
+ if not noded_cert.endswith("\n"):
+ noded_cert += "\n"
+ if not rapi_cert.endswith("\n"):
+ rapi_cert += "\n"
-  # set up inter-node password and certificate and restarts the node daemon
-  # and then connect with ssh to set password and start ganeti-noded
+  # connect with ssh to copy the certificates over and restart the
+  # node daemon
# note that all the below variables are sanitized at this point,
# either by being constants or by the checks above
mycommand = ("umask 077 && "
- "echo '%s' > '%s' && "
"cat > '%s' << '!EOF.' && \n"
- "%s!EOF.\n%s restart" %
- (gntpass, constants.CLUSTER_PASSWORD_FILE,
- constants.SSL_CERT_FILE, gntpem,
+ "%s!EOF.\n"
+ "cat > '%s' << '!EOF.' && \n"
+ "%s!EOF.\n"
+ "chmod 0400 %s %s && "
+ "%s restart" %
+ (constants.SSL_CERT_FILE, noded_cert,
+ constants.RAPI_CERT_FILE, rapi_cert,
+ constants.SSL_CERT_FILE, constants.RAPI_CERT_FILE,
constants.NODE_INITD_SCRIPT))
result = sshrunner.Run(node, 'root', mycommand, batch=False,
" output: %s" %
(node, result.fail_reason, result.output))
- return 0
-
def MasterFailover():
"""Failover the master node.
new master.
"""
- cfg = ssconf.SimpleConfigWriter()
+ sstore = ssconf.SimpleStore()
- new_master = utils.HostInfo().name
- old_master = cfg.GetMasterNode()
- node_list = cfg.GetNodeList()
+ old_master, new_master = ssconf.GetMasterAndMyself(sstore)
+ node_list = sstore.GetNodeList()
+ mc_list = sstore.GetMasterCandidates()
if old_master == new_master:
raise errors.OpPrereqError("This commands must be run on the node"
" %s is already the master" %
old_master)
+ if new_master not in mc_list:
+ mc_no_master = [name for name in mc_list if name != old_master]
+ raise errors.OpPrereqError("This node is not among the nodes marked"
+ " as master candidates. Only these nodes"
+ " can become masters. Current list of"
+ " master candidates is:\n"
+ "%s" % ('\n'.join(mc_no_master)))
+
vote_list = GatherMasterVotes(node_list)
if vote_list:
logging.info("Setting master to %s, old master: %s", new_master, old_master)
- if not rpc.RpcRunner.call_node_stop_master(old_master, True):
+ result = rpc.RpcRunner.call_node_stop_master(old_master, True)
+ if result.failed or not result.data:
logging.error("Could not disable the master role on the old master"
" %s, please disable manually", old_master)
- cfg.SetMasterNode(new_master)
- cfg.Save()
-
# Here we have a phase where no master should be running
- if not rpc.RpcRunner.call_upload_file(cfg.GetNodeList(),
- constants.CLUSTER_CONF_FILE):
- logging.error("Could not distribute the new configuration"
- " to the other nodes, please check.")
+ # instantiate a real config writer, as we now know we have the
+ # configuration data
+ cfg = config.ConfigWriter()
+ cluster_info = cfg.GetClusterInfo()
+ cluster_info.master_node = new_master
+ # this will also regenerate the ssconf files, since we updated the
+ # cluster info
+ cfg.Update(cluster_info)
- if not rpc.RpcRunner.call_node_start_master(new_master, True):
+ result = rpc.RpcRunner.call_node_start_master(new_master, True)
+ if result.failed or not result.data:
logging.error("Could not start the master role on the new master"
" %s, please check", new_master)
rcode = 1
# this should not happen (unless internal error in rpc)
logging.critical("Can't complete rpc call, aborting master startup")
return [(None, len(node_list))]
- positive = negative = 0
- other_masters = {}
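+  # map each master name reported by a node to the number of nodes
+  # reporting it; the None key counts nodes whose vote could not be
+  # retrieved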
votes = {}
for node in results:
- if not isinstance(results[node], (tuple, list)) or len(results[node]) < 3:
+ nres = results[node]
+ data = nres.data
+ if nres.failed or not isinstance(data, (tuple, list)) or len(data) < 3:
# here the rpc layer should have already logged errors
if None not in votes:
votes[None] = 0
votes[None] += 1
continue
- master_node = results[node][2]
+ master_node = data[2]
if master_node not in votes:
votes[master_node] = 0
votes[master_node] += 1