import os.path
import sha
import re
+import logging
from ganeti import rpc
from ganeti import ssh
from ganeti import errors
from ganeti import config
from ganeti import constants
+from ganeti import objects
from ganeti import ssconf
+from ganeti.rpc import RpcRunner
def _InitSSHSetup(node):
"""Setup the SSH configuration for the cluster.
f.close()
-def _InitGanetiServerSetup(ss):
+def _InitGanetiServerSetup():
"""Setup the necessary configuration for the initial node daemon.
This creates the nodepass file containing the shared password for
"""
# Create pseudo random password
- randpass = sha.new(os.urandom(64)).hexdigest()
- # and write it into sstore
- ss.SetKey(ss.SS_NODED_PASS, randpass)
+ randpass = utils.GenerateSecret()
+
+ # and write it into the config file
+ utils.WriteFile(constants.CLUSTER_PASSWORD_FILE,
+ data="%s\n" % randpass, mode=0400)
result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
"-days", str(365*5), "-nodes", "-x509",
if config.ConfigWriter.IsCluster():
raise errors.OpPrereqError("Cluster is already initialised")
- if hypervisor_type == constants.HT_XEN_HVM31:
+ if hypervisor_type == constants.HT_XEN_HVM:
if not os.path.exists(constants.VNC_PASSWORD_FILE):
raise errors.OpPrereqError("Please prepare the cluster VNC"
"password file %s" %
" range (%s). Please fix DNS or %s." %
(hostname.ip, constants.ETC_HOSTS))
- if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
- source=constants.LOCALHOST_IP_ADDRESS):
+ if not utils.OwnIpAddress(hostname.ip):
raise errors.OpPrereqError("Inconsistency: this host's name resolves"
" to %s,\nbut this ip address does not"
" belong to this host."
if not utils.IsValidIP(secondary_ip):
raise errors.OpPrereqError("Invalid secondary ip given")
if (secondary_ip != hostname.ip and
- (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
- source=constants.LOCALHOST_IP_ADDRESS))):
+ not utils.OwnIpAddress(secondary_ip)):
raise errors.OpPrereqError("You gave %s as secondary IP,"
" but it does not belong to this host." %
secondary_ip)
+ else:
+ secondary_ip = hostname.ip
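+ # no separate secondary IP was given, so the primary address doubles as
+ # the secondary (typically the inter-node replication network) address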
if vg_name is not None:
# Check if volume group is valid
raise errors.OpPrereqError("Init.d script '%s' missing or not"
" executable." % constants.NODE_INITD_SCRIPT)
- # set up the simple store
- ss = ssconf.SimpleStore()
- ss.SetKey(ss.SS_HYPERVISOR, hypervisor_type)
- ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
- ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
- ss.SetKey(ss.SS_MASTER_NETDEV, master_netdev)
- ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)
- ss.SetKey(ss.SS_FILE_STORAGE_DIR, file_storage_dir)
- ss.SetKey(ss.SS_CONFIG_VERSION, constants.CONFIG_VERSION)
-
# set up the inter-node password and certificate
- _InitGanetiServerSetup(ss)
-
- # start the master ip
- # TODO: Review rpc call from bootstrap
- rpc.call_node_start_master(hostname.name)
+ _InitGanetiServerSetup()
# set up ssh config and /etc/hosts
f = open(constants.SSH_HOST_RSA_PUB, 'r')
_InitSSHSetup(hostname.name)
# init of cluster config file
- cfg = config.ConfigWriter()
- cfg.InitConfig(hostname.name, hostname.ip, secondary_ip, sshkey,
- mac_prefix, vg_name, def_bridge)
+ cluster_config = objects.Cluster(
+ serial_no=1,
+ rsahostkeypub=sshkey,
+ highest_used_port=(constants.FIRST_DRBD_PORT - 1),
+ mac_prefix=mac_prefix,
+ volume_group_name=vg_name,
+ default_bridge=def_bridge,
+ tcpudp_port_pool=set(),
+ hypervisor=hypervisor_type,
+ master_node=hostname.name,
+ master_ip=clustername.ip,
+ master_netdev=master_netdev,
+ cluster_name=clustername.name,
+ file_storage_dir=file_storage_dir,
+ )
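+ # note: highest_used_port starting at FIRST_DRBD_PORT - 1 presumably
+ # means the first DRBD port handed out will be FIRST_DRBD_PORT itself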
+ master_node_config = objects.Node(name=hostname.name,
+ primary_ip=hostname.ip,
+ secondary_ip=secondary_ip)
+
+ cfg = InitConfig(constants.CONFIG_VERSION,
+ cluster_config, master_node_config)
- ssh.WriteKnownHostsFile(cfg, ss, constants.SSH_KNOWN_HOSTS_FILE)
+ ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
+
+ # start the master ip
+ # TODO: Review rpc call from bootstrap
+ RpcRunner.call_node_start_master(hostname.name, True)
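+ # (the second argument presumably tells the node daemon to start the
+ # master daemons as well, not just the master IP)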
+
+
+def InitConfig(version, cluster_config, master_node_config,
+ cfg_file=constants.CLUSTER_CONF_FILE):
+ """Create the initial cluster configuration.
+
+ It will contain the current node, which will also be the master
+ node, and no instances.
+
+ @type version: int
+ @param version: Configuration version
+ @type cluster_config: objects.Cluster
+ @param cluster_config: Cluster configuration
+ @type master_node_config: objects.Node
+ @param master_node_config: Master node configuration
+ @type cfg_file: string
+ @param cfg_file: Configuration file path
+
+ @rtype: ssconf.SimpleConfigWriter
+ @return: Initialized config instance
+
+ """
+ nodes = {
+ master_node_config.name: master_node_config,
+ }
+
+ config_data = objects.ConfigData(version=version,
+ cluster=cluster_config,
+ nodes=nodes,
+ instances={},
+ serial_no=1)
+ cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
+ cfg.Save()
+
+ return cfg
+
+
+def FinalizeClusterDestroy(master):
+ """Execute the last steps of cluster destroy
+
+ This function shuts down all the daemons, completing the destroy
+ begun in cmdlib.LUDestroyOpcode.
+
+ """
+ if not RpcRunner.call_node_stop_master(master, True):
+ logging.warning("Could not disable the master role")
+ if not RpcRunner.call_node_leave_cluster(master):
+ logging.warning("Could not shutdown the node daemon and cleanup the node")
+
+
+def SetupNodeDaemon(node, ssh_key_check):
+ """Add a node to the cluster.
+
+ This function must be called before the actual opcode, and will ssh
+ to the remote node, copy the needed files, and start ganeti-noded,
+ allowing the master to do the rest via normal rpc calls.
+
+ @type node: string
+ @param node: fully qualified domain name for the new node
+ @type ssh_key_check: bool
+ @param ssh_key_check: whether to check the remote node's ssh host key
+     (passed as ask_key and strict_host_check to the ssh run)
+
+ """
+ cfg = ssconf.SimpleConfigReader()
+ sshrunner = ssh.SshRunner(cfg.GetClusterName())
+ gntpass = utils.GetNodeDaemonPassword()
+ if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
+ raise errors.OpExecError("ganeti password corruption detected")
+ f = open(constants.SSL_CERT_FILE)
+ try:
+ gntpem = f.read(8192)
+ finally:
+ f.close()
+ # in the base64 pem encoding, neither '!' nor '.' are valid chars,
+ # so we use this to detect an invalid certificate; as long as the
+ # cert doesn't contain this, the here-document will be correctly
+ # parsed by the shell sequence below
+ if re.search('^!EOF\.', gntpem, re.MULTILINE):
+ raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
+ if not gntpem.endswith("\n"):
+ raise errors.OpExecError("PEM must end with newline")
+
+ # set up the inter-node password and certificate and restart the node
+ # daemon: we connect with ssh to copy the password and certificate over
+ # and (re)start ganeti-noded; note that all the variables below are
+ # sanitized at this point, either by being constants or by the checks
+ # above
+ mycommand = ("umask 077 && "
+ "echo '%s' > '%s' && "
+ "cat > '%s' << '!EOF.' && \n"
+ "%s!EOF.\n%s restart" %
+ (gntpass, constants.CLUSTER_PASSWORD_FILE,
+ constants.SSL_CERT_FILE, gntpem,
+ constants.NODE_INITD_SCRIPT))
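+ # for reference, the assembled remote command looks roughly like:
+ #   umask 077 && echo '<pass>' > '<pass file>' && cat > '<cert file>' << '!EOF.' &&
+ #   <PEM data>!EOF.
+ #   <init.d script> restart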
+
+ result = sshrunner.Run(node, 'root', mycommand, batch=False,
+ ask_key=ssh_key_check,
+ use_cluster_key=False,
+ strict_host_check=ssh_key_check)
+ if result.failed:
+ raise errors.OpExecError("Remote command on node %s, error: %s,"
+ " output: %s" %
+ (node, result.fail_reason, result.output))
+
+ return 0
+
+
+def MasterFailover():
+ """Failover the master node.
+
+ This checks that we are not already the master, causes the current
+ master to cease being master, and makes this (non-master) node the
+ new master.
+
+ """
+ cfg = ssconf.SimpleConfigWriter()
+
+ new_master = utils.HostInfo().name
+ old_master = cfg.GetMasterNode()
+ node_list = cfg.GetNodeList()
+
+ if old_master == new_master:
+ raise errors.OpPrereqError("This command must be run on the node"
+ " where you want the new master to be."
+ " %s is already the master" %
+ old_master)
+
+ vote_list = GatherMasterVotes(node_list)
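+ # vote_list is sorted by decreasing number of votes (see
+ # GatherMasterVotes below), so the first entry is the name most nodes
+ # agree on (or None if most of them failed to answer)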
+
+ if vote_list:
+ voted_master = vote_list[0][0]
+ if voted_master is None:
+ raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
+ " respond.")
+ elif voted_master != old_master:
+ raise errors.OpPrereqError("This node has a wrong configuration: it"
+ " believes the master is %s, but the other nodes"
+ " voted for %s. Please resync the configuration"
+ " of this node." % (old_master, voted_master))
+ # end checks
+
+ rcode = 0
+
+ logging.info("Setting master to %s, old master: %s", new_master, old_master)
+ if not RpcRunner.call_node_stop_master(old_master, True):
+ logging.error("Could not disable the master role on the old master"
+ " %s, please disable manually", old_master)
+
+ cfg.SetMasterNode(new_master)
+ cfg.Save()
+
+ # Here we have a phase where no master should be running
+
+ if not RpcRunner.call_upload_file(cfg.GetNodeList(),
+ constants.CLUSTER_CONF_FILE):
+ logging.error("Could not distribute the new configuration"
+ " to the other nodes, please check.")
+
+ if not RpcRunner.call_node_start_master(new_master, True):
+ logging.error("Could not start the master role on the new master"
+ " %s, please check", new_master)
+ rcode = 1
+
+ return rcode
+
+
+def GatherMasterVotes(node_list):
+ """Check the agreement on who is the master.
+
+ This function will return a list of (node, number of votes), ordered
+ by the number of votes. Errors will be denoted by the key 'None'.
+
+ Note that the sum of votes is the number of nodes this machine
+ knows, whereas the number of entries in the list could be different
+ (if some nodes vote for another master).
+
+ We remove ourselves from the list since we know that (bugs aside) we
+ will always vote for ourselves: both backend and bootstrap use the
+ same source of configuration information.
+
+ @type node_list: list
+ @param node_list: the list of nodes to query for master info; the current
+ node will be removed if it is in the list
+ @rtype: list
+ @return: list of (node, votes)
+
+ """
+ myself = utils.HostInfo().name
+ try:
+ node_list.remove(myself)
+ except ValueError:
+ pass
+ if not node_list:
+ # no nodes left (possibly after removing ourselves)
+ return []
+ results = RpcRunner.call_master_info(node_list)
+ if not isinstance(results, dict):
+ # this should not happen (unless internal error in rpc)
+ logging.critical("Can't complete rpc call, aborting master startup")
+ return [(None, len(node_list))]
+ positive = negative = 0
+ other_masters = {}
+ votes = {}
+ for node in results:
+ if not isinstance(results[node], (tuple, list)) or len(results[node]) < 3:
+ # here the rpc layer should have already logged errors
+ if None not in votes:
+ votes[None] = 0
+ votes[None] += 1
+ continue
+ master_node = results[node][2]
+ if master_node not in votes:
+ votes[master_node] = 0
+ votes[master_node] += 1
+
+ vote_list = list(votes.items())
+ # sort first on number of votes then on name, since we want None
+ # sorted after real node names in case half of the nodes did not
+ # respond and the other half all voted for the same master
+ vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
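+ # example result (with made-up node names):
+ #   [("node2.example.com", 3), ("node1.example.com", 1), (None, 1)]
+ # where the (None, 1) entry stands for one node that failed to answer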
+
+ return vote_list