from ganeti import bdev
from ganeti import netutils
from ganeti import backend
+from ganeti import luxi
+
# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"
+#: After how many seconds a daemon must be responsive
+_DAEMON_READY_TIMEOUT = 10.0
+
def _InitSSHSetup():
"""Setup the SSH configuration for the cluster.
raise utils.RetryAgain()
try:
- utils.Retry(_CheckNodeDaemon, 1.0, 10.0)
+ utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
except utils.RetryTimeout:
raise errors.OpExecError("Node daemon on %s didn't answer queries within"
- " 10 seconds" % node_name)
+ " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))
+
+
+def _WaitForMasterDaemon():
+ """Wait for master daemon to become responsive.
+
+ """
+ def _CheckMasterDaemon():
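+ # A successful luxi query proves masterd is up and answering requests;
+ # any failure just means it is not ready yet, so retry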
+ try:
+ cl = luxi.Client()
+ (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
+ except Exception:
+ raise utils.RetryAgain()
+
+ logging.debug("Received cluster name %s from master", cluster_name)
+
+ try:
+ utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
+ except utils.RetryTimeout:
+ raise errors.OpExecError("Master daemon didn't answer queries within"
+ " %s seconds" % _DAEMON_READY_TIMEOUT)
def _InitFileStorage(file_storage_dir):
return file_storage_dir
-#pylint: disable-msg=R0913
-def InitCluster(cluster_name, mac_prefix,
+def InitCluster(cluster_name, mac_prefix, # pylint: disable-msg=R0913
master_netdev, file_storage_dir, candidate_pool_size,
secondary_ip=None, vg_name=None, beparams=None,
- nicparams=None, hvparams=None, enabled_hypervisors=None,
- modify_etc_hosts=True, modify_ssh_setup=True,
- maintain_node_health=False, drbd_helper=None,
- uid_pool=None, default_iallocator=None,
- primary_ip_version=None):
+ nicparams=None, ndparams=None, hvparams=None,
+ enabled_hypervisors=None, modify_etc_hosts=True,
+ modify_ssh_setup=True, maintain_node_health=False,
+ drbd_helper=None, uid_pool=None, default_iallocator=None,
+ primary_ip_version=None, prealloc_wipe_disks=False):
"""Initialise the cluster.
@type candidate_pool_size: int
utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
objects.NIC.CheckParameterSyntax(nicparams)
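+ # node parameters: validate the supplied dict, otherwise fall back to
+ # the built-in defaults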
+ if ndparams is not None:
+ utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
+ else:
+ ndparams = dict(constants.NDC_DEFAULTS)
+
# hvparams is a mapping of hypervisor->hvparams dict
for hv_name, hv_params in hvparams.iteritems():
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
enabled_hypervisors=enabled_hypervisors,
beparams={constants.PP_DEFAULT: beparams},
nicparams={constants.PP_DEFAULT: nicparams},
+ ndparams=ndparams,
hvparams=hvparams,
candidate_pool_size=candidate_pool_size,
modify_etc_hosts=modify_etc_hosts,
drbd_usermode_helper=drbd_helper,
default_iallocator=default_iallocator,
primary_ip_family=ipcls.family,
+ prealloc_wipe_disks=prealloc_wipe_disks,
)
master_node_config = objects.Node(name=hostname.name,
primary_ip=hostname.ip,
serial_no=1,
master_candidate=True,
offline=False, drained=False,
+ ctime=now, mtime=now,
)
InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
cfg = config.ConfigWriter(offline=True)
# set up the inter-node password and certificate
_InitGanetiServerSetup(hostname.name)
- # start the master ip
- # TODO: Review rpc call from bootstrap
- # TODO: Warn on failed start master
- rpc.RpcRunner.call_node_start_master(hostname.name, True, False)
+ logging.debug("Starting daemons")
+ result = utils.RunCmd([constants.DAEMON_UTIL, "start-all"])
+ if result.failed:
+ raise errors.OpExecError("Could not start daemons, command %s"
+ " had exitcode %s and error %s" %
+ (result.cmd, result.exit_code, result.output))
+
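+ # wait until masterd answers luxi queries, so the cluster is usable as
+ # soon as we return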
+ _WaitForMasterDaemon()
def InitConfig(version, cluster_config, master_node_config,
}
default_nodegroup = objects.NodeGroup(
uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
- name="default",
+ name=constants.INITIAL_NODE_GROUP_NAME,
members=[master_node_config.name],
)
nodegroups = {
"""
family = ssconf.SimpleStore().GetPrimaryIPFamily()
sshrunner = ssh.SshRunner(cluster_name,
- ipv6=family==netutils.IP6Address.family)
-
- noded_cert = utils.ReadFile(constants.NODED_CERT_FILE)
- rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
- confd_hmac_key = utils.ReadFile(constants.CONFD_HMAC_KEY)
-
- # in the base64 pem encoding, neither '!' nor '.' are valid chars,
- # so we use this to detect an invalid certificate; as long as the
- # cert doesn't contain this, the here-document will be correctly
- # parsed by the shell sequence below. HMAC keys are hexadecimal strings,
- # so the same restrictions apply.
- for content in (noded_cert, rapi_cert, confd_hmac_key):
- if re.search('^!EOF\.', content, re.MULTILINE):
- raise errors.OpExecError("invalid SSL certificate or HMAC key")
-
- if not noded_cert.endswith("\n"):
- noded_cert += "\n"
- if not rapi_cert.endswith("\n"):
- rapi_cert += "\n"
- if not confd_hmac_key.endswith("\n"):
- confd_hmac_key += "\n"
+ ipv6=(family == netutils.IP6Address.family))
bind_address = constants.IP4_ADDRESS_ANY
if family == netutils.IP6Address.family:
sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
- mycommand = ("%s stop-all; %s start %s -b '%s'" % (constants.DAEMON_UTIL,
- constants.DAEMON_UTIL,
- constants.NODED,
- bind_address))
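+ # shell-quote the bind address so the remote command stays well-formed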
+ mycommand = ("%s stop-all; %s start %s -b %s" %
+ (constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
+ utils.ShellQuote(bind_address)))
result = sshrunner.Run(node, 'root', mycommand, batch=False,
ask_key=ssh_key_check,
logging.info("Setting master to %s, old master: %s", new_master, old_master)
+ try:
+ # instantiate a real config writer, as we now know we have the
+ # configuration data
+ cfg = config.ConfigWriter(accept_foreign=True)
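+ # accept_foreign is needed because the on-disk configuration still
+ # names the old node as master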
+
+ cluster_info = cfg.GetClusterInfo()
+ cluster_info.master_node = new_master
+ # this will also regenerate the ssconf files, since we updated the
+ # cluster info
+ cfg.Update(cluster_info, logging.error)
+ except errors.ConfigurationError, err:
+ logging.error("Error while trying to set the new master: %s",
+ str(err))
+ return 1
+
+ # if cfg.Update worked, then it means the old master daemon won't be
+ # able now to write its own config file (we rely on locking in both
+ # backend.UploadFile() and ConfigWriter._Write()); hence the next
+ # step is to kill the old master
+
+ logging.info("Stopping the master daemon on node %s", old_master)
+
result = rpc.RpcRunner.call_node_stop_master(old_master, True)
msg = result.fail_msg
if msg:
logging.error("Could not disable the master role on the old master"
" %s, please disable manually: %s", old_master, msg)
+ logging.info("Checking master IP non-reachability...")
+
master_ip = sstore.GetMasterIP()
total_timeout = 30
# Here we have a phase where no master should be running
" continuing but activating the master on the current"
" node will probably fail", total_timeout)
- # instantiate a real config writer, as we now know we have the
- # configuration data
- cfg = config.ConfigWriter()
-
- cluster_info = cfg.GetClusterInfo()
- cluster_info.master_node = new_master
- # this will also regenerate the ssconf files, since we updated the
- # cluster info
- cfg.Update(cluster_info, logging.error)
+ logging.info("Starting the master daemons on the new master")
result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
msg = result.fail_msg
" %s, please check: %s", new_master, msg)
rcode = 1
+ logging.info("Master failed over from %s to %s", old_master, new_master)
return rcode