from ganeti import netutils
from ganeti import backend
+# ec_id for InitConfig's temporary reservation manager
+_INITCONF_ECID = "initconfig-ecid"
+
def _InitSSHSetup():
"""Setup the SSH configuration for the cluster.
modify_etc_hosts=True, modify_ssh_setup=True,
maintain_node_health=False, drbd_helper=None,
uid_pool=None, default_iallocator=None,
- primary_ip_version=None):
+ primary_ip_version=None, prealloc_wipe_disks=False):
"""Initialise the cluster.
@type candidate_pool_size: int
sshkey = sshline.split(" ")[1]
if modify_etc_hosts:
- utils.AddHostToEtcHosts(hostname)
+ utils.AddHostToEtcHosts(hostname.name, hostname.ip)
if modify_ssh_setup:
_InitSSHSetup()
uid_pool=uid_pool,
ctime=now,
mtime=now,
- uuid=utils.NewUUID(),
maintain_node_health=maintain_node_health,
drbd_usermode_helper=drbd_helper,
default_iallocator=default_iallocator,
primary_ip_family=ipcls.family,
+ prealloc_wipe_disks=prealloc_wipe_disks,
)
master_node_config = objects.Node(name=hostname.name,
primary_ip=hostname.ip,
serial_no=1,
master_candidate=True,
offline=False, drained=False,
+ ctime=now, mtime=now,
)
InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
cfg = config.ConfigWriter(offline=True)
@param cfg_file: configuration file path
"""
+ uuid_generator = config.TemporaryReservationManager()
+ cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
+ _INITCONF_ECID)
+ master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
+ _INITCONF_ECID)
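# Illustrative sketch, not part of the patch: the reservation pattern used
# above. Generate() keeps calling the supplied callable until it returns a
# value outside the "existing" set, then records the result under the given
# ec_id so the same value cannot be handed out twice within one execution
# context. The sketch uses only names already visible in this file.
def _ExampleReserveUuids(count):
  gen = config.TemporaryReservationManager()
  taken = set()
  for _ in range(count):
    # pass the values generated so far as the "existing" set, so every
    # call is guaranteed to return a fresh UUID
    taken.add(gen.Generate(taken, utils.NewUUID, _INITCONF_ECID))
  return taken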
nodes = {
master_node_config.name: master_node_config,
}
-
+ default_nodegroup = objects.NodeGroup(
+ uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
+ name="default",
+ members=[master_node_config.name],
+ )
+ nodegroups = {
+ default_nodegroup.uuid: default_nodegroup,
+ }
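# Editor's note: unlike the nodes mapping below, which is keyed by node
# name, the nodegroups mapping is keyed by the group's UUID; presumably
# this keeps references valid even if the "default" group is later
# renamed. The master node starts out as the group's only member.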
now = time.time()
config_data = objects.ConfigData(version=version,
cluster=cluster_config,
+ nodegroups=nodegroups,
nodes=nodes,
instances={},
serial_no=1,
# and then connect with ssh to copy the daemon certificates over and
# start ganeti-noded
# note that all the below variables are sanitized at this point,
# either by being constants or by the checks above
- # TODO: Could this command exceed a shell's maximum command length?
- mycommand = ("umask 077 && "
- "cat > '%s' << '!EOF.' && \n"
- "%s!EOF.\n"
- "cat > '%s' << '!EOF.' && \n"
- "%s!EOF.\n"
- "cat > '%s' << '!EOF.' && \n"
- "%s!EOF.\n"
- "chmod 0400 %s %s %s && "
- "%s start %s -b '%s'" %
- (constants.NODED_CERT_FILE, noded_cert,
- constants.RAPI_CERT_FILE, rapi_cert,
- constants.CONFD_HMAC_KEY, confd_hmac_key,
- constants.NODED_CERT_FILE, constants.RAPI_CERT_FILE,
- constants.CONFD_HMAC_KEY,
- constants.DAEMON_UTIL, constants.NODED, bind_address))
+ sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
+ sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
+ sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
+ mycommand = ("%s stop-all; %s start %s -b '%s'" % (constants.DAEMON_UTIL,
+ constants.DAEMON_UTIL,
+ constants.NODED,
+ bind_address))
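# Editor's illustration with assumed values (constants.DAEMON_UTIL and
# constants.NODED are defined elsewhere in the tree): on a typical
# install the command sent to the new node expands to something like
#
#   /usr/lib/ganeti/daemon-util stop-all; \
#   /usr/lib/ganeti/daemon-util start ganeti-noded -b '192.0.2.10'
#
# i.e. every daemon on the joining node is stopped, then only
# ganeti-noded is restarted, bound to the node's primary address.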
result = sshrunner.Run(node, 'root', mycommand, batch=False,
ask_key=ssh_key_check,
- use_cluster_key=False,
+ use_cluster_key=True,
strict_host_check=ssh_key_check)
if result.failed:
raise errors.OpExecError("Remote command on node %s, error: %s,"
logging.info("Setting master to %s, old master: %s", new_master, old_master)
+ try:
+ # instantiate a real config writer, as we now know we have the
+ # configuration data
+ cfg = config.ConfigWriter(accept_foreign=True)
+
+ cluster_info = cfg.GetClusterInfo()
+ cluster_info.master_node = new_master
+ # this will also regenerate the ssconf files, since we updated the
+ # cluster info
+ cfg.Update(cluster_info, logging.error)
+ except errors.ConfigurationError, err:
+ logging.error("Error while trying to set the new master: %s",
+ str(err))
+ return 1
+
+ # if cfg.Update worked, then the old master daemon will no longer be
+ # able to write its own config file (we rely on the locking in both
+ # backend.UploadFile() and ConfigWriter._Write()); hence the next
+ # step is to kill the old master
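# Editor's note: accept_foreign=True (used above) is what lets this node
# load a configuration that still names old_master as the master node;
# without it the ConfigWriter would refuse to operate on a "foreign"
# config. Updating the configuration *before* stopping the old daemon is
# deliberate: as the comment above says, the locking in
# backend.UploadFile() and ConfigWriter._Write() then prevents the old
# master from overwriting the new data.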
+
+ logging.info("Stopping the master daemon on node %s", old_master)
+
result = rpc.RpcRunner.call_node_stop_master(old_master, True)
msg = result.fail_msg
if msg:
logging.error("Could not disable the master role on the old master"
" %s, please disable manually: %s", old_master, msg)
+ logging.info("Checking master IP non-reachability...")
+
master_ip = sstore.GetMasterIP()
total_timeout = 30
# Here we have a phase where no master should be running
" continuing but activating the master on the current"
" node will probably fail", total_timeout)
- # instantiate a real config writer, as we now know we have the
- # configuration data
- cfg = config.ConfigWriter()
-
- cluster_info = cfg.GetClusterInfo()
- cluster_info.master_node = new_master
- # this will also regenerate the ssconf files, since we updated the
- # cluster info
- cfg.Update(cluster_info, logging.error)
+ logging.info("Starting the master daemons on the new master")
result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
msg = result.fail_msg
" %s, please check: %s", new_master, msg)
rcode = 1
+ logging.info("Master failed over from %s to %s", old_master, new_master)
return rcode