# Copyright (C) 2006, 2007, 2008, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import time

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti import bdev
from ganeti import netutils
from ganeti import backend
from ganeti import luxi
from ganeti import jstore


# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"

#: After how many seconds daemon must be responsive
_DAEMON_READY_TIMEOUT = 10.0


def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=constants.NODED_CERT_FILE,
                          rapicert_file=constants.RAPI_CERT_FILE,
                          spicecert_file=constants.SPICE_CERT_FILE,
                          spicecacert_file=constants.SPICE_CACERT_FILE,
                          hmackey_file=constants.CONFD_HMAC_KEY,
                          cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type spice_cert_pem: string
  @param spice_cert_pem: New SPICE certificate in PEM format
  @type spice_cacert_pem: string
  @param spice_cacert_pem: Certificate of the CA that signed the SPICE
                           certificate, in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type spicecert_file: string
  @param spicecert_file: optional override of the spice cert file path
  @type spicecacert_file: string
  @param spicecacert_file: optional override of the spice CA cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path

  """
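  # For illustration: GenerateClusterCrypto(False, True, False, False, False)
  # regenerates only the RAPI certificate; the existing node certificate,
  # SPICE credentials, HMAC key and cluster domain secret are kept (they are
  # only created from scratch if their files are missing).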
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI
  rapi_cert_exists = os.path.exists(rapicert_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # SPICE
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file)

    # Self-signed certificate -> the public certificate is also the CA public
    # certificate
    logging.debug("Writing the public certificate to %s",
                  spicecacert_file)
    utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)


def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets
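  # (positionally: new_cluster_cert=True and all other "new_*" flags False,
  # so only the node certificate is forced; the remaining credentials are
  # created only if their files do not exist yet)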
  GenerateClusterCrypto(True, False, False, False, False)

  result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()
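
  # Poll roughly once per second until the daemon answers or
  # _DAEMON_READY_TIMEOUT seconds have passed.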
  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))


def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _InitFileStorage(file_storage_dir):
  """Initialize the file storage directory if needed.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)
  return file_storage_dir


def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
                vg_name=None, beparams=None, nicparams=None, ndparams=None,
                hvparams=None, enabled_hypervisors=None, modify_etc_hosts=True,
                modify_ssh_setup=True, maintain_node_health=False,
                drbd_helper=None, uid_pool=None, default_iallocator=None,
                primary_ip_version=None, prealloc_wipe_disks=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version))

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)
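
  # The secondary IP (typically a separate replication network) must always
  # be IPv4; when not given it defaults to the host's primary IP.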
  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s" %
                                 (master_netmask, primary_ip_version))
  else:
    master_netmask = ipcls.iplen

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus,
                                 errors.ECODE_INVAL)

  if drbd_helper is not None:
    try:
      curr_helper = bdev.BaseDRBD.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

  if constants.ENABLE_FILE_STORAGE:
    file_storage_dir = _InitFileStorage(file_storage_dir)
  else:
    file_storage_dir = ""

  if constants.ENABLE_SHARED_FILE_STORAGE:
    shared_file_storage_dir = _InitFileStorage(shared_file_storage_dir)
  else:
    shared_file_storage_dir = ""

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  objects.NIC.CheckParameterSyntax(nicparams)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # set up ssh config and /etc/hosts
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  elif constants.HTOOLS:
    # htools was enabled at build-time, we default to it
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
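  # Bootstrap write-out sequence: InitConfig() creates the initial config
  # file, the offline ConfigWriter re-reads it, and the known_hosts and
  # ssconf files are then generated from that same configuration before the
  # daemons are started below.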
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  backend.WriteSsconfFiles(cfg.GetSsconfValues())

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([constants.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  nodes = {
    master_node_config.name: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.name],
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_params = cfg.GetMasterNetworkParameters()
  master_params.name = master
  result = runner.call_node_deactivate_master_ip(master_params.name,
                                                 master_params)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  sshrunner = ssh.SshRunner(cluster_name,
                            ipv6=(family == netutils.IP6Address.family))

  bind_address = constants.IP4_ADDRESS_ANY
  if family == netutils.IP6Address.family:
    bind_address = constants.IP6_ADDRESS_ANY

  # set up the inter-node password and certificate, restart the node daemon
  # and then connect with ssh to set password and start ganeti-noded;
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.SPICE_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.SPICE_CACERT_FILE)
  sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
  mycommand = ("%s stop-all; %s start %s -b %s" %
               (constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
                utils.ShellQuote(bind_address)))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=True,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  _WaitForNodeDaemon(node)


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
                      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)),
                               errors.ECODE_STATE)
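
  # Unless no_voting was requested, ask the other nodes which node they
  # currently consider to be the master; the failover only proceeds if the
  # highest-voted answer matches our own view of the old master.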
  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)

  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    cluster_info = cfg.GetClusterInfo()
    cluster_info.master_node = new_master
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # if cfg.Update worked, then it means the old master daemon won't be
  # able now to write its own config file (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write()); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  runner = rpc.BootstrapRunner()
  master_params = cfg.GetMasterNetworkParameters()
  master_params.name = old_master
  result = runner.call_node_deactivate_master_ip(master_params.name,
                                                 master_params)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(old_master)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)
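
  # The job queue may still be flagged as drained; clear the flag so the new
  # master daemon will accept submitted jobs again.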
  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)

  return rcode


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing directly ssconf, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since, using the same source of
  configuration information for both backend and bootstrap, we will
  (bugs aside) always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.BootstrapRunner().call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
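  # Each reply's payload is a tuple in which entry [2] names the node that
  # the remote side considers to be the master; that is the only field used
  # for the vote count below.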
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      fail = True
    # for now we accept both length 3, 4 and 5 (data[3] is primary ip version
    # and data[4] is the master netmask)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = [v for v in votes.items()]
  # sort first on number of votes then on name, since we want None sorted
  # later if half of the nodes do not respond while the other half votes
  # for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list