# Copyright (C) 2006, 2007, 2008, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import time

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti import bdev
from ganeti import netutils
from ganeti import backend
from ganeti import luxi
from ganeti import jstore


# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"

#: After how many seconds a daemon must be responsive
_DAEMON_READY_TIMEOUT = 10.0
57 """Setup the SSH configuration for the cluster.
59 This generates a dsa keypair for root, adds the pub key to the
60 permitted hosts and adds the hostkey to its own known hosts.
63 priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
65 for name in priv_key, pub_key:
66 if os.path.exists(name):
67 utils.CreateBackup(name)
68 utils.RemoveFile(name)
70 result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
74 raise errors.OpExecError("Could not generate ssh keypair, error %s" %
77 utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: string
  @param file_name: Path to output file

  """
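  # The key is a freshly generated random secret; mode 0400 keeps it
  # readable by the owner only, and backup=True preserves any previous key.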
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=constants.NODED_CERT_FILE,
                          rapicert_file=constants.RAPI_CERT_FILE,
                          spicecert_file=constants.SPICE_CERT_FILE,
                          spicecacert_file=constants.SPICE_CACERT_FILE,
                          hmackey_file=constants.CONFD_HMAC_KEY,
                          cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
101 """Updates the cluster certificates, keys and secrets.
103 @type new_cluster_cert: bool
104 @param new_cluster_cert: Whether to generate a new cluster certificate
105 @type new_rapi_cert: bool
106 @param new_rapi_cert: Whether to generate a new RAPI certificate
107 @type new_spice_cert: bool
108 @param new_spice_cert: Whether to generate a new SPICE certificate
109 @type new_confd_hmac_key: bool
110 @param new_confd_hmac_key: Whether to generate a new HMAC key
112 @param new_cds: Whether to generate a new cluster domain secret
113 @type rapi_cert_pem: string
114 @param rapi_cert_pem: New RAPI certificate in PEM format
115 @type spice_cert_pem: string
116 @param spice_cert_pem: New SPICE certificate in PEM format
117 @type spice_cacert_pem: string
118 @param spice_cacert_pem: Certificate of the CA that signed the SPICE
119 certificate, in PEM format
121 @param cds: New cluster domain secret
122 @type nodecert_file: string
123 @param nodecert_file: optional override of the node cert file path
124 @type rapicert_file: string
125 @param rapicert_file: optional override of the rapi cert file path
126 @type spicecert_file: string
127 @param spicecert_file: optional override of the spice cert file path
128 @type spicecacert_file: string
129 @param spicecacert_file: optional override of the spice CA cert file path
130 @type hmackey_file: string
131 @param hmackey_file: optional override of the hmac key file path
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI
  rapi_cert_exists = os.path.exists(rapicert_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # SPICE
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file)

    # Self-signed certificate -> the public certificate is also the CA public
    # certificate
    logging.debug("Writing the public certificate to %s",
                  spicecacert_file)
    utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)
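
  # Usage note (sketch): _InitGanetiServerSetup below calls
  # GenerateClusterCrypto(True, False, False, False, False); on a fresh
  # node the non-requested files are still created, because each branch
  # also fires when the target file does not exist yet.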


def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False, False)

  result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()
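
  # Poll the daemon once per second (constant 1.0s delay) until it answers
  # or _DAEMON_READY_TIMEOUT expires.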
  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))


def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _InitFileStorage(file_storage_dir):
  """Initialize if needed the file storage.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)
  return file_storage_dir


def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None,
                nicparams=None, ndparams=None, hvparams=None,
                diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, primary_ip_version=None,
                prealloc_wipe_disks=False, use_external_mip_script=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version))

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
                                 (master_netmask, primary_ip_version))
  else:
    master_netmask = ipcls.iplen

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus,
                                 errors.ECODE_INVAL)

  if drbd_helper is not None:
    try:
      curr_helper = bdev.BaseDRBD.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

  if constants.ENABLE_FILE_STORAGE:
    file_storage_dir = _InitFileStorage(file_storage_dir)
  else:
    file_storage_dir = ""

  if constants.ENABLE_SHARED_FILE_STORAGE:
    shared_file_storage_dir = _InitFileStorage(shared_file_storage_dir)
  else:
    shared_file_storage_dir = ""

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  objects.NIC.CheckParameterSyntax(nicparams)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)))
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)

  # set up ssh config and /etc/hosts
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
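  # The public key file has the form "ssh-rsa <base64-key> <comment>";
  # keep only the key body.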
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.X_OK)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  elif constants.HTOOLS:
    # htools was enabled at build-time, we default to it
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.X_OK) is not None:
      default_iallocator = constants.IALLOC_HAIL

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )

  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
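  # The configuration file now exists on disk, so an offline ConfigWriter
  # can load and update it before any daemon has been started.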
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  backend.WriteSsconfFiles(cfg.GetSsconfValues())

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([constants.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  nodes = {
    master_node_config.name: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.name],
    diskparams=cluster_config.diskparams,
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_params = cfg.GetMasterNetworkParameters()
  master_params.name = master
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_params.name,
                                                 master_params, ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  sshrunner = ssh.SshRunner(cluster_name,
                            ipv6=(family == netutils.IP6Address.family))

  bind_address = constants.IP4_ADDRESS_ANY
  if family == netutils.IP6Address.family:
    bind_address = constants.IP6_ADDRESS_ANY

  # set up inter-node password and certificate, restart the node daemon
  # and then connect with ssh to set password and start ganeti-noded
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.SPICE_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.SPICE_CACERT_FILE)
  sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
  mycommand = ("%s stop-all; %s start %s -b %s" %
               (constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
                utils.ShellQuote(bind_address)))
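
  # Roughly (sketch; DAEMON_UTIL expands to the full daemon-util path,
  # IPv4 case shown):
  #   daemon-util stop-all; daemon-util start ganeti-noded -b 0.0.0.0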
  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=True,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  _WaitForNodeDaemon(node)


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
                      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    cluster_info = cfg.GetClusterInfo()
    cluster_info.master_node = new_master
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # if cfg.Update worked, then it means the old master daemon won't be
  # able now to write its own config file (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write()); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  runner = rpc.BootstrapRunner()
  master_params = cfg.GetMasterNetworkParameters()
  master_params.name = old_master
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_params.name,
                                                 master_params, ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(old_master)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()
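
  # The delay spec (1, 1.5, 5) is assumed to follow utils.Retry's
  # (start, factor, maximum) form: ping after 1s, backing off by 1.5x up
  # to 5s between attempts, for at most total_timeout seconds overall.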
  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode
782 """Returns the current master node.
784 This is a separate function in bootstrap since it's needed by
785 gnt-cluster, and instead of importing directly ssconf, it's better
786 to abstract it in bootstrap, where we do use ssconf in other
790 sstore = ssconf.SimpleStore()
792 old_master, _ = ssconf.GetMasterAndMyself(sstore)


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  because backend and bootstrap use the same source of configuration
  information, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (possibly after removing myself)
    return []
  results = rpc.BootstrapRunner().call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      fail = True
    # for now we accept both length 3, 4 and 5 (data[3] is primary ip version
    # and data[4] is the master netmask)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = [v for v in votes.items()]
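  # Example result (illustrative): [("node1.example.com", 4), (None, 1)]
  # means four nodes vote for node1 and one node could not be contacted.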
  # sort first on number of votes then on name, since we want None
  # sorted later if we have half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list