# Copyright (C) 2006, 2007, 2008, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Functions to bootstrap a new cluster.
from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti import bdev
from ganeti import netutils
from ganeti import backend

# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"
51 """Setup the SSH configuration for the cluster.
53 This generates a dsa keypair for root, adds the pub key to the
54 permitted hosts and adds the hostkey to its own known hosts.
57 priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
59 for name in priv_key, pub_key:
60 if os.path.exists(name):
61 utils.CreateBackup(name)
62 utils.RemoveFile(name)
64 result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
68 raise errors.OpExecError("Could not generate ssh keypair, error %s" %
71 utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: string
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)
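
# Example (illustrative only, not part of the original module): rotating the
# confd HMAC key by hand would look roughly like the following; the standard
# constant is used here, but any writable path works for testing:
#
#   GenerateHmacKey(constants.CONFD_HMAC_KEY)
#
# The file is written with mode 0400 and any previous key is kept as a backup.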


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_confd_hmac_key,
                          new_cds, rapi_cert_pem=None, cds=None,
                          nodecert_file=constants.NODED_CERT_FILE,
                          rapicert_file=constants.RAPI_CERT_FILE,
                          hmackey_file=constants.CONFD_HMAC_KEY,
                          cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
91 """Updates the cluster certificates, keys and secrets.
93 @type new_cluster_cert: bool
94 @param new_cluster_cert: Whether to generate a new cluster certificate
95 @type new_rapi_cert: bool
96 @param new_rapi_cert: Whether to generate a new RAPI certificate
97 @type new_confd_hmac_key: bool
98 @param new_confd_hmac_key: Whether to generate a new HMAC key
100 @param new_cds: Whether to generate a new cluster domain secret
101 @type rapi_cert_pem: string
102 @param rapi_cert_pem: New RAPI certificate in PEM format
104 @param cds: New cluster domain secret
105 @type nodecert_file: string
106 @param nodecert_file: optional override of the node cert file path
107 @type rapicert_file: string
108 @param rapicert_file: optional override of the rapi cert file path
109 @type hmackey_file: string
110 @param hmackey_file: optional override of the hmac key file path
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI
  rapi_cert_exists = os.path.exists(rapicert_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)
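
# Example (sketch, not part of the original module): a full regeneration of
# every cluster secret, roughly what a "gnt-cluster renew-crypto" run would
# request, boils down to enabling all four flags and keeping the default
# file paths:
#
#   GenerateClusterCrypto(new_cluster_cert=True, new_rapi_cert=True,
#                         new_confd_hmac_key=True, new_cds=True)
#
# Passing rapi_cert_pem or cds instead installs caller-provided values rather
# than generating fresh ones.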


def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False)

  result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    result = rpc.RpcRunner.call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, 10.0)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " 10 seconds" % node_name)


def _InitFileStorage(file_storage_dir):
  """Initialize if needed the file storage.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  if not constants.ENABLE_FILE_STORAGE:
    return ""

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.", errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)
  return file_storage_dir
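
# Example (illustrative, hypothetical path): on a build with file storage
# enabled, _InitFileStorage("/srv/ganeti//file-storage/") normalizes the path
# and returns "/srv/ganeti/file-storage", creating the directory (mode 0750)
# if missing; with ENABLE_FILE_STORAGE unset at build time it returns ""
# without touching the filesystem.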


#pylint: disable-msg=R0913
def InitCluster(cluster_name, mac_prefix,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None,
                nicparams=None, hvparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None,
                uid_pool=None, default_iallocator=None,
                primary_ip_version=None, prealloc_wipe_disks=False):
231 """Initialise the cluster.
233 @type candidate_pool_size: int
234 @param candidate_pool_size: master candidate pool size
237 # TODO: complete the docstring
238 if config.ConfigWriter.IsCluster():
239 raise errors.OpPrereqError("Cluster is already initialised",
242 if not enabled_hypervisors:
243 raise errors.OpPrereqError("Enabled hypervisors list must contain at"
244 " least one member", errors.ECODE_INVAL)
245 invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
247 raise errors.OpPrereqError("Enabled hypervisors contains invalid"
248 " entries: %s" % invalid_hvs,

  if primary_ip_version == constants.IP4_VERSION:
    ipcls = netutils.IP4Address
  elif primary_ip_version == constants.IP6_VERSION:
    ipcls = netutils.IP6Address
  else:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version))

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus,
                                 errors.ECODE_INVAL)

  if drbd_helper is not None:
    try:
      curr_helper = bdev.BaseDRBD.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

  file_storage_dir = _InitFileStorage(file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  objects.NIC.CheckParameterSyntax(nicparams)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # set up ssh config and /etc/hosts
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  backend.WriteSsconfFiles(cfg.GetSsconfValues())

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  # start the master ip
  # TODO: Review rpc call from bootstrap
  # TODO: Warn on failed start master
  rpc.RpcRunner.call_node_start_master(hostname.name, True, False)
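
# Example (hedged sketch; the argument values below are hypothetical, not
# defaults of this module): a minimal IPv4 cluster initialisation, as
# "gnt-cluster init" would drive it, might look like:
#
#   InitCluster("cluster.example.com", "aa:00:00", "eth0",
#               "/srv/ganeti/file-storage", 10,
#               vg_name="xenvg",
#               beparams=..., nicparams=..., hvparams=...,
#               enabled_hypervisors=[constants.HT_XEN_PVM],
#               primary_ip_version=constants.IP4_VERSION)
#
# The parameter dicts must already be complete: this function only validates
# them (ForceDictType/CheckParameterSyntax); the CLI layer is expected to
# have merged in the defaults beforehand.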


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  nodes = {
    master_node_config.name: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.name],
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)
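
# Note (added commentary): InitConfig writes the configuration directly with
# utils.WriteFile rather than going through ConfigWriter, because no
# configuration exists yet at this point; InitCluster above immediately
# re-reads it with config.ConfigWriter(offline=True).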


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)
  result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  sshrunner = ssh.SshRunner(cluster_name,
                            ipv6=(family == netutils.IP6Address.family))

  noded_cert = utils.ReadFile(constants.NODED_CERT_FILE)
  rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
  confd_hmac_key = utils.ReadFile(constants.CONFD_HMAC_KEY)

  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below. HMAC keys are hexadecimal strings,
  # so the same restrictions apply.
  for content in (noded_cert, rapi_cert, confd_hmac_key):
    if re.search(r"^!EOF\.", content, re.MULTILINE):
      raise errors.OpExecError("invalid SSL certificate or HMAC key")

  if not noded_cert.endswith("\n"):
    noded_cert += "\n"
  if not rapi_cert.endswith("\n"):
    rapi_cert += "\n"
  if not confd_hmac_key.endswith("\n"):
    confd_hmac_key += "\n"

  bind_address = constants.IP4_ADDRESS_ANY
  if family == netutils.IP6Address.family:
    bind_address = constants.IP6_ADDRESS_ANY

  # set up the inter-node password and certificate, restart the node daemon
  # and then connect with ssh to set password and start ganeti-noded;
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
  mycommand = ("%s stop-all; %s start %s -b '%s'" % (constants.DAEMON_UTIL,
                                                     constants.DAEMON_UTIL,
                                                     constants.NODED,
                                                     bind_address))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=True,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  _WaitForNodeDaemon(node)
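
# Example (illustrative, hypothetical install path): with DAEMON_UTIL at
# /usr/lib/ganeti/daemon-util and an IPv4 cluster, mycommand above expands
# to roughly:
#
#   /usr/lib/ganeti/daemon-util stop-all; \
#   /usr/lib/ganeti/daemon-util start ganeti-noded -b '0.0.0.0'
#
# i.e. all daemons on the new node are stopped, then only ganeti-noded is
# started, bound to the wildcard address; the master drives the rest of the
# node setup over RPC once the daemon answers.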


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  the new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    cluster_info = cfg.GetClusterInfo()
    cluster_info.master_node = new_master
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # if cfg.Update worked, then it means the old master daemon won't be
  # able to write its own config file anymore (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write()); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  logging.info("Starting the master daemons on the new master")

  result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode
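
# Note (added commentary): MasterFailover returns 0 on success; a non-zero
# return code means the configuration was already switched over but one of
# the daemon operations failed and needs manual follow-up, which is how the
# "gnt-cluster master-failover" command surfaces problems to the operator.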
663 """Returns the current master node.
665 This is a separate function in bootstrap since it's needed by
666 gnt-cluster, and instead of importing directly ssconf, it's better
667 to abstract it in bootstrap, where we do use ssconf in other
671 sstore = ssconf.SimpleStore()
673 old_master, _ = ssconf.GetMasterAndMyself(sstore)


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside)
  we use the same source of configuration information for both backend
  and bootstrap, so we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      fail = True
    # for now we accept both length 3 and 4 (data[3] is primary ip version)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()
  # sort first on number of votes then on name, since we want None
  # sorted later if we have half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
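
# Example (illustrative): querying three other nodes, of which two vote for
# "node1.example.com" and one is unreachable, GatherMasterVotes returns
# something like [("node1.example.com", 2), (None, 1)]; the None entry
# counts the nodes whose vote could not be obtained.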