# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Functions to bootstrap a new cluster.
from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti import bdev
from ganeti import netutils
47 """Setup the SSH configuration for the cluster.
49 This generates a dsa keypair for root, adds the pub key to the
50 permitted hosts and adds the hostkey to its own known hosts.
53 priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
55 for name in priv_key, pub_key:
56 if os.path.exists(name):
57 utils.CreateBackup(name)
58 utils.RemoveFile(name)
60 result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
64 raise errors.OpExecError("Could not generate ssh keypair, error %s" %
67 utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: string
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)
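
# utils.GenerateSecret() returns a random hex string, so the file written
# above holds a single line of hex digits, e.g. (illustrative value only):
#
#   86ff1e4fae3f47f2876f21ae5ec1a9092a6279b6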


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_confd_hmac_key,
                          new_cds, rapi_cert_pem=None, cds=None,
                          nodecert_file=constants.NODED_CERT_FILE,
                          rapicert_file=constants.RAPI_CERT_FILE,
                          hmackey_file=constants.CONFD_HMAC_KEY,
                          cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI
  rapi_cert_exists = os.path.exists(rapicert_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)
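
# Illustrative only: the four leading booleans select which secrets to
# regenerate. _InitGanetiServerSetup below calls
# GenerateClusterCrypto(True, False, False, False) to (re)create just the
# noded SSL certificate, while e.g.
#
#   GenerateClusterCrypto(False, True, False, False)
#
# would renew only the RAPI certificate and leave the other files alone.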


def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False)

  result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    # query the daemon for its version; a failure means it is not (yet)
    # answering, so retry
    result = rpc.RpcRunner.call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, 10.0)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " 10 seconds" % node_name)


def _InitFileStorage(file_storage_dir):
  """Initialize if needed the file storage.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  if not constants.ENABLE_FILE_STORAGE:
    return ""

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.", errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  return file_storage_dir


#pylint: disable-msg=R0913
def InitCluster(cluster_name, mac_prefix,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None,
                nicparams=None, hvparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None,
                uid_pool=None, default_iallocator=None):
223 """Initialise the cluster.
225 @type candidate_pool_size: int
226 @param candidate_pool_size: master candidate pool size
229 # TODO: complete the docstring
230 if config.ConfigWriter.IsCluster():
231 raise errors.OpPrereqError("Cluster is already initialised",

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  hostname = netutils.GetHostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not netutils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host. Aborting." %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = \
    netutils.GetHostInfo(netutils.HostInfo.NormalizeName(cluster_name))

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                      timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.",
                               errors.ECODE_NOTUNIQUE)

  if secondary_ip:
    if not netutils.IsValidIP4(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given",
                                 errors.ECODE_INVAL)
    if (secondary_ip != hostname.ip and
        not netutils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip, errors.ECODE_ENVIRON)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus,
                                 errors.ECODE_INVAL)

  if drbd_helper is not None:
    try:
      curr_helper = bdev.BaseDRBD.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

  file_storage_dir = _InitFileStorage(file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  objects.NIC.CheckParameterSyntax(nicparams)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  # set up ssh config and /etc/hosts
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    uuid=utils.NewUUID(),
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter()
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)

  # start the master ip
  # TODO: Review rpc call from bootstrap
  # TODO: Warn on failed start master
  rpc.RpcRunner.call_node_start_master(hostname.name, True, False)
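
# A minimal invocation sketch (hypothetical values; in practice this is
# driven by "gnt-cluster init" rather than called directly):
#
#   InitCluster("cluster.example.com", "aa:00:00", "eth0",
#               "/srv/ganeti/file-storage", 10,
#               vg_name="xenvg", beparams=..., nicparams=...,
#               hvparams=..., enabled_hypervisors=[constants.HT_XEN_PVM])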


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)
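
# For reference, the file written above holds the serialized ConfigData;
# its top level is a dict roughly of the form (values abbreviated):
#
#   {"version": ..., "cluster": {...}, "nodes": {"<master>": {...}},
#    "instances": {}, "serial_no": 1, "ctime": ..., "mtime": ...}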


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)
  result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)

  noded_cert = utils.ReadFile(constants.NODED_CERT_FILE)
  rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
  confd_hmac_key = utils.ReadFile(constants.CONFD_HMAC_KEY)

  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below. HMAC keys are hexadecimal strings,
  # so the same restrictions apply.
  for content in (noded_cert, rapi_cert, confd_hmac_key):
    if re.search('^!EOF\.', content, re.MULTILINE):
      raise errors.OpExecError("invalid SSL certificate or HMAC key")

  if not noded_cert.endswith("\n"):
    noded_cert += "\n"
  if not rapi_cert.endswith("\n"):
    rapi_cert += "\n"
  if not confd_hmac_key.endswith("\n"):
    confd_hmac_key += "\n"

  # set up inter-node password and certificate and restarts the node daemon
  # and then connect with ssh to set password and start ganeti-noded
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  # TODO: Could this command exceed a shell's maximum command length?
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "chmod 0400 %s %s %s && "
               "%s start %s" %
               (constants.NODED_CERT_FILE, noded_cert,
                constants.RAPI_CERT_FILE, rapi_cert,
                constants.CONFD_HMAC_KEY, confd_hmac_key,
                constants.NODED_CERT_FILE, constants.RAPI_CERT_FILE,
                constants.CONFD_HMAC_KEY,
                constants.DAEMON_UTIL, constants.NODED))
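
  # For illustration, after interpolation the remote script looks roughly
  # like this (exact paths and names depend on the constants above):
  #
  #   umask 077 &&
  #   cat > '/var/lib/ganeti/server.pem' << '!EOF.' &&
  #   <PEM data>
  #   !EOF.
  #   ... (likewise for the RAPI cert and the confd HMAC key) ...
  #   chmod 0400 <the three files> && daemon-util start ganeti-noded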

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  _WaitForNodeDaemon(node)


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)

  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  # instantiate a real config writer, as we now know we have the
  # configuration data
  cfg = config.ConfigWriter()

  cluster_info = cfg.GetClusterInfo()
  cluster_info.master_node = new_master
  # this will also regenerate the ssconf files, since we updated the
  # cluster info
  cfg.Update(cluster_info, logging.error)

  result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  return rcode
610 """Returns the current master node.
612 This is a separate function in bootstrap since it's needed by
613 gnt-cluster, and instead of importing directly ssconf, it's better
614 to abstract it in bootstrap, where we do use ssconf in other
618 sstore = ssconf.SimpleStore()
620 old_master, _ = ssconf.GetMasterAndMyself(sstore)


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  since we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      fail = True
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = [v for v in votes.items()]
  # sort first on number of votes then on name, since we want None
  # sorted later if half of the nodes did not respond while the other
  # half all voted for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
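
# Illustrative result: querying five nodes of which three vote for node1,
# one votes for node2 and one fails to answer would yield
#
#   [("node1", 3), ("node2", 1), (None, 1)]
#
# so vote_list[0][0], as used by MasterFailover above, is the agreed-on
# master, or None when most nodes could not be contacted.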