# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Functions to bootstrap a new cluster.
from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
46 """Setup the SSH configuration for the cluster.
48 This generates a dsa keypair for root, adds the pub key to the
49 permitted hosts and adds the hostkey to its own known hosts.
52 priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
54 for name in priv_key, pub_key:
55 if os.path.exists(name):
56 utils.CreateBackup(name)
57 utils.RemoveFile(name)
59 result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
63 raise errors.OpExecError("Could not generate ssh keypair, error %s" %
66 utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
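
# For reference, a rough shell equivalent of _InitSSHSetup (the key paths are
# illustrative; the real locations come from ssh.GetUserFiles):
#
#   ssh-keygen -t dsa -f /root/.ssh/id_dsa -q -N ""
#   cat /root/.ssh/id_dsa.pub >> /root/.ssh/authorized_keys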


def GenerateSelfSignedSslCert(file_name, validity=(365 * 5)):
  """Generates a self-signed SSL certificate.

  @type file_name: str
  @param file_name: Path to output file
  @type validity: int
  @param validity: Validity for certificate in days

  """
  (fd, tmp_file_name) = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    try:
      # Set permissions before writing key
      os.chmod(tmp_file_name, 0600)

      result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                             "-days", str(validity), "-nodes", "-x509",
                             "-keyout", tmp_file_name, "-out", tmp_file_name,
                             "-batch"])
      if result.failed:
        raise errors.OpExecError("Could not generate SSL certificate, command"
                                 " %s had exitcode %s and error message %s" %
                                 (result.cmd, result.exit_code, result.output))

      # Make the file read-only before moving it into place
      os.chmod(tmp_file_name, 0400)

      os.rename(tmp_file_name, file_name)
    finally:
      utils.RemoveFile(tmp_file_name)
  finally:
    os.close(fd)
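
# Example usage (the path and validity are purely illustrative):
#
#   GenerateSelfSignedSslCert("/var/lib/ganeti/server.pem", validity=365)
#
# which leaves the RSA key and the certificate combined in one PEM file,
# readable only by its owner.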


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: string
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data=utils.GenerateSecret(), mode=0400)


def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)

  # Don't overwrite existing file
  if not os.path.exists(constants.RAPI_CERT_FILE):
    GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)

  if not os.path.exists(constants.HMAC_CLUSTER_KEY):
    GenerateHmacKey(constants.HMAC_CLUSTER_KEY)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  # Wait for node daemon to become responsive
  def _CheckNodeDaemon():
    result = rpc.RpcRunner.call_version([master_name])[master_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, 10.0)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon didn't answer queries within"
                             " 10 seconds")
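
# The utils.Retry call above polls the freshly restarted daemon roughly once
# per second and gives up after ten seconds; a single successful RPC answer
# is enough. The same pattern as a generic sketch (_ServiceIsUp is a
# hypothetical probe):
#
#   def _CheckService():
#     if not _ServiceIsUp():
#       raise utils.RetryAgain()
#
#   utils.Retry(_CheckService, 1.0, 10.0)  # (function, delay, timeout)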


def InitCluster(cluster_name, mac_prefix,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None,
                nicparams=None, hvparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  hostname = utils.GetHostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the loopback"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host. Aborting." %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = utils.GetHostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.",
                               errors.ECODE_NOTUNIQUE)

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given",
                                 errors.ECODE_INVAL)
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip, errors.ECODE_ENVIRON)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus,
                                 errors.ECODE_INVAL)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.", errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT,
                               errors.ECODE_ENVIRON)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  objects.NIC.CheckParameterSyntax(nicparams)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  # set up ssh config and /etc/hosts
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name)

  if modify_ssh_setup:
    _InitSSHSetup()

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    ctime=now,
    mtime=now,
    uuid=utils.NewUUID(),
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter()
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)

  # start the master ip
  # TODO: Review rpc call from bootstrap
  # TODO: Warn on failed start master
  rpc.RpcRunner.call_node_start_master(hostname.name, True, False)
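
# A minimal sketch of a direct call (all values hypothetical; normally
# "gnt-cluster init" collects these from its command line and from the
# cluster defaults):
#
#   InitCluster("cluster.example.com", "aa:00:00", master_netdev="eth0",
#               file_storage_dir="/srv/ganeti/file-storage",
#               candidate_pool_size=10, vg_name="xenvg",
#               beparams=constants.BEC_DEFAULTS,
#               nicparams=constants.NICC_DEFAULTS,
#               hvparams={constants.HT_XEN_PVM:
#                         constants.HVC_DEFAULTS[constants.HT_XEN_PVM]},
#               enabled_hypervisors=[constants.HT_XEN_PVM])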


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)
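
# Sketch of reading the configuration back, using the counterparts of the
# calls above (ToDict/Dump on the way out, Load/FromDict on the way in):
#
#   raw = utils.ReadFile(constants.CLUSTER_CONF_FILE)
#   config_data = objects.ConfigData.FromDict(serializer.Load(raw))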


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)
  result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)

  noded_cert = utils.ReadFile(constants.SSL_CERT_FILE)
  rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
  hmac_key = utils.ReadFile(constants.HMAC_CLUSTER_KEY)

  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below. HMAC keys are hexadecimal strings,
  # so the same restrictions apply.
  for content in (noded_cert, rapi_cert, hmac_key):
    if re.search('^!EOF\.', content, re.MULTILINE):
      raise errors.OpExecError("invalid SSL certificate or HMAC key")

  if not noded_cert.endswith("\n"):
    noded_cert += "\n"
  if not rapi_cert.endswith("\n"):
    rapi_cert += "\n"
  if not hmac_key.endswith("\n"):
    hmac_key += "\n"

  # copy over the inter-node certificates and keys and restart the node
  # daemon: we connect with ssh, write the files via here-documents and then
  # invoke the init script; note that all the variables below are sanitized
  # at this point, either by being constants or by the checks above
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "chmod 0400 %s %s %s && "
               "%s restart" %
               (constants.SSL_CERT_FILE, noded_cert,
                constants.RAPI_CERT_FILE, rapi_cert,
                constants.HMAC_CLUSTER_KEY, hmac_key,
                constants.SSL_CERT_FILE, constants.RAPI_CERT_FILE,
                constants.HMAC_CLUSTER_KEY,
                constants.NODE_INITD_SCRIPT))
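
  # For illustration, with hypothetical paths the rendered command looks
  # roughly like (one here-document per file, each terminated by the '!EOF.'
  # marker which the checks above guarantee cannot occur inside the payload):
  #
  #   umask 077 && cat > '/var/lib/ganeti/server.pem' << '!EOF.' &&
  #   -----BEGIN CERTIFICATE-----
  #   ...
  #   !EOF.
  #   ... (likewise for the RAPI certificate and the HMAC key) ...
  #   chmod 0400 ... && /etc/init.d/ganeti restart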

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and this node to become the
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
                      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  # Here we have a phase where no master should be running

  # instantiate a real config writer, as we now know we have the
  # configuration data
  cfg = config.ConfigWriter()

  cluster_info = cfg.GetClusterInfo()
  cluster_info.master_node = new_master
  # this will also regenerate the ssconf files, since we updated the
  # cluster info
  cfg.Update(cluster_info, logging.error)

  result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  return rcode
520 """Returns the current master node.
522 This is a separate function in bootstrap since it's needed by
523 gnt-cluster, and instead of importing directly ssconf, it's better
524 to abstract it in bootstrap, where we do use ssconf in other
528 sstore = ssconf.SimpleStore()
530 old_master, _ = ssconf.GetMasterAndMyself(sstore)


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  because we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      fail = True
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()
  # sort first on number of votes then on name, since we want None
  # sorted later if half of the nodes are not responding and the other
  # half all vote for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
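
# Worked example (hypothetical): querying four nodes where two vote for
# "node1", one votes for "node2" and one RPC fails yields
#
#   [("node1", 2), ("node2", 1), (None, 1)]
#
# MasterFailover only inspects the first entry, so the plurality of the
# responding nodes decides who is believed to be the master.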