# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
"""Functions to bootstrap a new cluster.

"""

import os
import re
import logging
import tempfile

from ganeti.cmdlib import cluster
from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti.storage import drbd
from ganeti.storage import filestorage
from ganeti import netutils
from ganeti import luxi
from ganeti import jstore
from ganeti import pathutils


# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"

#: Timeout (in seconds) within which a daemon must become responsive
_DAEMON_READY_TIMEOUT = 10.0


  """Setup the SSH configuration for the cluster.

  This generates a DSA keypair for root, adds the public key to the
  permitted hosts and adds the host key to its own known hosts.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %

  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=pathutils.NODED_CERT_FILE,
                          rapicert_file=pathutils.RAPI_CERT_FILE,
                          spicecert_file=pathutils.SPICE_CERT_FILE,
                          spicecacert_file=pathutils.SPICE_CACERT_FILE,
                          hmackey_file=pathutils.CONFD_HMAC_KEY,
                          cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type spice_cert_pem: string
  @param spice_cert_pem: New SPICE certificate in PEM format
  @type spice_cacert_pem: string
  @param spice_cacert_pem: Certificate of the CA that signed the SPICE
    certificate, in PEM format
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type spicecert_file: string
  @param spicecert_file: optional override of the spice cert file path
  @type spicecacert_file: string
  @param spicecacert_file: optional override of the spice CA cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path

  """
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)
  rapi_cert_exists = os.path.exists(rapicert_file)

    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file)

    # Self-signed certificate -> the public certificate is also the CA public
    logging.debug("Writing the public certificate to %s",
    utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)
  # Cluster domain secret
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)
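
# Usage sketch: renewing all certificates and secrets at once corresponds to
#   GenerateClusterCrypto(True, True, True, True, True)
# while cluster initialisation only requests a fresh node certificate, which
# is what _InitGanetiServerSetup below does.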


def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False, False)

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    # Pylint bug <http://www.logilab.org/ticket/35642>
    # pylint: disable=E1101
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
      raise utils.RetryAgain()

    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))


def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _WaitForSshDaemon(hostname, port, family):
  """Wait for SSH daemon to become responsive.

  """
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
      logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                    " responsive", hostname, port, hostip)
      raise utils.RetryAgain()

    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))
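
# The three _WaitFor*Daemon helpers above share the same structure: poll once
# per second via utils.Retry until _DAEMON_READY_TIMEOUT expires, and convert
# a utils.RetryTimeout into an errors.OpExecError with a descriptive message.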


def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
                    use_cluster_key, ask_key, strict_host_check, data):
  """Runs a command to configure something on a remote machine.

  @type cluster_name: string
  @param cluster_name: Cluster name
  @param node: Node name
  @type basecmd: string
  @param basecmd: Base command (path on the remote machine)
  @param debug: Enable debug output
  @param verbose: Enable verbose output
  @type use_cluster_key: bool
  @param use_cluster_key: See L{ssh.SshRunner.BuildCmd}
  @param ask_key: See L{ssh.SshRunner.BuildCmd}
  @type strict_host_check: bool
  @param strict_host_check: See L{ssh.SshRunner.BuildCmd}
  @param data: JSON-serializable input data for script (passed to stdin)

  """
  # Pass --debug/--verbose to the external script if set on our invocation
    cmd.append("--debug")

    cmd.append("--verbose")

  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  srun = ssh.SshRunner(cluster_name,
                       ipv6=(family == netutils.IP6Address.family))
  scmd = srun.BuildCmd(node, constants.SSH_LOGIN_USER,
                       utils.ShellQuoteArgs(cmd),
                       batch=False, ask_key=ask_key, quiet=False,
                       strict_host_check=strict_host_check,
                       use_cluster_key=use_cluster_key)

  tempfh = tempfile.TemporaryFile()
    tempfh.write(serializer.DumpJson(data))

    result = utils.RunCmd(scmd, interactive=True, input_fd=tempfh)

    raise errors.OpExecError("Command '%s' failed: %s" %
                             (result.cmd, result.fail_reason))

  _WaitForSshDaemon(node, netutils.GetDaemonPort(constants.SSH), family)


def _InitFileStorageDir(file_storage_dir):
  """Initialize the file storage directory, if needed.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
      os.makedirs(file_storage_dir, 0750)
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  return file_storage_dir
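
# For instance (illustrative sketch): _InitFileStorageDir("/srv/ganeti/fs//")
# normalises the path, creates the directory with mode 0750 if it is missing,
# and returns the normalised path; a relative path or an existing
# non-directory raises OpPrereqError instead.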


def _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    default_dir, file_disk_template,
    init_fn=_InitFileStorageDir, acceptance_fn=None):
  """Checks if a file-based storage type is enabled and initializes the dir.

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates
  @type file_storage_dir: string
  @param file_storage_dir: the file storage directory
  @type default_dir: string
  @param default_dir: default file storage directory when C{file_storage_dir}
  @type file_disk_template: string
  @param file_disk_template: a disk template whose storage type is 'ST_FILE'

  @returns: the name of the actual file storage directory

  """
  assert (file_disk_template in
          utils.storage.GetDiskTemplatesOfStorageType(constants.ST_FILE))
  if file_storage_dir is None:
    file_storage_dir = default_dir
  if not acceptance_fn:
        lambda path: filestorage.CheckFileStoragePathAcceptance(
            path, exact_match_ok=True)

  cluster.CheckFileStoragePathVsEnabledDiskTemplates(
      logging.warning, file_storage_dir, enabled_disk_templates)

  file_storage_enabled = file_disk_template in enabled_disk_templates
  if file_storage_enabled:
      acceptance_fn(file_storage_dir)
    except errors.FileStoragePathError as e:
      raise errors.OpPrereqError(str(e))
    result_file_storage_dir = init_fn(file_storage_dir)
    result_file_storage_dir = file_storage_dir
  return result_file_storage_dir


def _PrepareFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
  """Checks if file storage is enabled and initializes the directory.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_FILE_STORAGE_DIR, constants.DT_FILE,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareSharedFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
  """Checks if shared file storage is enabled and initializes the directory.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, constants.DT_SHARED_FILE,
      init_fn=init_fn, acceptance_fn=acceptance_fn)
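
# Both wrappers above delegate to _PrepareFileBasedStorage and differ only in
# the default directory and the disk template they pass (DT_FILE vs.
# DT_SHARED_FILE), so the acceptance and initialisation logic is shared.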


def _InitCheckEnabledDiskTemplates(enabled_disk_templates):
  """Checks the sanity of the enabled disk templates.

  """
  if not enabled_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_disk_templates = \
    set(enabled_disk_templates) - constants.DISK_TEMPLATES
  if invalid_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list contains invalid"
                               " entries: %s" % invalid_disk_templates,


def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates):
  """Restricts the ipolicy's disk templates to the enabled ones.

  This function removes from the ipolicy's list of allowed disk templates
  those that are not enabled by the cluster.

  @param ipolicy: the instance policy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: the list of cluster-wide enabled disk

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  restricted_disk_templates = list(set(allowed_disk_templates)
                                   .intersection(set(enabled_disk_templates)))
  ipolicy[constants.IPOLICY_DTS] = restricted_disk_templates
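
# Worked example: with enabled_disk_templates = ["plain", "drbd"] and an
# ipolicy currently allowing ["plain", "drbd", "file"], the intersection
# leaves ipolicy[constants.IPOLICY_DTS] == ["plain", "drbd"] (the order of
# the resulting list is not significant, since it comes from a set).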


def _InitCheckDrbdHelper(drbd_helper, drbd_enabled):
  """Checks the DRBD usermode helper.

  @type drbd_helper: string
  @param drbd_helper: name of the DRBD usermode helper that the system should

  """
  if drbd_helper is not None:
      curr_helper = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (disable drbd with --enabled-disk-templates"
                                 " if you are not using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,


def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
                vg_name=None, beparams=None, nicparams=None, ndparams=None,
                hvparams=None, diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, primary_ip_version=None, ipolicy=None,
                prealloc_wipe_disks=False, use_external_mip_script=False,
                hv_state=None, disk_state=None, enabled_disk_templates=None):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
                                 (master_netmask, primary_ip_version),
    master_netmask = ipcls.iplen

    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
  _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
  shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates,
                                                      shared_file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,

  if not nicparams.get('mode', None) == "openvswitch":
    # Do not do this check if mode=openvswitch, since the openvswitch is not
    result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                  result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)
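
  # Normalise and validate the various parameter dictionaries: beparams,
  # nicparams, ndparams, hv_state, disk_state, hvparams and diskparams are
  # all type-checked below before being stored in the cluster object.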
  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # FIXME: disk_state has no default values yet
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                          utils.CommaJoin(unknown_params)),
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verifying diskparam options: %s" % err,

  # set up ssh config and /etc/hosts
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
  elif constants.HTOOLS:
    # htools was enabled at build-time, we default to it
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
      default_iallocator = constants.IALLOC_HAIL

  # init of cluster config file
  cluster_config = objects.Cluster(
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()
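
  # By this point the configuration has been written to disk, all daemons
  # have been (re)started and the master daemon has answered a query, so the
  # cluster can be considered initialised.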


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=pathutils.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
  cluster_config.master_node = master_node_config.uuid
    master_node_config.uuid: master_node_config,
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.uuid],
    default_nodegroup.uuid: default_nodegroup,
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),


def FinalizeClusterDestroy(master_uuid):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_name = cfg.GetNodeName(master_uuid)

  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = master_uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_name, master_params,

  msg = result.fail_msg
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master_name)
  msg = result.fail_msg
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
  msg = result.fail_msg
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(opts, cluster_name, node):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node

  """
    constants.NDS_CLUSTER_NAME: cluster_name,
    constants.NDS_NODE_DAEMON_CERTIFICATE:
      utils.ReadFile(pathutils.NODED_CERT_FILE),
    constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
    constants.NDS_START_NODE_DAEMON: True,

  RunNodeSetupCmd(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
                  opts.debug, opts.verbose,
                  True, opts.ssh_key_check, opts.ssh_key_check, data)

  _WaitForNodeDaemon(node)
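
# Note that opts.ssh_key_check is passed both as ask_key and as
# strict_host_check above, so a single CLI option controls both SSH host-key
# behaviours while setting up the new node.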


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),

    vote_list = GatherMasterVotes(node_names)

      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   (old_master, voted_master),

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

    # instantiate a real config writer, as we now know we have the
    cfg = config.ConfigWriter(accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,

    cluster_info.master_node = new_master_node.uuid
    # this will also regenerate the ssconf files, since we updated the
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",

  # if cfg.Update worked, then it means the old master daemon won't be
  # able now to write its own config file (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write()); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  runner = rpc.BootstrapRunner()
  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = old_master_node.uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(old_master,

  msg = result.fail_msg
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(old_master)
  msg = result.fail_msg
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()

  # Here we have a phase where no master should be running
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
  msg = result.fail_msg
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)

  logging.info("Master failed over from %s to %s", old_master, new_master)


  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of directly importing ssconf, it's better
  to abstract it in bootstrap, where we do use ssconf in other

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)


def GatherMasterVotes(node_names):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  because we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_names: list
  @param node_names: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
    node_names.remove(myself)
    # no nodes left (eventually after removing myself)
  results = rpc.BootstrapRunner().call_master_info(node_names)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_names))]

  for node_name in results:
    nres = results[node_name]
      logging.warning("Error contacting node %s: %s", node_name, msg)
    # for now we accept both length 3, 4 and 5 (data[3] is primary ip version
    # and data[4] is the master netmask)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s",
      if None not in votes:
      master_node = data[2]
      if master_node not in votes:
        votes[master_node] = 0
      votes[master_node] += 1

  vote_list = [v for v in votes.items()]
  # sort first on number of votes then on name, since we want None to be
  # sorted later if half of the nodes are not responding and the other
  # half are all voting for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)