# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

22 """Functions to bootstrap a new cluster.
from ganeti.cmdlib import cluster
from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti.storage import drbd
from ganeti.storage import filestorage
from ganeti import netutils
from ganeti import luxi
from ganeti import jstore
from ganeti import pathutils


# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"

#: Number of seconds within which a daemon must become responsive
_DAEMON_READY_TIMEOUT = 10.0


60 """Setup the SSH configuration for the cluster.
62 This generates a dsa keypair for root, adds the pub key to the
63 permitted hosts and adds the hostkey to its own known hosts.
66 priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)
68 for name in priv_key, pub_key:
69 if os.path.exists(name):
70 utils.CreateBackup(name)
71 utils.RemoveFile(name)
73 result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
77 raise errors.OpExecError("Could not generate ssh keypair, error %s" %
80 utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: string
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)

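
# Example (illustrative only):
#   GenerateHmacKey(pathutils.CONFD_HMAC_KEY)
# writes a fresh random secret to the confd HMAC key path, readable only by
# its owner (mode 0400).

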
def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=pathutils.NODED_CERT_FILE,
                          rapicert_file=pathutils.RAPI_CERT_FILE,
                          spicecert_file=pathutils.SPICE_CERT_FILE,
                          spicecacert_file=pathutils.SPICE_CACERT_FILE,
                          hmackey_file=pathutils.CONFD_HMAC_KEY,
                          cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type spice_cert_pem: string
  @param spice_cert_pem: New SPICE certificate in PEM format
  @type spice_cacert_pem: string
  @param spice_cacert_pem: Certificate of the CA that signed the SPICE
                           certificate, in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type spicecert_file: string
  @param spicecert_file: optional override of the spice cert file path
  @type spicecacert_file: string
  @param spicecacert_file: optional override of the spice CA cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path

  """
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI
  rapi_cert_exists = os.path.exists(rapicert_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # SPICE
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file)

    # Self-signed certificate -> the public certificate is also the CA public
    # certificate
    logging.debug("Writing the public certificate to %s",
                  spicecacert_file)
    utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)

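
# Example (illustrative only): rotate just the RAPI certificate while leaving
# the node certificate, SPICE material, HMAC key and cluster domain secret
# untouched:
#   GenerateClusterCrypto(new_cluster_cert=False, new_rapi_cert=True,
#                         new_spice_cert=False, new_confd_hmac_key=False,
#                         new_cds=False)

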
def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False, False)

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    # Pylint bug <http://www.logilab.org/ticket/35642>
    # pylint: disable=E1101
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))


def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _WaitForSshDaemon(hostname, port, family):
  """Wait for SSH daemon to become responsive.

  """
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
      logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                    " responsive", hostname, port, hostip)
    else:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))

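
# The three _WaitFor*Daemon helpers above share one pattern: poll once per
# second via utils.Retry until _DAEMON_READY_TIMEOUT seconds have passed,
# then turn the timeout into an OpExecError naming the unresponsive daemon.

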
def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
                    use_cluster_key, ask_key, strict_host_check, data):
  """Runs a command to configure something on a remote machine.

  @type cluster_name: string
  @param cluster_name: Cluster name
  @type node: string
  @param node: Node name
  @type basecmd: string
  @param basecmd: Base command (path on the remote machine)
  @type debug: bool
  @param debug: Enable debug output
  @type verbose: bool
  @param verbose: Enable verbose output
  @type use_cluster_key: bool
  @param use_cluster_key: See L{ssh.SshRunner.BuildCmd}
  @type ask_key: bool
  @param ask_key: See L{ssh.SshRunner.BuildCmd}
  @type strict_host_check: bool
  @param strict_host_check: See L{ssh.SshRunner.BuildCmd}
  @param data: JSON-serializable input data for script (passed to stdin)

  """
  cmd = [basecmd]

  # Pass --debug/--verbose to the external script if set on our invocation
  if debug:
    cmd.append("--debug")

  if verbose:
    cmd.append("--verbose")

  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  srun = ssh.SshRunner(cluster_name,
                       ipv6=(family == netutils.IP6Address.family))
  scmd = srun.BuildCmd(node, constants.SSH_LOGIN_USER,
                       utils.ShellQuoteArgs(cmd),
                       batch=False, ask_key=ask_key, quiet=False,
                       strict_host_check=strict_host_check,
                       use_cluster_key=use_cluster_key)

  # The input data is handed to the remote command on stdin, via a temporary
  # file, rather than on the command line
  tempfh = tempfile.TemporaryFile()
  try:
    tempfh.write(serializer.DumpJson(data))
    tempfh.seek(0)

    result = utils.RunCmd(scmd, interactive=True, input_fd=tempfh)
  finally:
    tempfh.close()

  if result.failed:
    raise errors.OpExecError("Command '%s' failed: %s" %
                             (result.cmd, result.fail_reason))

  _WaitForSshDaemon(node, netutils.GetDaemonPort(constants.SSH), family)

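
# Note: the SSH daemon is probed again after the remote command completes;
# presumably the setup script may restart sshd (e.g. after regenerating host
# keys), so subsequent SSH-based steps should only proceed once it answers.

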
def _InitFileStorageDir(file_storage_dir):
  """Initialize the file storage directory, if needed.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  return file_storage_dir


def _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    default_dir, file_disk_template,
    init_fn=_InitFileStorageDir, acceptance_fn=None):
  """Checks if a file-based storage type is enabled and inits the dir.

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates
  @type file_storage_dir: string
  @param file_storage_dir: the file storage directory
  @type default_dir: string
  @param default_dir: default file storage directory when C{file_storage_dir}
      is C{None}
  @type file_disk_template: string
  @param file_disk_template: a disk template whose storage type is 'ST_FILE'
  @rtype: string
  @returns: the name of the actual file storage directory

  """
  assert (file_disk_template in
          utils.storage.GetDiskTemplatesOfStorageType(constants.ST_FILE))
  if file_storage_dir is None:
    file_storage_dir = default_dir
  if not acceptance_fn:
    acceptance_fn = \
      lambda path: filestorage.CheckFileStoragePathAcceptance(
        path, exact_match_ok=True)

  cluster.CheckFileStoragePathVsEnabledDiskTemplates(
    logging.warning, file_storage_dir, enabled_disk_templates)

  file_storage_enabled = file_disk_template in enabled_disk_templates
  if file_storage_enabled:
    try:
      acceptance_fn(file_storage_dir)
    except errors.FileStoragePathError as e:
      raise errors.OpPrereqError(str(e))
    result_file_storage_dir = init_fn(file_storage_dir)
  else:
    result_file_storage_dir = file_storage_dir
  return result_file_storage_dir


def _PrepareFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    pathutils.DEFAULT_FILE_STORAGE_DIR, constants.DT_FILE,
    init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareSharedFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if shared file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, constants.DT_SHARED_FILE,
    init_fn=init_fn, acceptance_fn=acceptance_fn)

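
# The two wrappers above differ only in the defaults they pass down to
# _PrepareFileBasedStorage: the build-time default directory
# (pathutils.DEFAULT_FILE_STORAGE_DIR vs. DEFAULT_SHARED_FILE_STORAGE_DIR)
# and the disk template being checked (DT_FILE vs. DT_SHARED_FILE).

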
def _InitCheckEnabledDiskTemplates(enabled_disk_templates):
  """Checks the sanity of the enabled disk templates.

  """
  if not enabled_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_disk_templates = \
    set(enabled_disk_templates) - constants.DISK_TEMPLATES
  if invalid_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list contains invalid"
                               " entries: %s" % invalid_disk_templates,
                               errors.ECODE_INVAL)


def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates):
  """Restricts the ipolicy's disk templates to the enabled ones.

  This function removes from the ipolicy's list of allowed disk templates
  those that are not enabled by the cluster.

  @type ipolicy: dict
  @param ipolicy: the instance policy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: the list of cluster-wide enabled disk
    templates

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  restricted_disk_templates = list(set(allowed_disk_templates)
                                   .intersection(set(enabled_disk_templates)))
  ipolicy[constants.IPOLICY_DTS] = restricted_disk_templates

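
# Illustration (hypothetical values): with ipolicy[constants.IPOLICY_DTS] ==
# ["plain", "drbd", "file"] and enabled_disk_templates == ["drbd", "plain"],
# the allowed list is reduced to the intersection, i.e. "drbd" and "plain";
# element order is not preserved, since the result goes through set().

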
def InitCluster(cluster_name, mac_prefix,  # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
                vg_name=None, beparams=None, nicparams=None, ndparams=None,
                hvparams=None, diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, primary_ip_version=None, ipolicy=None,
                prealloc_wipe_disks=False, use_external_mip_script=False,
                hv_state=None, disk_state=None, enabled_disk_templates=None):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
    cluster

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s" %
                                 (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  if drbd_helper is not None:
    try:
      curr_helper = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

586 logging.debug("Stopping daemons (if any are running)")
587 result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
589 raise errors.OpExecError("Could not stop daemons, command %s"
590 " had exitcode %s and error '%s'" %
591 (result.cmd, result.exit_code, result.output))
593 file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
595 shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates,
596 shared_file_storage_dir)
  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # FIXME: disk_state has no default values yet
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verifying diskparam options: %s" % err,
                               errors.ECODE_INVAL)
  # set up ssh config and /etc/hosts
  rsa_sshkey = ""
  dsa_sshkey = ""
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  elif constants.HTOOLS:
    # htools was enabled at build-time, we default to it
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now, mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()

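
# At this point the cluster is bootstrapped: the initial configuration has
# been written, certificates and secrets are in place, the node daemon is
# running on the master, and the remaining daemons have been started via
# daemon-util.

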
def InitConfig(version, cluster_config, master_node_config,
               cfg_file=pathutils.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  cluster_config.master_node = master_node_config.uuid
  nodes = {
    master_node_config.uuid: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.uuid],
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


def FinalizeClusterDestroy(master_uuid):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_name = cfg.GetNodeName(master_uuid)

  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = master_uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_name, master_params,
                                                 ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master_name)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(opts, cluster_name, node):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node

  """
  data = {
    constants.NDS_CLUSTER_NAME: cluster_name,
    constants.NDS_NODE_DAEMON_CERTIFICATE:
      utils.ReadFile(pathutils.NODED_CERT_FILE),
    constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
    constants.NDS_START_NODE_DAEMON: True,
    }

  RunNodeSetupCmd(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
                  opts.debug, opts.verbose,
                  True, opts.ssh_key_check, opts.ssh_key_check, data)

  _WaitForNodeDaemon(node)

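
# The dictionary assembled above is what the new node receives on stdin via
# RunNodeSetupCmd: the cluster name, the master's node daemon certificate,
# the full ssconf contents, and a flag telling the setup script to start
# ganeti-noded once the files are in place.

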
def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_names)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # if cfg.Update worked, then it means the old master daemon won't be
  # able now to write its own config file (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write(); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  runner = rpc.BootstrapRunner()
  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = old_master_node.uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(old_master,
                                                 master_params, ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(old_master)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)

  logging.info("Master failed over from %s to %s", old_master, new_master)


1026 """Returns the current master node.
1028 This is a separate function in bootstrap since it's needed by
1029 gnt-cluster, and instead of importing directly ssconf, it's better
1030 to abstract it in bootstrap, where we do use ssconf in other
1034 sstore = ssconf.SimpleStore()
1036 old_master, _ = ssconf.GetMasterAndMyself(sstore)
def GatherMasterVotes(node_names):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside)
  we use the same source of configuration information for both backend
  and bootstrap, so we'll always vote for ourselves.

  @type node_names: list
  @param node_names: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_names.remove(myself)
  except ValueError:
    pass
  if not node_names:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.BootstrapRunner().call_master_info(node_names)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_names))]
  votes = {}
  for node_name in results:
    nres = results[node_name]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node_name, msg)
      fail = True
    # for now we accept both length 3, 4 and 5 (data[3] is primary ip version
    # and data[4] is the master netmask)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s",
                      node_name, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = [v for v in votes.items()]
  # sort first on number of votes then on name, since we want None
  # sorted later if we have half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
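
# Example of a returned vote list (hypothetical hostnames): with three nodes
# agreeing on node1 and one node unreachable, the result would be
# [("node1.example.com", 3), (None, 1)], and MasterFailover would treat
# node1.example.com as the currently recognised master.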