4 # Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 """Cluster related commands"""
23 # pylint: disable=W0401,W0613,W0614,C0103
24 # W0401: Wildcard import ganeti.cli
25 # W0613: Unused argument, since all functions follow the same API
26 # W0614: Unused import %s from wildcard import (since we need cli)
27 # C0103: Invalid name gnt-cluster
34 from ganeti.cli import *
35 from ganeti import opcodes
36 from ganeti import constants
37 from ganeti import errors
38 from ganeti import utils
39 from ganeti import bootstrap
40 from ganeti import ssh
41 from ganeti import objects
42 from ganeti import uidpool
43 from ganeti import compat
44 from ganeti import netutils
# --on: with "gnt-cluster epo", recover (power on) instead of powering off
ON_OPT = cli_option("--on", default=False,
                    action="store_true", dest="on",
                    help="Recover from an EPO")

# --groups: interpret positional arguments as node group names, not nodes
GROUPS_OPT = cli_option("--groups", default=False,
                        action="store_true", dest="groups",
                        help="Arguments are node groups instead of nodes")

# Timing knobs for the EPO node-reachability polling loop
_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
_EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes
61 def InitCluster(opts, args):
62 """Initialize the cluster.
64 @param opts: the command line options selected by the user
66 @param args: should contain only one element, the desired
69 @return: the desired exit code
72 if not opts.lvm_storage and opts.vg_name:
73 ToStderr("Options --no-lvm-storage and --vg-name conflict.")
76 vg_name = opts.vg_name
77 if opts.lvm_storage and not opts.vg_name:
78 vg_name = constants.DEFAULT_VG
80 if not opts.drbd_storage and opts.drbd_helper:
81 ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
84 drbd_helper = opts.drbd_helper
85 if opts.drbd_storage and not opts.drbd_helper:
86 drbd_helper = constants.DEFAULT_DRBD_HELPER
88 master_netdev = opts.master_netdev
89 if master_netdev is None:
90 master_netdev = constants.DEFAULT_BRIDGE
92 hvlist = opts.enabled_hypervisors
94 hvlist = constants.DEFAULT_ENABLED_HYPERVISOR
95 hvlist = hvlist.split(",")
97 hvparams = dict(opts.hvparams)
98 beparams = opts.beparams
99 nicparams = opts.nicparams
101 # prepare beparams dict
102 beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
103 utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
105 # prepare nicparams dict
106 nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
107 utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
109 # prepare ndparams dict
110 if opts.ndparams is None:
111 ndparams = dict(constants.NDC_DEFAULTS)
113 ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams)
114 utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
116 # prepare hvparams dict
117 for hv in constants.HYPER_TYPES:
118 if hv not in hvparams:
120 hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
121 utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)
123 if opts.candidate_pool_size is None:
124 opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT
126 if opts.mac_prefix is None:
127 opts.mac_prefix = constants.DEFAULT_MAC_PREFIX
129 uid_pool = opts.uid_pool
130 if uid_pool is not None:
131 uid_pool = uidpool.ParseUidPool(uid_pool)
133 if opts.prealloc_wipe_disks is None:
134 opts.prealloc_wipe_disks = False
137 primary_ip_version = int(opts.primary_ip_version)
138 except (ValueError, TypeError), err:
139 ToStderr("Invalid primary ip version value: %s" % str(err))
142 master_netmask = opts.master_netmask
144 if master_netmask is not None:
145 master_netmask = int(master_netmask)
146 except (ValueError, TypeError), err:
147 ToStderr("Invalid master netmask value: %s" % str(err))
150 bootstrap.InitCluster(cluster_name=args[0],
151 secondary_ip=opts.secondary_ip,
153 mac_prefix=opts.mac_prefix,
154 master_netmask=master_netmask,
155 master_netdev=master_netdev,
156 file_storage_dir=opts.file_storage_dir,
157 shared_file_storage_dir=opts.shared_file_storage_dir,
158 enabled_hypervisors=hvlist,
163 candidate_pool_size=opts.candidate_pool_size,
164 modify_etc_hosts=opts.modify_etc_hosts,
165 modify_ssh_setup=opts.modify_ssh_setup,
166 maintain_node_health=opts.maintain_node_health,
167 drbd_helper=drbd_helper,
169 default_iallocator=opts.default_iallocator,
170 primary_ip_version=primary_ip_version,
171 prealloc_wipe_disks=opts.prealloc_wipe_disks,
173 op = opcodes.OpClusterPostInit()
174 SubmitOpCode(op, opts=opts)
def DestroyCluster(opts, args):
  """Destroy the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # safety latch: an explicit --yes-do-it is required
  if not opts.yes_do_it:
    ToStderr("Destroying a cluster is irreversible. If you really want to"
             " destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpClusterDestroy()
  master = SubmitOpCode(op, opts=opts)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master)
  return 0
def RenameCluster(opts, args):
  """Rename the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new cluster name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])

  new_name = args[0]
  # renaming drops the cluster IP from the master; warn unless --force
  if not opts.force:
    usertext = ("This will rename the cluster from '%s' to '%s'. If you are"
                " connected over the network to the cluster name, the"
                " operation is very dangerous as the IP address will be"
                " removed from the node and the change may not go through."
                " Continue?") % (cluster_name, new_name)
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterRename(name=new_name)
  result = SubmitOpCode(op, opts=opts, cl=cl)

  if result:
    ToStdout("Cluster renamed from '%s' to '%s'", cluster_name, result)

  return 0
def ActivateMasterIp(opts, args):
  """Activates the master IP.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterActivateMasterIp()
  SubmitOpCode(op)
  return 0
def DeactivateMasterIp(opts, args):
  """Deactivates the master IP.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # NOTE(review): the confirmation guard is assumed to hang off opts.confirm,
  # matching upstream Ganeti -- confirm against the command definition
  if not opts.confirm:
    usertext = ("This will disable the master IP. All the open connections to"
                " the master IP will be closed. To reach the master you will"
                " need to use its node IP."
                " Continue?")
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterDeactivateMasterIp()
  SubmitOpCode(op)
  return 0
def RedistributeConfig(opts, args):
  """Forces push of the cluster configuration.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: empty list
  @rtype: int
  @return: the desired exit code

  """
  # build the opcode and either submit it or print it, per --submit
  redist_op = opcodes.OpClusterRedistConf()
  SubmitOrSend(redist_op, opts)
def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  result = cl.QueryClusterInfo()
  ToStdout("Software version: %s", result["software_version"])
  ToStdout("Internode protocol: %s", result["protocol_version"])
  ToStdout("Configuration format: %s", result["config_version"])
  ToStdout("OS api version: %s", result["os_api_version"])
  ToStdout("Export interface: %s", result["export_version"])
  return 0
def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  master = bootstrap.GetMaster()
  ToStdout(master)
  return 0
def _PrintGroupedParams(paramsdict, level=1, roman=False):
  """Print Grouped parameters (be, nic, disk) by group.

  @type paramsdict: dict of dicts
  @param paramsdict: {group: {param: value, ...}, ...}
  @type level: int
  @param level: Level of indention
  @type roman: bool
  @param roman: Whether to try to convert integer values to roman numerals

  """
  indent = "  " * level
  for item, val in sorted(paramsdict.items()):
    if isinstance(val, dict):
      # nested group: print the header and recurse one level deeper
      ToStdout("%s- %s:", indent, item)
      _PrintGroupedParams(val, level=level + 1, roman=roman)
    elif roman and isinstance(val, int):
      ToStdout("%s  %s: %s", indent, item, compat.TryToRoman(val))
    else:
      ToStdout("%s  %s: %s", indent, item, val)
def ShowClusterConfig(opts, args):
  """Shows cluster information.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  result = cl.QueryClusterInfo()

  ToStdout("Cluster name: %s", result["name"])
  ToStdout("Cluster UUID: %s", result["uuid"])

  ToStdout("Creation time: %s", utils.FormatTime(result["ctime"]))
  ToStdout("Modification time: %s", utils.FormatTime(result["mtime"]))

  ToStdout("Master node: %s", result["master"])

  ToStdout("Architecture (this node): %s (%s)",
           result["architecture"][0], result["architecture"][1])

  if result["tags"]:
    tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
  else:
    tags = "(none)"

  ToStdout("Tags: %s", tags)

  ToStdout("Default hypervisor: %s", result["default_hypervisor"])
  ToStdout("Enabled hypervisors: %s",
           utils.CommaJoin(result["enabled_hypervisors"]))

  ToStdout("Hypervisor parameters:")
  _PrintGroupedParams(result["hvparams"])

  ToStdout("OS-specific hypervisor parameters:")
  _PrintGroupedParams(result["os_hvp"])

  ToStdout("OS parameters:")
  _PrintGroupedParams(result["osparams"])

  ToStdout("Hidden OSes: %s", utils.CommaJoin(result["hidden_os"]))
  ToStdout("Blacklisted OSes: %s", utils.CommaJoin(result["blacklisted_os"]))

  ToStdout("Cluster parameters:")
  ToStdout("  - candidate pool size: %s",
           compat.TryToRoman(result["candidate_pool_size"],
                             convert=opts.roman_integers))
  ToStdout("  - master netdev: %s", result["master_netdev"])
  ToStdout("  - master netmask: %s", result["master_netmask"])
  ToStdout("  - lvm volume group: %s", result["volume_group_name"])
  if result["reserved_lvs"]:
    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
  else:
    reserved_lvs = "(none)"
  ToStdout("  - lvm reserved volumes: %s", reserved_lvs)
  ToStdout("  - drbd usermode helper: %s", result["drbd_usermode_helper"])
  ToStdout("  - file storage path: %s", result["file_storage_dir"])
  ToStdout("  - shared file storage path: %s",
           result["shared_file_storage_dir"])
  ToStdout("  - maintenance of node health: %s",
           result["maintain_node_health"])
  ToStdout("  - uid pool: %s",
           uidpool.FormatUidPool(result["uid_pool"],
                                 roman=opts.roman_integers))
  ToStdout("  - default instance allocator: %s", result["default_iallocator"])
  ToStdout("  - primary ip version: %d", result["primary_ip_version"])
  ToStdout("  - preallocation wipe disks: %s", result["prealloc_wipe_disks"])
  ToStdout("  - OS search path: %s", utils.CommaJoin(constants.OS_SEARCH_PATH))

  ToStdout("Default node parameters:")
  _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)

  ToStdout("Default instance parameters:")
  _PrintGroupedParams(result["beparams"], roman=opts.roman_integers)

  ToStdout("Default nic parameters:")
  _PrintGroupedParams(result["nicparams"], roman=opts.roman_integers)

  return 0
def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the path of
      the file to be copied
  @rtype: int
  @return: the desired exit code

  """
  filename = args[0]
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename,
                               errors.ECODE_INVAL)

  cl = GetClient()

  cluster_name = cl.QueryConfigValues(["cluster_name"])[0]

  # never copy onto the master itself; optionally use the secondary network
  results = GetOnlineNodes(nodes=opts.nodes, cl=cl, filter_master=True,
                           secondary_ips=opts.use_replication_network,
                           nodegroup=opts.nodegroup)

  srun = ssh.SshRunner(cluster_name=cluster_name)
  for node in results:
    if not srun.CopyFileToNode(node, filename):
      # best-effort: report the failure but keep copying to other nodes
      ToStderr("Copy of file %s to node %s failed", filename, node)

  return 0
def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the command to be run and its arguments
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  command = " ".join(args)

  nodes = GetOnlineNodes(nodes=opts.nodes, cl=cl, nodegroup=opts.nodegroup)

  cluster_name, master_node = cl.QueryConfigValues(["cluster_name",
                                                    "master_node"])

  srun = ssh.SshRunner(cluster_name=cluster_name)

  # Make sure master node is at list end, so a command that e.g. stops
  # daemons is run on the master last
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  for name in nodes:
    result = srun.Run(name, "root", command)
    ToStdout("------------------------------------------------")
    ToStdout("node: %s", name)
    ToStdout("%s", result.output)
    ToStdout("return code = %s", result.exit_code)

  return 0
def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various test on nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  skip_checks = []

  if opts.skip_nplusone_mem:
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)

  cl = GetClient()

  op = opcodes.OpClusterVerify(verbose=opts.verbose,
                               error_codes=opts.error_codes,
                               debug_simulate_errors=opts.simulate_errors,
                               skip_checks=skip_checks,
                               ignore_errors=opts.ignore_errors,
                               group_name=opts.nodegroup)
  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  results = jex.GetResults()

  # Count failed jobs and failed per-group verifications separately
  (bad_jobs, bad_results) = \
    map(len,
        # Convert iterators to lists
        map(list,
            # Count errors
            map(compat.partial(itertools.ifilterfalse, bool),
                # Convert result to booleans in a tuple
                zip(*((job_success, len(op_results) == 1 and op_results[0])
                      for (job_success, op_results) in results)))))

  if bad_jobs == 0 and bad_results == 0:
    rcode = constants.EXIT_SUCCESS
  else:
    rcode = constants.EXIT_FAILURE
    if bad_jobs > 0:
      ToStdout("%s job(s) failed while verifying the cluster.", bad_jobs)

  return rcode
537 def VerifyDisks(opts, args):
538 """Verify integrity of cluster disks.
540 @param opts: the command line options selected by the user
542 @param args: should be an empty list
544 @return: the desired exit code
549 op = opcodes.OpClusterVerifyDisks()
551 result = SubmitOpCode(op, cl=cl, opts=opts)
553 # Keep track of submitted jobs
554 jex = JobExecutor(cl=cl, opts=opts)
556 for (status, job_id) in result[constants.JOB_IDS_KEY]:
557 jex.AddJobId(None, status, job_id)
559 retcode = constants.EXIT_SUCCESS
561 for (status, result) in jex.GetResults():
563 ToStdout("Job failed: %s", result)
566 ((bad_nodes, instances, missing), ) = result
568 for node, text in bad_nodes.items():
569 ToStdout("Error gathering data on node %s: %s",
570 node, utils.SafeEncode(text[-400:]))
571 retcode = constants.EXIT_FAILURE
572 ToStdout("You need to fix these nodes first before fixing instances")
574 for iname in instances:
577 op = opcodes.OpInstanceActivateDisks(instance_name=iname)
579 ToStdout("Activating disks for instance '%s'", iname)
580 SubmitOpCode(op, opts=opts, cl=cl)
581 except errors.GenericError, err:
582 nret, msg = FormatError(err)
584 ToStderr("Error activating disks for instance %s: %s", iname, msg)
587 for iname, ival in missing.iteritems():
588 all_missing = compat.all(x[0] in bad_nodes for x in ival)
590 ToStdout("Instance %s cannot be verified as it lives on"
591 " broken nodes", iname)
593 ToStdout("Instance %s has missing logical volumes:", iname)
595 for node, vol in ival:
596 if node in bad_nodes:
597 ToStdout("\tbroken node %s /dev/%s", node, vol)
599 ToStdout("\t%s /dev/%s", node, vol)
601 ToStdout("You need to replace or recreate disks for all the above"
602 " instances if this message persists after fixing broken nodes.")
603 retcode = constants.EXIT_FAILURE
def RepairDiskSizes(opts, args):
  """Verify sizes of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: optional list of instances to restrict check to
  @rtype: int
  @return: the desired exit code

  """
  # an empty args list means "check every instance"
  SubmitOpCode(opcodes.OpClusterRepairDiskSizes(instances=args), opts=opts)
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become new
  master.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # --no-voting skips the quorum check, which can split the cluster;
  # require explicit confirmation
  if opts.no_voting:
    usertext = ("This will perform the failover even if most other nodes"
                " are down, or if this node is outdated. This is dangerous"
                " as it can lead to a non-consistent cluster. Check the"
                " gnt-cluster(8) man page before proceeding. Continue?")
    if not AskUser(usertext):
      return 1

  return bootstrap.MasterFailover(no_voting=opts.no_voting)
def MasterPing(opts, args):
  """Checks if the master is alive.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  try:
    cl = GetClient()
    cl.QueryClusterInfo()
    return 0
  except Exception: # pylint: disable=W0703
    # any failure to reach the master daemon means "not alive"
    return 1
def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the tag pattern
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpTagsSearch(pattern=args[0])
  result = SubmitOpCode(op, opts=opts)
  if not result:
    return 1
  result = list(result)
  result.sort()
  for path, tag in result:
    ToStdout("%s %s", path, tag)
686 def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
687 """Reads and verifies an X509 certificate.
689 @type cert_filename: string
690 @param cert_filename: the path of the file containing the certificate to
691 verify encoded in PEM format
692 @type verify_private_key: bool
693 @param verify_private_key: whether to verify the private key in addition to
694 the public certificate
696 @return: a string containing the PEM-encoded certificate.
700 pem = utils.ReadFile(cert_filename)
702 raise errors.X509CertError(cert_filename,
703 "Unable to read certificate: %s" % str(err))
706 OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
707 except Exception, err:
708 raise errors.X509CertError(cert_filename,
709 "Unable to load certificate: %s" % str(err))
711 if verify_private_key:
713 OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem)
714 except Exception, err:
715 raise errors.X509CertError(cert_filename,
716 "Unable to load private key: %s" % str(err))
721 def _RenewCrypto(new_cluster_cert, new_rapi_cert, #pylint: disable=R0911
722 rapi_cert_filename, new_spice_cert, spice_cert_filename,
723 spice_cacert_filename, new_confd_hmac_key, new_cds,
724 cds_filename, force):
725 """Renews cluster certificates, keys and secrets.
727 @type new_cluster_cert: bool
728 @param new_cluster_cert: Whether to generate a new cluster certificate
729 @type new_rapi_cert: bool
730 @param new_rapi_cert: Whether to generate a new RAPI certificate
731 @type rapi_cert_filename: string
732 @param rapi_cert_filename: Path to file containing new RAPI certificate
733 @type new_spice_cert: bool
734 @param new_spice_cert: Whether to generate a new SPICE certificate
735 @type spice_cert_filename: string
736 @param spice_cert_filename: Path to file containing new SPICE certificate
737 @type spice_cacert_filename: string
738 @param spice_cacert_filename: Path to file containing the certificate of the
739 CA that signed the SPICE certificate
740 @type new_confd_hmac_key: bool
741 @param new_confd_hmac_key: Whether to generate a new HMAC key
743 @param new_cds: Whether to generate a new cluster domain secret
744 @type cds_filename: string
745 @param cds_filename: Path to file containing new cluster domain secret
747 @param force: Whether to ask user for confirmation
750 if new_rapi_cert and rapi_cert_filename:
751 ToStderr("Only one of the --new-rapi-certificate and --rapi-certificate"
752 " options can be specified at the same time.")
755 if new_cds and cds_filename:
756 ToStderr("Only one of the --new-cluster-domain-secret and"
757 " --cluster-domain-secret options can be specified at"
761 if new_spice_cert and (spice_cert_filename or spice_cacert_filename):
762 ToStderr("When using --new-spice-certificate, the --spice-certificate"
763 " and --spice-ca-certificate must not be used.")
766 if bool(spice_cacert_filename) ^ bool(spice_cert_filename):
767 ToStderr("Both --spice-certificate and --spice-ca-certificate must be"
771 rapi_cert_pem, spice_cert_pem, spice_cacert_pem = (None, None, None)
773 if rapi_cert_filename:
774 rapi_cert_pem = _ReadAndVerifyCert(rapi_cert_filename, True)
775 if spice_cert_filename:
776 spice_cert_pem = _ReadAndVerifyCert(spice_cert_filename, True)
777 spice_cacert_pem = _ReadAndVerifyCert(spice_cacert_filename)
778 except errors.X509CertError, err:
779 ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
784 cds = utils.ReadFile(cds_filename)
785 except Exception, err: # pylint: disable=W0703
786 ToStderr("Can't load new cluster domain secret from %s: %s" %
787 (cds_filename, str(err)))
793 usertext = ("This requires all daemons on all nodes to be restarted and"
794 " may take some time. Continue?")
795 if not AskUser(usertext):
798 def _RenewCryptoInner(ctx):
799 ctx.feedback_fn("Updating certificates and keys")
800 bootstrap.GenerateClusterCrypto(new_cluster_cert,
805 rapi_cert_pem=rapi_cert_pem,
806 spice_cert_pem=spice_cert_pem,
807 spice_cacert_pem=spice_cacert_pem,
813 files_to_copy.append(constants.NODED_CERT_FILE)
815 if new_rapi_cert or rapi_cert_pem:
816 files_to_copy.append(constants.RAPI_CERT_FILE)
818 if new_spice_cert or spice_cert_pem:
819 files_to_copy.append(constants.SPICE_CERT_FILE)
820 files_to_copy.append(constants.SPICE_CACERT_FILE)
822 if new_confd_hmac_key:
823 files_to_copy.append(constants.CONFD_HMAC_KEY)
826 files_to_copy.append(constants.CLUSTER_DOMAIN_SECRET_FILE)
829 for node_name in ctx.nonmaster_nodes:
830 ctx.feedback_fn("Copying %s to %s" %
831 (", ".join(files_to_copy), node_name))
832 for file_name in files_to_copy:
833 ctx.ssh.CopyFileToNode(node_name, file_name)
835 RunWhileClusterStopped(ToStdout, _RenewCryptoInner)
837 ToStdout("All requested certificates and keys have been replaced."
838 " Running \"gnt-cluster verify\" now is recommended.")
def RenewCrypto(opts, args):
  """Renews cluster certificates, keys and secrets.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # thin CLI wrapper: unpack the options and delegate to _RenewCrypto
  return _RenewCrypto(opts.new_cluster_cert,
                      opts.new_rapi_cert,
                      opts.rapi_cert,
                      opts.new_spice_cert,
                      opts.spice_cert,
                      opts.spice_cacert,
                      opts.new_confd_hmac_key,
                      opts.new_cluster_domain_secret,
                      opts.cluster_domain_secret,
                      opts.force)
def SetClusterParams(opts, args):
  """Modify the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # at least one parameter must be requested, otherwise the opcode would
  # be a no-op
  if not (not opts.lvm_storage or opts.vg_name or
          not opts.drbd_storage or opts.drbd_helper or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams or opts.nicparams or opts.ndparams or
          opts.candidate_pool_size is not None or
          opts.uid_pool is not None or
          opts.maintain_node_health is not None or
          opts.add_uids is not None or
          opts.remove_uids is not None or
          opts.default_iallocator is not None or
          opts.reserved_lvs is not None or
          opts.master_netdev is not None or
          opts.master_netmask is not None or
          opts.prealloc_wipe_disks is not None):
    ToStderr("Please give at least one of the parameters.")
    return 1

  vg_name = opts.vg_name
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  # an empty volume group name disables LVM storage
  if not opts.lvm_storage:
    vg_name = ""

  drbd_helper = opts.drbd_helper
  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    return 1

  # an empty helper disables DRBD storage
  if not opts.drbd_storage:
    drbd_helper = ""

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  # a list of (name, dict) we can pass directly to dict() (or [])
  hvparams = dict(opts.hvparams)
  for hv_params in hvparams.values():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)

  beparams = opts.beparams
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  nicparams = opts.nicparams
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  ndparams = opts.ndparams
  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  mnh = opts.maintain_node_health

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  add_uids = opts.add_uids
  if add_uids is not None:
    add_uids = uidpool.ParseUidPool(add_uids)

  remove_uids = opts.remove_uids
  if remove_uids is not None:
    remove_uids = uidpool.ParseUidPool(remove_uids)

  if opts.reserved_lvs is not None:
    if opts.reserved_lvs == "":
      # an empty string clears the reserved-LV list
      opts.reserved_lvs = []
    else:
      opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")

  if opts.master_netmask is not None:
    try:
      opts.master_netmask = int(opts.master_netmask)
    except ValueError:
      ToStderr("The --master-netmask option expects an int parameter.")
      return 1

  op = opcodes.OpClusterSetParams(vg_name=vg_name,
                                  drbd_helper=drbd_helper,
                                  enabled_hypervisors=hvlist,
                                  hvparams=hvparams,
                                  os_hvp=None,
                                  beparams=beparams,
                                  nicparams=nicparams,
                                  ndparams=ndparams,
                                  candidate_pool_size=opts.candidate_pool_size,
                                  maintain_node_health=mnh,
                                  uid_pool=uid_pool,
                                  add_uids=add_uids,
                                  remove_uids=remove_uids,
                                  default_iallocator=opts.default_iallocator,
                                  prealloc_wipe_disks=opts.prealloc_wipe_disks,
                                  master_netdev=opts.master_netdev,
                                  master_netmask=opts.master_netmask,
                                  reserved_lvs=opts.reserved_lvs)
  SubmitOpCode(op, opts=opts)
  return 0
def QueueOps(opts, args):
  """Queue operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()
  if command in ("drain", "undrain"):
    drain_flag = command == "drain"
    client.SetQueueDrainFlag(drain_flag)
  elif command == "info":
    result = client.QueryConfigValues(["drain_flag"])
    if result[0]:
      val = "set"
    else:
      val = "unset"
    ToStdout("The drain flag is %s" % val)
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
def _ShowWatcherPause(until):
  """Print whether (and until when) the watcher is paused.

  @param until: pause end as a Unix timestamp, or None when not paused

  """
  # a pause timestamp in the past counts as "not paused"
  still_paused = until is not None and until >= time.time()
  if still_paused:
    ToStdout("The watcher is paused until %s.", time.ctime(until))
  else:
    ToStdout("The watcher is not paused.")
def WatcherOps(opts, args):
  """Watcher operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()

  if command == "continue":
    client.SetWatcherPause(None)
    ToStdout("The watcher is no longer paused.")

  elif command == "pause":
    if len(args) < 2:
      raise errors.OpPrereqError("Missing pause duration", errors.ECODE_INVAL)

    # duration is relative (e.g. "2h"); convert to an absolute deadline
    result = client.SetWatcherPause(time.time() + ParseTimespec(args[1]))
    _ShowWatcherPause(result)

  elif command == "info":
    result = client.QueryConfigValues(["watcher_pause"])
    _ShowWatcherPause(result[0])

  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
def _OobPower(opts, node_list, power):
  """Puts the node in the list to desired power state.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on
  @param power: True if they should be powered on, False otherwise
  @rtype: bool
  @return: The success of the operation (none failed)

  """
  if power:
    command = constants.OOB_POWER_ON
  else:
    command = constants.OOB_POWER_OFF

  op = opcodes.OpOobCommand(node_names=node_list,
                            command=command,
                            ignore_status=True,
                            timeout=opts.oob_timeout,
                            power_delay=opts.power_delay)
  result = SubmitOpCode(op, opts=opts)
  errs = 0
  for node_result in result:
    (node_tuple, data_tuple) = node_result
    (_, node_name) = node_tuple
    (data_status, _) = data_tuple
    if data_status != constants.RS_NORMAL:
      # ignore_status=True means "unavailable" should never be reported
      assert data_status != constants.RS_UNAVAIL
      errs += 1
      ToStderr("There was a problem changing power for %s, please investigate",
               node_name)

  if errs > 0:
    return False

  return True
def _InstanceStart(opts, inst_list, start):
  """Puts the instances in the list to desired state.

  @param opts: The command line options selected by the user
  @param inst_list: The list of instances to operate on
  @param start: True if they should be started, False for shutdown
  @rtype: bool
  @return: The success of the operation (none failed)

  """
  if start:
    opcls = opcodes.OpInstanceStartup
    text_submit, text_success, text_failed = ("startup", "started", "starting")
  else:
    # bind the shutdown timeout now so the loop below can treat both
    # opcode classes uniformly
    opcls = compat.partial(opcodes.OpInstanceShutdown,
                           timeout=opts.shutdown_timeout)
    text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")

  jex = JobExecutor(opts=opts)

  for inst in inst_list:
    ToStdout("Submit %s of instance %s", text_submit, inst)
    op = opcls(instance_name=inst)
    jex.QueueJob(inst, op)

  results = jex.GetResults()
  bad_cnt = len([1 for (success, _) in results if not success])

  if bad_cnt == 0:
    ToStdout("All instances have been %s successfully", text_success)
  else:
    ToStderr("There were errors while %s instances:\n"
             "%d error(s) out of %d instance(s)", text_failed, bad_cnt,
             len(results))
    return False

  return True
class _RunWhenNodesReachableHelper:
  """Helper class to make shared internal state sharing easier.

  @ivar success: Indicates if all action_cb calls were successful

  """
  def __init__(self, node_list, action_cb, node2ip, port, feedback_fn,
               _ping_fn=netutils.TcpPing, _sleep_fn=time.sleep):
    """Init the object.

    @param node_list: The list of nodes to be reachable
    @param action_cb: Callback called when a new host is reachable
    @type node2ip: dict
    @param node2ip: Node to ip mapping
    @param port: The port to use for the TCP ping
    @param feedback_fn: The function used for feedback
    @param _ping_fn: Function to check reachabilty (for unittest use only)
    @param _sleep_fn: Function to sleep (for unittest use only)

    """
    self.down = set(node_list)
    self.up = set()
    self.node2ip = node2ip
    self.success = True
    self.action_cb = action_cb
    self.port = port
    self.feedback_fn = feedback_fn
    self._ping_fn = _ping_fn
    self._sleep_fn = _sleep_fn

  def __call__(self):
    """When called we run action_cb.

    @raises utils.RetryAgain: When there are still down nodes

    """
    if not self.action_cb(self.up):
      self.success = False

    if self.down:
      # still-unreachable nodes: ask utils.Retry to call us again
      raise utils.RetryAgain()
    else:
      return self.success

  def Wait(self, secs):
    """Checks if a host is up or waits remaining seconds.

    @param secs: The secs remaining

    """
    start = time.time()
    for node in self.down:
      if self._ping_fn(self.node2ip[node], self.port, timeout=_EPO_PING_TIMEOUT,
                       live_port_needed=True):
        self.feedback_fn("Node %s became available" % node)
        self.up.add(node)
        self.down -= self.up
        # If we have a node available there is the possibility to run the
        # action callback successfully, therefore we don't wait and return
        return

    self._sleep_fn(max(0.0, start + secs - time.time()))
def _RunWhenNodesReachable(node_list, action_cb, interval):
  """Run action_cb when nodes become reachable.

  @param node_list: The list of nodes to be reachable
  @param action_cb: Callback called when a new host is reachable
  @param interval: The earliest time to retry
  @rtype: bool
  @return: whether all action_cb invocations succeeded before the timeout

  """
  client = GetClient()
  cluster_info = client.QueryClusterInfo()
  # pick the address family matching the cluster's primary IP version
  if cluster_info["primary_ip_version"] == constants.IP4_VERSION:
    family = netutils.IPAddress.family
  else:
    family = netutils.IP6Address.family

  node2ip = dict((node, netutils.GetHostname(node, family=family).ip)
                 for node in node_list)

  port = netutils.GetDaemonPort(constants.NODED)
  helper = _RunWhenNodesReachableHelper(node_list, action_cb, node2ip, port,
                                        ToStdout)

  try:
    return utils.Retry(helper, interval, _EPO_REACHABLE_TIMEOUT,
                       wait_fn=helper.Wait)
  except utils.RetryTimeout:
    ToStderr("Time exceeded while waiting for nodes to become reachable"
             " again:\n  - %s", "\n  - ".join(helper.down))
    return False
def _MaybeInstanceStartup(opts, inst_map, nodes_online,
                          _instance_start_fn=_InstanceStart):
  """Start the instances conditional based on node_states.

  @param opts: The command line options selected by the user
  @param inst_map: A dict of inst -> nodes mapping
  @param nodes_online: A list of nodes online
  @param _instance_start_fn: Callback to start instances (unittest use only)
  @return: Success of the operation on all instances

  """
  start_inst_list = []
  for (inst, nodes) in inst_map.items():
    if not (nodes - nodes_online):
      # All nodes the instance lives on are back online
      start_inst_list.append(inst)

  # Remove the instances we are about to start from the map, so a later
  # call does not try to start them again
  for inst in start_inst_list:
    del inst_map[inst]

  if start_inst_list:
    return _instance_start_fn(opts, start_inst_list, True)

  # Nothing startable yet is not a failure
  return True
def _EpoOn(opts, full_node_list, node_list, inst_map):
  """Does the actual power on.

  @param opts: The command line options selected by the user
  @param full_node_list: All nodes to operate on (includes nodes not supporting
                         OOB)
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Power the OOB-capable nodes ON (True); this is the power-on path, the
  # power-off direction is handled by _EpoOff
  if node_list and not _OobPower(opts, node_list, True):
    ToStderr("Not all nodes seem to get back up, investigate and start"
             " manually if needed")

  # Wait for the nodes to be back up
  action_cb = compat.partial(_MaybeInstanceStartup, opts, dict(inst_map))

  ToStdout("Waiting until all nodes are available again")
  if not _RunWhenNodesReachable(full_node_list, action_cb, _EPO_PING_INTERVAL):
    ToStderr("Please investigate and start stopped instances manually")
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS
def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Stop instances first; refuse to cut power while any are still running
  if not _InstanceStart(opts, inst_map.keys(), False):
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  # Without this guard the power-off below was unreachable: nothing to
  # power off via OOB means we are done
  if not node_list:
    return constants.EXIT_SUCCESS

  if _OobPower(opts, node_list, False):
    return constants.EXIT_SUCCESS
  else:
    return constants.EXIT_FAILURE
def Epo(opts, args):
  """EPO operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  if opts.groups and opts.show_all:
    ToStderr("Only one of --groups or --all are allowed")
    return constants.EXIT_FAILURE
  elif args and opts.show_all:
    ToStderr("Arguments in combination with --all are not allowed")
    return constants.EXIT_FAILURE

  client = GetClient()

  if opts.groups:
    node_query_list = itertools.chain(*client.QueryGroups(names=args,
                                                          fields=["node_list"],
                                                          use_locking=False))
  else:
    node_query_list = args

  result = client.QueryNodes(names=node_query_list,
                             fields=["name", "master", "pinst_list",
                                     "sinst_list", "powered", "offline"],
                             use_locking=False)

  node_list = []
  inst_map = {}
  for (idx, (node, master, pinsts, sinsts, powered,
             offline)) in enumerate(result):
    # Normalize the node_query_list as well
    if not opts.show_all:
      node_query_list[idx] = node

    # Track the nodes each instance lives on; an offline node contributes
    # no member but still registers the instance
    for inst in (pinsts + sinsts):
      if inst in inst_map:
        if not offline:
          inst_map[inst].add(node)
      elif offline:
        inst_map[inst] = set()
      else:
        inst_map[inst] = set([node])

    if master and opts.on:
      # We ignore the master for turning on the machines, in fact we are
      # already operating on the master at this point :)
      continue
    elif master and not opts.show_all:
      ToStderr("%s is the master node, please do a master-failover to another"
               " node not affected by the EPO or use --all if you intend to"
               " shutdown the whole cluster", node)
      return constants.EXIT_FAILURE
    elif powered is None:
      ToStdout("Node %s does not support out-of-band handling, it can not be"
               " handled in a fully automated manner", node)
    elif powered == opts.on:
      ToStdout("Node %s is already in desired power state, skipping", node)
    elif not offline or (offline and powered):
      node_list.append(node)

  if not opts.force and not ConfirmOperation(node_query_list, "nodes", "epo"):
    return constants.EXIT_FAILURE

  if opts.on:
    return _EpoOn(opts, node_query_list, node_list, inst_map)
  else:
    return _EpoOff(opts, node_list, inst_map)
# Mapping of subcommand name -> (handler, positional args, options,
# usage string, description), consumed by GenericMain
commands = {
  "init": (
    InitCluster, [ArgHost(min=1, max=1)],
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, MASTER_NETMASK_OPT,
     NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT,
     NOMODIFY_SSH_SETUP_OPT, SECONDARY_IP_OPT, VG_NAME_OPT,
     MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
     DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT],
    "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
  "destroy": (
    DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
    "", "Destroy cluster"),
  "rename": (
    RenameCluster, [ArgHost(min=1, max=1)],
    [FORCE_OPT, DRY_RUN_OPT],
    "<new_name>",
    "Renames the cluster"),
  "redist-conf": (
    RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "", "Forces a push of the configuration file and ssconf files"
    " to the nodes in the cluster"),
  "verify": (
    VerifyCluster, ARGS_NONE,
    [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT, IGNORE_ERRORS_OPT],
    "", "Does a check on the cluster configuration"),
  "verify-disks": (
    VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
    "", "Does a check on the cluster disk status"),
  "repair-disk-sizes": (
    RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
    "", "Updates mismatches in recorded disk sizes"),
  "master-failover": (
    MasterFailover, ARGS_NONE, [NOVOTING_OPT],
    "", "Makes the current node the master"),
  "master-ping": (
    MasterPing, ARGS_NONE, [],
    "", "Checks if the master is alive"),
  "version": (
    ShowClusterVersion, ARGS_NONE, [],
    "", "Shows the cluster version"),
  "getmaster": (
    ShowClusterMaster, ARGS_NONE, [],
    "", "Shows the cluster master"),
  "copyfile": (
    ClusterCopyFile, [ArgFile(min=1, max=1)],
    [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
    "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
  "command": (
    RunClusterCommand, [ArgCommand(min=1)],
    [NODE_LIST_OPT, NODEGROUP_OPT],
    "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
  "info": (
    ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
    "[--roman]", "Show cluster configuration"),
  "list-tags": (
    ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
  "add-tags": (
    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "tag...", "Add tags to the cluster"),
  "remove-tags": (
    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "tag...", "Remove tags from the cluster"),
  "search-tags": (
    SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
    "Searches the tags on all objects on"
    " the cluster for a given pattern (regex)"),
  "queue": (
    QueueOps,
    [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
    [], "drain|undrain|info", "Change queue properties"),
  "watcher": (
    WatcherOps,
    [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
     ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
    [],
    "{pause <timespec>|continue|info}", "Change watcher properties"),
  "modify": (
    SetClusterParams, ARGS_NONE,
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
     MASTER_NETMASK_OPT, NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT,
     MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT,
     DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
     RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT],  # NOTE(review): this elided
     # continuation line is reconstructed -- confirm against upstream
    "[opts...]",
    "Alters the parameters of the cluster"),
  "renew-crypto": (
    RenewCrypto, ARGS_NONE,
    [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
     NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT,
     NEW_SPICE_CERT_OPT, SPICE_CERT_OPT, SPICE_CACERT_OPT],
    "[opts...]",
    "Renews cluster certificates, keys and secrets"),
  "epo": (
    Epo, [ArgUnknown()],
    [FORCE_OPT, ON_OPT, GROUPS_OPT, ALL_OPT, OOB_TIMEOUT_OPT,
     SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
    "[opts...] [args]",
    "Performs an emergency power-off on given args"),
  "activate-master-ip": (
    ActivateMasterIp, ARGS_NONE, [], "", "Activates the master IP"),
  "deactivate-master-ip": (
    DeactivateMasterIp, ARGS_NONE, [CONFIRM_OPT], "",
    "Deactivates the master IP"),
  }
#: dictionary with aliases for commands
aliases = {
  "masterfailover": "master-failover",
}
1476 return GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER},