4 # Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
21 """Cluster related commands"""
23 # pylint: disable=W0401,W0613,W0614,C0103
24 # W0401: Wildcard import ganeti.cli
25 # W0613: Unused argument, since all functions follow the same API
26 # W0614: Unused import %s from wildcard import (since we need cli)
27 # C0103: Invalid name gnt-cluster
29 from cStringIO import StringIO
35 from ganeti.cli import *
36 from ganeti import opcodes
37 from ganeti import constants
38 from ganeti import errors
39 from ganeti import utils
40 from ganeti import bootstrap
41 from ganeti import ssh
42 from ganeti import objects
43 from ganeti import uidpool
44 from ganeti import compat
45 from ganeti import netutils
46 from ganeti import pathutils
49 ON_OPT = cli_option("--on", default=False,
50 action="store_true", dest="on",
51 help="Recover from an EPO")
53 GROUPS_OPT = cli_option("--groups", default=False,
54 action="store_true", dest="groups",
55 help="Arguments are node groups instead of nodes")
57 FORCE_FAILOVER = cli_option("--yes-do-it", dest="yes_do_it",
58 help="Override interactive check for --no-voting",
59 default=False, action="store_true")
61 _EPO_PING_INTERVAL = 30 # 30 seconds between pings
62 _EPO_PING_TIMEOUT = 1 # 1 second
63 _EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes
67 def InitCluster(opts, args):
68 """Initialize the cluster.
70 @param opts: the command line options selected by the user
72 @param args: should contain only one element, the desired
75 @return: the desired exit code
78 if not opts.lvm_storage and opts.vg_name:
79 ToStderr("Options --no-lvm-storage and --vg-name conflict.")
82 vg_name = opts.vg_name
83 if opts.lvm_storage and not opts.vg_name:
84 vg_name = constants.DEFAULT_VG
86 if not opts.drbd_storage and opts.drbd_helper:
87 ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
90 drbd_helper = opts.drbd_helper
91 if opts.drbd_storage and not opts.drbd_helper:
92 drbd_helper = constants.DEFAULT_DRBD_HELPER
94 master_netdev = opts.master_netdev
95 if master_netdev is None:
96 master_netdev = constants.DEFAULT_BRIDGE
98 hvlist = opts.enabled_hypervisors
100 hvlist = constants.DEFAULT_ENABLED_HYPERVISOR
101 hvlist = hvlist.split(",")
103 hvparams = dict(opts.hvparams)
104 beparams = opts.beparams
105 nicparams = opts.nicparams
107 diskparams = dict(opts.diskparams)
109 # check the disk template types here, as we cannot rely on the type check done
110 # by the opcode parameter types
111 diskparams_keys = set(diskparams.keys())
112 if not (diskparams_keys <= constants.DISK_TEMPLATES):
113 unknown = utils.NiceSort(diskparams_keys - constants.DISK_TEMPLATES)
114 ToStderr("Disk templates unknown: %s" % utils.CommaJoin(unknown))
117 # prepare beparams dict
118 beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
119 utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
121 # prepare nicparams dict
122 nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
123 utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
125 # prepare ndparams dict
126 if opts.ndparams is None:
127 ndparams = dict(constants.NDC_DEFAULTS)
129 ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams)
130 utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
132 # prepare hvparams dict
133 for hv in constants.HYPER_TYPES:
134 if hv not in hvparams:
136 hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
137 utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)
139 # prepare diskparams dict
140 for templ in constants.DISK_TEMPLATES:
141 if templ not in diskparams:
142 diskparams[templ] = {}
143 diskparams[templ] = objects.FillDict(constants.DISK_DT_DEFAULTS[templ],
145 utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)
147 # prepare ipolicy dict
148 ipolicy = CreateIPolicyFromOpts(
149 ispecs_mem_size=opts.ispecs_mem_size,
150 ispecs_cpu_count=opts.ispecs_cpu_count,
151 ispecs_disk_count=opts.ispecs_disk_count,
152 ispecs_disk_size=opts.ispecs_disk_size,
153 ispecs_nic_count=opts.ispecs_nic_count,
154 minmax_ispecs=opts.ipolicy_bounds_specs,
155 std_ispecs=opts.ipolicy_std_specs,
156 ipolicy_disk_templates=opts.ipolicy_disk_templates,
157 ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
158 ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
161 if opts.candidate_pool_size is None:
162 opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT
164 if opts.mac_prefix is None:
165 opts.mac_prefix = constants.DEFAULT_MAC_PREFIX
167 uid_pool = opts.uid_pool
168 if uid_pool is not None:
169 uid_pool = uidpool.ParseUidPool(uid_pool)
171 if opts.prealloc_wipe_disks is None:
172 opts.prealloc_wipe_disks = False
174 external_ip_setup_script = opts.use_external_mip_script
175 if external_ip_setup_script is None:
176 external_ip_setup_script = False
179 primary_ip_version = int(opts.primary_ip_version)
180 except (ValueError, TypeError), err:
181 ToStderr("Invalid primary ip version value: %s" % str(err))
184 master_netmask = opts.master_netmask
186 if master_netmask is not None:
187 master_netmask = int(master_netmask)
188 except (ValueError, TypeError), err:
189 ToStderr("Invalid master netmask value: %s" % str(err))
193 disk_state = utils.FlatToDict(opts.disk_state)
197 hv_state = dict(opts.hv_state)
199 enabled_disk_templates = opts.enabled_disk_templates
200 if enabled_disk_templates:
201 enabled_disk_templates = enabled_disk_templates.split(",")
203 enabled_disk_templates = list(constants.DEFAULT_ENABLED_DISK_TEMPLATES)
205 bootstrap.InitCluster(cluster_name=args[0],
206 secondary_ip=opts.secondary_ip,
208 mac_prefix=opts.mac_prefix,
209 master_netmask=master_netmask,
210 master_netdev=master_netdev,
211 file_storage_dir=opts.file_storage_dir,
212 shared_file_storage_dir=opts.shared_file_storage_dir,
213 enabled_hypervisors=hvlist,
218 diskparams=diskparams,
220 candidate_pool_size=opts.candidate_pool_size,
221 modify_etc_hosts=opts.modify_etc_hosts,
222 modify_ssh_setup=opts.modify_ssh_setup,
223 maintain_node_health=opts.maintain_node_health,
224 drbd_helper=drbd_helper,
226 default_iallocator=opts.default_iallocator,
227 primary_ip_version=primary_ip_version,
228 prealloc_wipe_disks=opts.prealloc_wipe_disks,
229 use_external_mip_script=external_ip_setup_script,
231 disk_state=disk_state,
232 enabled_disk_templates=enabled_disk_templates,
234 op = opcodes.OpClusterPostInit()
235 SubmitOpCode(op, opts=opts)
240 def DestroyCluster(opts, args):
241 """Destroy the cluster.
243 @param opts: the command line options selected by the user
245 @param args: should be an empty list
247 @return: the desired exit code
250 if not opts.yes_do_it:
251 ToStderr("Destroying a cluster is irreversible. If you really want"
252 " destroy this cluster, supply the --yes-do-it option.")
255 op = opcodes.OpClusterDestroy()
256 master = SubmitOpCode(op, opts=opts)
257 # if we reached this, the opcode didn't fail; we can proceed to
258 # shutdown all the daemons
259 bootstrap.FinalizeClusterDestroy(master)
263 def RenameCluster(opts, args):
264 """Rename the cluster.
266 @param opts: the command line options selected by the user
268 @param args: should contain only one element, the new cluster name
270 @return: the desired exit code
275 (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
279 usertext = ("This will rename the cluster from '%s' to '%s'. If you are"
280 " connected over the network to the cluster name, the"
281 " operation is very dangerous as the IP address will be"
282 " removed from the node and the change may not go through."
283 " Continue?") % (cluster_name, new_name)
284 if not AskUser(usertext):
287 op = opcodes.OpClusterRename(name=new_name)
288 result = SubmitOpCode(op, opts=opts, cl=cl)
291 ToStdout("Cluster renamed from '%s' to '%s'", cluster_name, result)
296 def ActivateMasterIp(opts, args):
297 """Activates the master IP.
300 op = opcodes.OpClusterActivateMasterIp()
305 def DeactivateMasterIp(opts, args):
306 """Deactivates the master IP.
310 usertext = ("This will disable the master IP. All the open connections to"
311 " the master IP will be closed. To reach the master you will"
312 " need to use its node IP."
314 if not AskUser(usertext):
317 op = opcodes.OpClusterDeactivateMasterIp()
322 def RedistributeConfig(opts, args):
323 """Forces push of the cluster configuration.
325 @param opts: the command line options selected by the user
327 @param args: empty list
329 @return: the desired exit code
332 op = opcodes.OpClusterRedistConf()
333 SubmitOrSend(op, opts)
337 def ShowClusterVersion(opts, args):
338 """Write version of ganeti software to the standard output.
340 @param opts: the command line options selected by the user
342 @param args: should be an empty list
344 @return: the desired exit code
347 cl = GetClient(query=True)
348 result = cl.QueryClusterInfo()
349 ToStdout("Software version: %s", result["software_version"])
350 ToStdout("Internode protocol: %s", result["protocol_version"])
351 ToStdout("Configuration format: %s", result["config_version"])
352 ToStdout("OS api version: %s", result["os_api_version"])
353 ToStdout("Export interface: %s", result["export_version"])
357 def ShowClusterMaster(opts, args):
358 """Write name of master node to the standard output.
360 @param opts: the command line options selected by the user
362 @param args: should be an empty list
364 @return: the desired exit code
367 master = bootstrap.GetMaster()
372 def _FormatGroupedParams(paramsdict, roman=False):
373 """Format Grouped parameters (be, nic, disk) by group.
375 @type paramsdict: dict of dicts
376 @param paramsdict: {group: {param: value, ...}, ...}
377 @rtype: dict of dicts
378 @return: copy of the input dictionaries with strings as values
382 for (item, val) in paramsdict.items():
383 if isinstance(val, dict):
384 ret[item] = _FormatGroupedParams(val, roman=roman)
385 elif roman and isinstance(val, int):
386 ret[item] = compat.TryToRoman(val)
392 def ShowClusterConfig(opts, args):
393 """Shows cluster information.
395 @param opts: the command line options selected by the user
397 @param args: should be an empty list
399 @return: the desired exit code
402 cl = GetClient(query=True)
403 result = cl.QueryClusterInfo()
406 tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
409 if result["reserved_lvs"]:
410 reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
412 reserved_lvs = "(none)"
414 enabled_hv = result["enabled_hypervisors"]
415 hvparams = dict((k, v) for k, v in result["hvparams"].iteritems()
419 ("Cluster name", result["name"]),
420 ("Cluster UUID", result["uuid"]),
422 ("Creation time", utils.FormatTime(result["ctime"])),
423 ("Modification time", utils.FormatTime(result["mtime"])),
425 ("Master node", result["master"]),
427 ("Architecture (this node)",
428 "%s (%s)" % (result["architecture"][0], result["architecture"][1])),
432 ("Default hypervisor", result["default_hypervisor"]),
433 ("Enabled hypervisors", utils.CommaJoin(enabled_hv)),
435 ("Hypervisor parameters", _FormatGroupedParams(hvparams)),
437 ("OS-specific hypervisor parameters",
438 _FormatGroupedParams(result["os_hvp"])),
440 ("OS parameters", _FormatGroupedParams(result["osparams"])),
442 ("Hidden OSes", utils.CommaJoin(result["hidden_os"])),
443 ("Blacklisted OSes", utils.CommaJoin(result["blacklisted_os"])),
445 ("Cluster parameters", [
446 ("candidate pool size",
447 compat.TryToRoman(result["candidate_pool_size"],
448 convert=opts.roman_integers)),
449 ("master netdev", result["master_netdev"]),
450 ("master netmask", result["master_netmask"]),
451 ("use external master IP address setup script",
452 result["use_external_mip_script"]),
453 ("lvm volume group", result["volume_group_name"]),
454 ("lvm reserved volumes", reserved_lvs),
455 ("drbd usermode helper", result["drbd_usermode_helper"]),
456 ("file storage path", result["file_storage_dir"]),
457 ("shared file storage path", result["shared_file_storage_dir"]),
458 ("maintenance of node health", result["maintain_node_health"]),
459 ("uid pool", uidpool.FormatUidPool(result["uid_pool"])),
460 ("default instance allocator", result["default_iallocator"]),
461 ("primary ip version", result["primary_ip_version"]),
462 ("preallocation wipe disks", result["prealloc_wipe_disks"]),
463 ("OS search path", utils.CommaJoin(pathutils.OS_SEARCH_PATH)),
464 ("ExtStorage Providers search path",
465 utils.CommaJoin(pathutils.ES_SEARCH_PATH)),
466 ("enabled disk templates",
467 utils.CommaJoin(result["enabled_disk_templates"])),
470 ("Default node parameters",
471 _FormatGroupedParams(result["ndparams"], roman=opts.roman_integers)),
473 ("Default instance parameters",
474 _FormatGroupedParams(result["beparams"], roman=opts.roman_integers)),
476 ("Default nic parameters",
477 _FormatGroupedParams(result["nicparams"], roman=opts.roman_integers)),
479 ("Default disk parameters",
480 _FormatGroupedParams(result["diskparams"], roman=opts.roman_integers)),
482 ("Instance policy - limits for instances",
483 FormatPolicyInfo(result["ipolicy"], None, True)),
486 PrintGenericInfo(info)
490 def ClusterCopyFile(opts, args):
491 """Copy a file from master to some nodes.
493 @param opts: the command line options selected by the user
495 @param args: should contain only one element, the path of
496 the file to be copied
498 @return: the desired exit code
502 if not os.path.exists(filename):
503 raise errors.OpPrereqError("No such filename '%s'" % filename,
508 cluster_name = cl.QueryConfigValues(["cluster_name"])[0]
510 results = GetOnlineNodes(nodes=opts.nodes, cl=cl, filter_master=True,
511 secondary_ips=opts.use_replication_network,
512 nodegroup=opts.nodegroup)
514 srun = ssh.SshRunner(cluster_name)
516 if not srun.CopyFileToNode(node, filename):
517 ToStderr("Copy of file %s to node %s failed", filename, node)
522 def RunClusterCommand(opts, args):
523 """Run a command on some nodes.
525 @param opts: the command line options selected by the user
527 @param args: should contain the command to be run and its arguments
529 @return: the desired exit code
534 command = " ".join(args)
536 nodes = GetOnlineNodes(nodes=opts.nodes, cl=cl, nodegroup=opts.nodegroup)
538 cluster_name, master_node = cl.QueryConfigValues(["cluster_name",
541 srun = ssh.SshRunner(cluster_name=cluster_name)
543 # Make sure master node is at list end
544 if master_node in nodes:
545 nodes.remove(master_node)
546 nodes.append(master_node)
549 result = srun.Run(name, constants.SSH_LOGIN_USER, command)
551 if opts.failure_only and result.exit_code == constants.EXIT_SUCCESS:
552 # Do not output anything for successful commands
555 ToStdout("------------------------------------------------")
556 if opts.show_machine_names:
557 for line in result.output.splitlines():
558 ToStdout("%s: %s", name, line)
560 ToStdout("node: %s", name)
561 ToStdout("%s", result.output)
562 ToStdout("return code = %s", result.exit_code)
567 def VerifyCluster(opts, args):
568 """Verify integrity of cluster, performing various test on nodes.
570 @param opts: the command line options selected by the user
572 @param args: should be an empty list
574 @return: the desired exit code
579 if opts.skip_nplusone_mem:
580 skip_checks.append(constants.VERIFY_NPLUSONE_MEM)
584 op = opcodes.OpClusterVerify(verbose=opts.verbose,
585 error_codes=opts.error_codes,
586 debug_simulate_errors=opts.simulate_errors,
587 skip_checks=skip_checks,
588 ignore_errors=opts.ignore_errors,
589 group_name=opts.nodegroup)
590 result = SubmitOpCode(op, cl=cl, opts=opts)
592 # Keep track of submitted jobs
593 jex = JobExecutor(cl=cl, opts=opts)
595 for (status, job_id) in result[constants.JOB_IDS_KEY]:
596 jex.AddJobId(None, status, job_id)
598 results = jex.GetResults()
600 (bad_jobs, bad_results) = \
602 # Convert iterators to lists
605 map(compat.partial(itertools.ifilterfalse, bool),
606 # Convert result to booleans in a tuple
607 zip(*((job_success, len(op_results) == 1 and op_results[0])
608 for (job_success, op_results) in results)))))
610 if bad_jobs == 0 and bad_results == 0:
611 rcode = constants.EXIT_SUCCESS
613 rcode = constants.EXIT_FAILURE
615 ToStdout("%s job(s) failed while verifying the cluster.", bad_jobs)
620 def VerifyDisks(opts, args):
621 """Verify integrity of cluster disks.
623 @param opts: the command line options selected by the user
625 @param args: should be an empty list
627 @return: the desired exit code
632 op = opcodes.OpClusterVerifyDisks()
634 result = SubmitOpCode(op, cl=cl, opts=opts)
636 # Keep track of submitted jobs
637 jex = JobExecutor(cl=cl, opts=opts)
639 for (status, job_id) in result[constants.JOB_IDS_KEY]:
640 jex.AddJobId(None, status, job_id)
642 retcode = constants.EXIT_SUCCESS
644 for (status, result) in jex.GetResults():
646 ToStdout("Job failed: %s", result)
649 ((bad_nodes, instances, missing), ) = result
651 for node, text in bad_nodes.items():
652 ToStdout("Error gathering data on node %s: %s",
653 node, utils.SafeEncode(text[-400:]))
654 retcode = constants.EXIT_FAILURE
655 ToStdout("You need to fix these nodes first before fixing instances")
657 for iname in instances:
660 op = opcodes.OpInstanceActivateDisks(instance_name=iname)
662 ToStdout("Activating disks for instance '%s'", iname)
663 SubmitOpCode(op, opts=opts, cl=cl)
664 except errors.GenericError, err:
665 nret, msg = FormatError(err)
667 ToStderr("Error activating disks for instance %s: %s", iname, msg)
670 for iname, ival in missing.iteritems():
671 all_missing = compat.all(x[0] in bad_nodes for x in ival)
673 ToStdout("Instance %s cannot be verified as it lives on"
674 " broken nodes", iname)
676 ToStdout("Instance %s has missing logical volumes:", iname)
678 for node, vol in ival:
679 if node in bad_nodes:
680 ToStdout("\tbroken node %s /dev/%s", node, vol)
682 ToStdout("\t%s /dev/%s", node, vol)
684 ToStdout("You need to replace or recreate disks for all the above"
685 " instances if this message persists after fixing broken nodes.")
686 retcode = constants.EXIT_FAILURE
688 ToStdout("No disks need to be activated.")
693 def RepairDiskSizes(opts, args):
694 """Verify sizes of cluster disks.
696 @param opts: the command line options selected by the user
698 @param args: optional list of instances to restrict check to
700 @return: the desired exit code
703 op = opcodes.OpClusterRepairDiskSizes(instances=args)
704 SubmitOpCode(op, opts=opts)
708 def MasterFailover(opts, args):
709 """Failover the master node.
711 This command, when run on a non-master node, will cause the current
712 master to cease being master, and the non-master to become new
715 @param opts: the command line options selected by the user
717 @param args: should be an empty list
719 @return: the desired exit code
722 if opts.no_voting and not opts.yes_do_it:
723 usertext = ("This will perform the failover even if most other nodes"
724 " are down, or if this node is outdated. This is dangerous"
725 " as it can lead to a non-consistent cluster. Check the"
726 " gnt-cluster(8) man page before proceeding. Continue?")
727 if not AskUser(usertext):
730 return bootstrap.MasterFailover(no_voting=opts.no_voting)
733 def MasterPing(opts, args):
734 """Checks if the master is alive.
736 @param opts: the command line options selected by the user
738 @param args: should be an empty list
740 @return: the desired exit code
745 cl.QueryClusterInfo()
747 except Exception: # pylint: disable=W0703
751 def SearchTags(opts, args):
752 """Searches the tags on all the cluster.
754 @param opts: the command line options selected by the user
756 @param args: should contain only one element, the tag pattern
758 @return: the desired exit code
761 op = opcodes.OpTagsSearch(pattern=args[0])
762 result = SubmitOpCode(op, opts=opts)
765 result = list(result)
767 for path, tag in result:
768 ToStdout("%s %s", path, tag)
771 def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
772 """Reads and verifies an X509 certificate.
774 @type cert_filename: string
775 @param cert_filename: the path of the file containing the certificate to
776 verify encoded in PEM format
777 @type verify_private_key: bool
778 @param verify_private_key: whether to verify the private key in addition to
779 the public certificate
781 @return: a string containing the PEM-encoded certificate.
785 pem = utils.ReadFile(cert_filename)
787 raise errors.X509CertError(cert_filename,
788 "Unable to read certificate: %s" % str(err))
791 OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
792 except Exception, err:
793 raise errors.X509CertError(cert_filename,
794 "Unable to load certificate: %s" % str(err))
796 if verify_private_key:
798 OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem)
799 except Exception, err:
800 raise errors.X509CertError(cert_filename,
801 "Unable to load private key: %s" % str(err))
806 def _RenewCrypto(new_cluster_cert, new_rapi_cert, # pylint: disable=R0911
807 rapi_cert_filename, new_spice_cert, spice_cert_filename,
808 spice_cacert_filename, new_confd_hmac_key, new_cds,
809 cds_filename, force):
810 """Renews cluster certificates, keys and secrets.
812 @type new_cluster_cert: bool
813 @param new_cluster_cert: Whether to generate a new cluster certificate
814 @type new_rapi_cert: bool
815 @param new_rapi_cert: Whether to generate a new RAPI certificate
816 @type rapi_cert_filename: string
817 @param rapi_cert_filename: Path to file containing new RAPI certificate
818 @type new_spice_cert: bool
819 @param new_spice_cert: Whether to generate a new SPICE certificate
820 @type spice_cert_filename: string
821 @param spice_cert_filename: Path to file containing new SPICE certificate
822 @type spice_cacert_filename: string
823 @param spice_cacert_filename: Path to file containing the certificate of the
824 CA that signed the SPICE certificate
825 @type new_confd_hmac_key: bool
826 @param new_confd_hmac_key: Whether to generate a new HMAC key
828 @param new_cds: Whether to generate a new cluster domain secret
829 @type cds_filename: string
830 @param cds_filename: Path to file containing new cluster domain secret
832 @param force: Whether to ask user for confirmation
835 if new_rapi_cert and rapi_cert_filename:
836 ToStderr("Only one of the --new-rapi-certificate and --rapi-certificate"
837 " options can be specified at the same time.")
840 if new_cds and cds_filename:
841 ToStderr("Only one of the --new-cluster-domain-secret and"
842 " --cluster-domain-secret options can be specified at"
846 if new_spice_cert and (spice_cert_filename or spice_cacert_filename):
847 ToStderr("When using --new-spice-certificate, the --spice-certificate"
848 " and --spice-ca-certificate must not be used.")
851 if bool(spice_cacert_filename) ^ bool(spice_cert_filename):
852 ToStderr("Both --spice-certificate and --spice-ca-certificate must be"
856 rapi_cert_pem, spice_cert_pem, spice_cacert_pem = (None, None, None)
858 if rapi_cert_filename:
859 rapi_cert_pem = _ReadAndVerifyCert(rapi_cert_filename, True)
860 if spice_cert_filename:
861 spice_cert_pem = _ReadAndVerifyCert(spice_cert_filename, True)
862 spice_cacert_pem = _ReadAndVerifyCert(spice_cacert_filename)
863 except errors.X509CertError, err:
864 ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
869 cds = utils.ReadFile(cds_filename)
870 except Exception, err: # pylint: disable=W0703
871 ToStderr("Can't load new cluster domain secret from %s: %s" %
872 (cds_filename, str(err)))
878 usertext = ("This requires all daemons on all nodes to be restarted and"
879 " may take some time. Continue?")
880 if not AskUser(usertext):
883 def _RenewCryptoInner(ctx):
884 ctx.feedback_fn("Updating certificates and keys")
885 bootstrap.GenerateClusterCrypto(new_cluster_cert,
890 rapi_cert_pem=rapi_cert_pem,
891 spice_cert_pem=spice_cert_pem,
892 spice_cacert_pem=spice_cacert_pem,
898 files_to_copy.append(pathutils.NODED_CERT_FILE)
900 if new_rapi_cert or rapi_cert_pem:
901 files_to_copy.append(pathutils.RAPI_CERT_FILE)
903 if new_spice_cert or spice_cert_pem:
904 files_to_copy.append(pathutils.SPICE_CERT_FILE)
905 files_to_copy.append(pathutils.SPICE_CACERT_FILE)
907 if new_confd_hmac_key:
908 files_to_copy.append(pathutils.CONFD_HMAC_KEY)
911 files_to_copy.append(pathutils.CLUSTER_DOMAIN_SECRET_FILE)
914 for node_name in ctx.nonmaster_nodes:
915 ctx.feedback_fn("Copying %s to %s" %
916 (", ".join(files_to_copy), node_name))
917 for file_name in files_to_copy:
918 ctx.ssh.CopyFileToNode(node_name, file_name)
920 RunWhileClusterStopped(ToStdout, _RenewCryptoInner)
922 ToStdout("All requested certificates and keys have been replaced."
923 " Running \"gnt-cluster verify\" now is recommended.")
928 def RenewCrypto(opts, args):
929 """Renews cluster certificates, keys and secrets.
932 return _RenewCrypto(opts.new_cluster_cert,
938 opts.new_confd_hmac_key,
939 opts.new_cluster_domain_secret,
940 opts.cluster_domain_secret,
944 def SetClusterParams(opts, args):
945 """Modify the cluster.
947 @param opts: the command line options selected by the user
949 @param args: should be an empty list
951 @return: the desired exit code
954 if not (not opts.lvm_storage or opts.vg_name or
955 not opts.drbd_storage or opts.drbd_helper or
956 opts.enabled_hypervisors or opts.hvparams or
957 opts.beparams or opts.nicparams or
958 opts.ndparams or opts.diskparams or
959 opts.candidate_pool_size is not None or
960 opts.uid_pool is not None or
961 opts.maintain_node_health is not None or
962 opts.add_uids is not None or
963 opts.remove_uids is not None or
964 opts.default_iallocator is not None or
965 opts.reserved_lvs is not None or
966 opts.master_netdev is not None or
967 opts.master_netmask is not None or
968 opts.use_external_mip_script is not None or
969 opts.prealloc_wipe_disks is not None or
971 opts.enabled_disk_templates or
973 opts.ipolicy_bounds_specs is not None or
974 opts.ipolicy_std_specs is not None or
975 opts.ipolicy_disk_templates is not None or
976 opts.ipolicy_vcpu_ratio is not None or
977 opts.ipolicy_spindle_ratio is not None or
978 opts.modify_etc_hosts is not None):
979 ToStderr("Please give at least one of the parameters.")
982 vg_name = opts.vg_name
983 if not opts.lvm_storage and opts.vg_name:
984 ToStderr("Options --no-lvm-storage and --vg-name conflict.")
987 if not opts.lvm_storage:
990 drbd_helper = opts.drbd_helper
991 if not opts.drbd_storage and opts.drbd_helper:
992 ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
995 if not opts.drbd_storage:
998 hvlist = opts.enabled_hypervisors
999 if hvlist is not None:
1000 hvlist = hvlist.split(",")
1002 enabled_disk_templates = opts.enabled_disk_templates
1003 if enabled_disk_templates:
1004 enabled_disk_templates = enabled_disk_templates.split(",")
1006 # a list of (name, dict) we can pass directly to dict() (or [])
1007 hvparams = dict(opts.hvparams)
1008 for hv_params in hvparams.values():
1009 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1011 diskparams = dict(opts.diskparams)
1013 for dt_params in diskparams.values():
1014 utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
1016 beparams = opts.beparams
1017 utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
1019 nicparams = opts.nicparams
1020 utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
1022 ndparams = opts.ndparams
1023 if ndparams is not None:
1024 utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
1026 ipolicy = CreateIPolicyFromOpts(
1027 minmax_ispecs=opts.ipolicy_bounds_specs,
1028 std_ispecs=opts.ipolicy_std_specs,
1029 ipolicy_disk_templates=opts.ipolicy_disk_templates,
1030 ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
1031 ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
1034 mnh = opts.maintain_node_health
1036 uid_pool = opts.uid_pool
1037 if uid_pool is not None:
1038 uid_pool = uidpool.ParseUidPool(uid_pool)
1040 add_uids = opts.add_uids
1041 if add_uids is not None:
1042 add_uids = uidpool.ParseUidPool(add_uids)
1044 remove_uids = opts.remove_uids
1045 if remove_uids is not None:
1046 remove_uids = uidpool.ParseUidPool(remove_uids)
1048 if opts.reserved_lvs is not None:
1049 if opts.reserved_lvs == "":
1050 opts.reserved_lvs = []
1052 opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")
1054 if opts.master_netmask is not None:
1056 opts.master_netmask = int(opts.master_netmask)
1058 ToStderr("The --master-netmask option expects an int parameter.")
1061 ext_ip_script = opts.use_external_mip_script
1064 disk_state = utils.FlatToDict(opts.disk_state)
1068 hv_state = dict(opts.hv_state)
1070 op = opcodes.OpClusterSetParams(
1072 drbd_helper=drbd_helper,
1073 enabled_hypervisors=hvlist,
1077 nicparams=nicparams,
1079 diskparams=diskparams,
1081 candidate_pool_size=opts.candidate_pool_size,
1082 maintain_node_health=mnh,
1083 modify_etc_hosts=opts.modify_etc_hosts,
1086 remove_uids=remove_uids,
1087 default_iallocator=opts.default_iallocator,
1088 prealloc_wipe_disks=opts.prealloc_wipe_disks,
1089 master_netdev=opts.master_netdev,
1090 master_netmask=opts.master_netmask,
1091 reserved_lvs=opts.reserved_lvs,
1092 use_external_mip_script=ext_ip_script,
1094 disk_state=disk_state,
1095 enabled_disk_templates=enabled_disk_templates,
1098 SubmitOrSend(op, opts)
1102 def QueueOps(opts, args):
1103 """Queue operations.
1105 @param opts: the command line options selected by the user
1107 @param args: should contain only one element, the subcommand
1109 @return: the desired exit code
1113 client = GetClient()
1114 if command in ("drain", "undrain"):
1115 drain_flag = command == "drain"
1116 client.SetQueueDrainFlag(drain_flag)
1117 elif command == "info":
1118 result = client.QueryConfigValues(["drain_flag"])
1123 ToStdout("The drain flag is %s" % val)
1125 raise errors.OpPrereqError("Command '%s' is not valid." % command,
1131 def _ShowWatcherPause(until):
1132 if until is None or until < time.time():
1133 ToStdout("The watcher is not paused.")
1135 ToStdout("The watcher is paused until %s.", time.ctime(until))
1138 def WatcherOps(opts, args):
1139 """Watcher operations.
1141 @param opts: the command line options selected by the user
1143 @param args: should contain only one element, the subcommand
1145 @return: the desired exit code
1149 client = GetClient()
1151 if command == "continue":
1152 client.SetWatcherPause(None)
1153 ToStdout("The watcher is no longer paused.")
1155 elif command == "pause":
1157 raise errors.OpPrereqError("Missing pause duration", errors.ECODE_INVAL)
1159 result = client.SetWatcherPause(time.time() + ParseTimespec(args[1]))
1160 _ShowWatcherPause(result)
1162 elif command == "info":
1163 result = client.QueryConfigValues(["watcher_pause"])
1164 _ShowWatcherPause(result[0])
1167 raise errors.OpPrereqError("Command '%s' is not valid." % command,
def _OobPower(opts, node_list, power):
  """Puts the nodes in the list to desired power state.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on
  @param power: True if they should be powered on, False otherwise
  @return: The success of the operation (none failed)

  """
  if power:
    command = constants.OOB_POWER_ON
  else:
    command = constants.OOB_POWER_OFF

  op = opcodes.OpOobCommand(node_names=node_list,
                            command=command,
                            ignore_status=True,
                            timeout=opts.oob_timeout,
                            power_delay=opts.power_delay)
  result = SubmitOpCode(op, opts=opts)
  errs = 0
  for node_result in result:
    (node_tuple, data_tuple) = node_result
    (_, node_name) = node_tuple
    (data_status, _) = data_tuple
    if data_status != constants.RS_NORMAL:
      # RS_UNAVAIL cannot happen here as we requested the status explicitly
      assert data_status != constants.RS_UNAVAIL
      errs += 1
      ToStderr("There was a problem changing power for %s, please investigate",
               node_name)

  if errs > 0:
    return False

  return True
def _InstanceStart(opts, inst_list, start, no_remember=False):
  """Puts the instances in the list to desired state.

  @param opts: The command line options selected by the user
  @param inst_list: The list of instances to operate on
  @param start: True if they should be started, False for shutdown
  @param no_remember: If the instance state should be remembered
  @return: The success of the operation (none failed)

  """
  if start:
    opcls = opcodes.OpInstanceStartup
    text_submit, text_success, text_failed = ("startup", "started", "starting")
  else:
    opcls = compat.partial(opcodes.OpInstanceShutdown,
                           timeout=opts.shutdown_timeout,
                           no_remember=no_remember)
    text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")

  jex = JobExecutor(opts=opts)

  for inst in inst_list:
    ToStdout("Submit %s of instance %s", text_submit, inst)
    op = opcls(instance_name=inst)
    jex.QueueJob(inst, op)

  results = jex.GetResults()
  bad_cnt = len([1 for (success, _) in results if not success])

  if bad_cnt == 0:
    ToStdout("All instances have been %s successfully", text_success)
  else:
    ToStderr("There were errors while %s instances:\n"
             "%d error(s) out of %d instance(s)", text_failed, bad_cnt,
             len(results))
    return False

  return True
class _RunWhenNodesReachableHelper:
  """Helper class to make shared internal state sharing easier.

  @ivar success: Indicates if all action_cb calls were successful

  """
  def __init__(self, node_list, action_cb, node2ip, port, feedback_fn,
               _ping_fn=netutils.TcpPing, _sleep_fn=time.sleep):
    """Init the object.

    @param node_list: The list of nodes to be reachable
    @param action_cb: Callback called when a new host is reachable
    @type node2ip: dict
    @param node2ip: Node to ip mapping
    @param port: The port to use for the TCP ping
    @param feedback_fn: The function used for feedback
    @param _ping_fn: Function to check reachabilty (for unittest use only)
    @param _sleep_fn: Function to sleep (for unittest use only)

    """
    self.down = set(node_list)
    self.up = set()
    self.node2ip = node2ip
    self.success = True
    self.action_cb = action_cb
    self.port = port
    self.feedback_fn = feedback_fn
    self._ping_fn = _ping_fn
    self._sleep_fn = _sleep_fn

  def __call__(self):
    """When called we run action_cb.

    @raises utils.RetryAgain: When there are still down nodes

    """
    if not self.action_cb(self.up):
      self.success = False

    if self.down:
      raise utils.RetryAgain()
    else:
      return self.success

  def Wait(self, secs):
    """Checks if a host is up or waits remaining seconds.

    @param secs: The secs remaining

    """
    start = time.time()
    for node in self.down:
      if self._ping_fn(self.node2ip[node], self.port, timeout=_EPO_PING_TIMEOUT,
                       live_port_needed=True):
        self.feedback_fn("Node %s became available" % node)
        self.up.add(node)
        self.down -= self.up
        # If we have a node available there is the possibility to run the
        # action callback successfully, therefore we don't wait and return
        return

    self._sleep_fn(max(0.0, start + secs - time.time()))
def _RunWhenNodesReachable(node_list, action_cb, interval):
  """Run action_cb when nodes become reachable.

  @param node_list: The list of nodes to be reachable
  @param action_cb: Callback called when a new host is reachable
  @param interval: The earliest time to retry

  """
  client = GetClient()
  cluster_info = client.QueryClusterInfo()
  if cluster_info["primary_ip_version"] == constants.IP4_VERSION:
    family = netutils.IPAddress.family
  else:
    family = netutils.IP6Address.family

  node2ip = dict((node, netutils.GetHostname(node, family=family).ip)
                 for node in node_list)

  port = netutils.GetDaemonPort(constants.NODED)
  helper = _RunWhenNodesReachableHelper(node_list, action_cb, node2ip, port,
                                        ToStdout)

  try:
    return utils.Retry(helper, interval, _EPO_REACHABLE_TIMEOUT,
                       wait_fn=helper.Wait)
  except utils.RetryTimeout:
    # Join with a leading newline so every node ends up on its own line
    ToStderr("Time exceeded while waiting for nodes to become reachable"
             " again:\n  - %s", "\n  - ".join(helper.down))
    return False
def _MaybeInstanceStartup(opts, inst_map, nodes_online,
                          _instance_start_fn=_InstanceStart):
  """Start the instances conditional based on node_states.

  @param opts: The command line options selected by the user
  @param inst_map: A dict of inst -> nodes mapping
  @param nodes_online: A list of nodes online
  @param _instance_start_fn: Callback to start instances (unittest use only)
  @return: Success of the operation on all instances

  """
  start_inst_list = []
  for (inst, nodes) in inst_map.items():
    if not (nodes - nodes_online):
      # All nodes the instance lives on are back online
      start_inst_list.append(inst)

  # Drop the instances we are about to start from the pending map so they
  # are not submitted again on the next callback invocation
  for inst in start_inst_list:
    del inst_map[inst]

  if start_inst_list:
    return _instance_start_fn(opts, start_inst_list, True)

  return True
def _EpoOn(opts, full_node_list, node_list, inst_map):
  """Does the actual power on.

  @param opts: The command line options selected by the user
  @param full_node_list: All nodes to operate on (includes nodes not supporting
                         OOB)
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # FIX: pass True (power on); the original passed False, which would have
  # sent an OOB power-*off* to the very nodes we are trying to bring back up
  if node_list and not _OobPower(opts, node_list, True):
    ToStderr("Not all nodes seem to get back up, investigate and start"
             " manually if needed")

  # Wait for the nodes to be back up
  action_cb = compat.partial(_MaybeInstanceStartup, opts, dict(inst_map))

  ToStdout("Waiting until all nodes are available again")
  if not _RunWhenNodesReachable(full_node_list, action_cb, _EPO_PING_INTERVAL):
    ToStderr("Please investigate and start stopped instances manually")
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS
def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  if not _InstanceStart(opts, inst_map.keys(), False, no_remember=True):
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  # Nothing left to power off via OOB
  if not node_list:
    return constants.EXIT_SUCCESS

  if _OobPower(opts, node_list, False):
    return constants.EXIT_SUCCESS
  else:
    return constants.EXIT_FAILURE
def Epo(opts, args, cl=None, _on_fn=_EpoOn, _off_fn=_EpoOff,
        _confirm_fn=ConfirmOperation,
        _stdout_fn=ToStdout, _stderr_fn=ToStderr):
  """EPO operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  if opts.groups and opts.show_all:
    _stderr_fn("Only one of --groups or --all are allowed")
    return constants.EXIT_FAILURE
  elif args and opts.show_all:
    _stderr_fn("Arguments in combination with --all are not allowed")
    return constants.EXIT_FAILURE

  if cl is None:
    cl = GetClient()

  if opts.groups:
    node_query_list = \
      itertools.chain(*cl.QueryGroups(args, ["node_list"], False))
  else:
    node_query_list = args

  result = cl.QueryNodes(node_query_list, ["name", "master", "pinst_list",
                                           "sinst_list", "powered", "offline"],
                         False)

  all_nodes = map(compat.fst, result)
  inst_map = {}
  node_list = []
  for (node, master, pinsts, sinsts, powered, offline) in result:
    if not offline:
      for inst in (pinsts + sinsts):
        # Map every instance to the set of online nodes it lives on; the
        # original initialized new entries with an empty set, silently
        # dropping the first node seen for each instance
        if inst in inst_map:
          inst_map[inst].add(node)
        else:
          inst_map[inst] = set([node])

    if master and opts.on:
      # We ignore the master for turning on the machines, in fact we are
      # already operating on the master at this point :)
      continue
    elif master and not opts.show_all:
      _stderr_fn("%s is the master node, please do a master-failover to another"
                 " node not affected by the EPO or use --all if you intend to"
                 " shutdown the whole cluster", node)
      return constants.EXIT_FAILURE
    elif powered is None:
      _stdout_fn("Node %s does not support out-of-band handling, it can not be"
                 " handled in a fully automated manner", node)
    elif powered == opts.on:
      _stdout_fn("Node %s is already in desired power state, skipping", node)
    elif not offline or (offline and powered):
      node_list.append(node)

  if not (opts.force or _confirm_fn(all_nodes, "nodes", "epo")):
    return constants.EXIT_FAILURE

  if opts.on:
    return _on_fn(opts, all_nodes, node_list, inst_map)
  else:
    return _off_fn(opts, node_list, inst_map)
def _GetCreateCommand(info):
  """Builds the "gnt-cluster init" command matching the given cluster info.

  @param info: dict as returned by the cluster info query
  @return: the command line as a string

  """
  # Restored the buffer creation and the separating space, both missing
  buf = StringIO()
  buf.write("gnt-cluster init")
  PrintIPolicyCommand(buf, info["ipolicy"], False)
  buf.write(" ")
  buf.write(info["name"])
  return buf.getvalue()
def ShowCreateCommand(opts, args):
  """Shows the command that can be used to re-create the cluster.

  Currently it works only for ipolicy specs.

  """
  client = GetClient(query=True)
  info = client.QueryClusterInfo()
  ToStdout(_GetCreateCommand(info))
# Mapping of sub-command name to (handler, argument spec, options, usage
# synopsis, description); consumed by GenericMain
commands = {
  "init": (
    InitCluster, [ArgHost(min=1, max=1)],
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, MASTER_NETMASK_OPT,
     NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT,
     NOMODIFY_SSH_SETUP_OPT, SECONDARY_IP_OPT, VG_NAME_OPT,
     MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
     DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT,
     DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT, ENABLED_DISK_TEMPLATES_OPT,
     IPOLICY_STD_SPECS_OPT] + INSTANCE_POLICY_OPTS + SPLIT_ISPECS_OPTS,
    "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
  "destroy": (
    DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
    "", "Destroy cluster"),
  "rename": (
    RenameCluster, [ArgHost(min=1, max=1)],
    [FORCE_OPT, DRY_RUN_OPT],
    "<new_name>",
    "Renames the cluster"),
  "redist-conf": (
    RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "", "Forces a push of the configuration file and ssconf files"
    " to the nodes in the cluster"),
  "verify": (
    VerifyCluster, ARGS_NONE,
    [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT, IGNORE_ERRORS_OPT],
    "", "Does a check on the cluster configuration"),
  "verify-disks": (
    VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
    "", "Does a check on the cluster disk status"),
  "repair-disk-sizes": (
    RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
    "[instance...]", "Updates mismatches in recorded disk sizes"),
  "master-failover": (
    MasterFailover, ARGS_NONE, [NOVOTING_OPT, FORCE_FAILOVER],
    "", "Makes the current node the master"),
  "master-ping": (
    MasterPing, ARGS_NONE, [],
    "", "Checks if the master is alive"),
  "version": (
    ShowClusterVersion, ARGS_NONE, [],
    "", "Shows the cluster version"),
  "getmaster": (
    ShowClusterMaster, ARGS_NONE, [],
    "", "Shows the cluster master"),
  "copyfile": (
    ClusterCopyFile, [ArgFile(min=1, max=1)],
    [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
    "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
  "command": (
    RunClusterCommand, [ArgCommand(min=1)],
    [NODE_LIST_OPT, NODEGROUP_OPT, SHOW_MACHINE_OPT, FAILURE_ONLY_OPT],
    "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
  "info": (
    ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
    "[--roman]", "Show cluster configuration"),
  "list-tags": (
    ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
  "add-tags": (
    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
    "tag...", "Add tags to the cluster"),
  "remove-tags": (
    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
    "tag...", "Remove tags from the cluster"),
  "search-tags": (
    SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
    "Searches the tags on all objects on"
    " the cluster for a given pattern (regex)"),
  "queue": (
    QueueOps,
    [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
    [], "drain|undrain|info", "Change queue properties"),
  "watcher": (
    WatcherOps,
    [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
     ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
    [],
    "{pause <timespec>|continue|info}", "Change watcher properties"),
  "modify": (
    SetClusterParams, ARGS_NONE,
    [FORCE_OPT,
     BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
     MASTER_NETMASK_OPT, NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT,
     MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT,
     DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
     RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT,
     DISK_STATE_OPT, SUBMIT_OPT, ENABLED_DISK_TEMPLATES_OPT,
     IPOLICY_STD_SPECS_OPT, MODIFY_ETCHOSTS_OPT] + INSTANCE_POLICY_OPTS,
    "[opts...]",
    "Alters the parameters of the cluster"),
  "renew-crypto": (
    RenewCrypto, ARGS_NONE,
    [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
     NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT,
     NEW_SPICE_CERT_OPT, SPICE_CERT_OPT, SPICE_CACERT_OPT],
    "[opts...]",
    "Renews cluster certificates, keys and secrets"),
  "epo": (
    Epo, [ArgUnknown()],
    [FORCE_OPT, ON_OPT, GROUPS_OPT, ALL_OPT, OOB_TIMEOUT_OPT,
     SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
    "[opts...] [args]",
    "Performs an emergency power-off on given args"),
  "activate-master-ip": (
    ActivateMasterIp, ARGS_NONE, [], "", "Activates the master IP"),
  "deactivate-master-ip": (
    DeactivateMasterIp, ARGS_NONE, [CONFIRM_OPT], "",
    "Deactivates the master IP"),
  "show-ispecs-cmd": (
    ShowCreateCommand, ARGS_NONE, [], "",
    "Show the command line to re-create the cluster"),
  }
#: dictionary with aliases for commands
aliases = {
  "masterfailover": "master-failover",
}
1638 return GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER},