4 # Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 """Cluster related commands"""
23 # pylint: disable=W0401,W0613,W0614,C0103
24 # W0401: Wildcard import ganeti.cli
25 # W0613: Unused argument, since all functions follow the same API
26 # W0614: Unused import %s from wildcard import (since we need cli)
27 # C0103: Invalid name gnt-cluster
29 from cStringIO import StringIO
35 from ganeti.cli import *
36 from ganeti import opcodes
37 from ganeti import constants
38 from ganeti import errors
39 from ganeti import utils
40 from ganeti import bootstrap
41 from ganeti import ssh
42 from ganeti import objects
43 from ganeti import uidpool
44 from ganeti import compat
45 from ganeti import netutils
46 from ganeti import pathutils
# Command-line options shared by several of the commands below, plus
# timing constants for the EPO (emergency power off) machinery.

# --on: used by the EPO command to power the cluster back on
ON_OPT = cli_option("--on", default=False,
                    action="store_true", dest="on",
                    help="Recover from an EPO")

# --groups: interpret positional arguments as node group names
GROUPS_OPT = cli_option("--groups", default=False,
                        action="store_true", dest="groups",
                        help="Arguments are node groups instead of nodes")

# --yes-do-it: skip the interactive safety question for --no-voting failover
FORCE_FAILOVER = cli_option("--yes-do-it", dest="yes_do_it",
                            help="Override interactive check for --no-voting",
                            default=False, action="store_true")

_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
_EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes
def InitCluster(opts, args):
  """Initialize the cluster.

  @param opts: the command line options selected by the user
  @param args: should contain only one element, the desired
      cluster name
  @return: the desired exit code

  """
  # NOTE(review): this function appears to have lost several lines
  # (error-path "return 1" statements, "try:"/"else:" keywords and parts of
  # the bootstrap.InitCluster() argument list). The elided spots are flagged
  # below -- restore from VCS before relying on this code.
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    # NOTE(review): "return 1" presumably elided here

  # explicit --vg-name wins; otherwise fall back to the default volume
  # group whenever LVM storage is enabled
  vg_name = opts.vg_name
  if opts.lvm_storage and not opts.vg_name:
    vg_name = constants.DEFAULT_VG

  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    # NOTE(review): "return 1" presumably elided here

  # DRBD usermode helper, defaulted when DRBD storage is enabled but no
  # helper was given explicitly
  drbd_helper = opts.drbd_helper
  if opts.drbd_storage and not opts.drbd_helper:
    drbd_helper = constants.DEFAULT_DRBD_HELPER

  master_netdev = opts.master_netdev
  if master_netdev is None:
    master_netdev = constants.DEFAULT_BRIDGE

  hvlist = opts.enabled_hypervisors
  # NOTE(review): an "if hvlist is None:" guard appears elided before the
  # indented default assignment below
    hvlist = constants.DEFAULT_ENABLED_HYPERVISOR
  hvlist = hvlist.split(",")

  hvparams = dict(opts.hvparams)
  beparams = opts.beparams
  nicparams = opts.nicparams

  diskparams = dict(opts.diskparams)

  # check the disk template types here, as we cannot rely on the type check done
  # by the opcode parameter types
  diskparams_keys = set(diskparams.keys())
  if not (diskparams_keys <= constants.DISK_TEMPLATES):
    unknown = utils.NiceSort(diskparams_keys - constants.DISK_TEMPLATES)
    ToStderr("Disk templates unknown: %s" % utils.CommaJoin(unknown))
    # NOTE(review): "return 1" presumably elided here

  # prepare beparams dict
  beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)

  # prepare nicparams dict
  nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  # prepare ndparams dict
  if opts.ndparams is None:
    ndparams = dict(constants.NDC_DEFAULTS)
  # NOTE(review): "else:" presumably elided here
    ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams)
  utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  # prepare hvparams dict
  for hv in constants.HYPER_TYPES:
    if hv not in hvparams:
      # NOTE(review): "hvparams[hv] = {}" presumably elided here
    hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
    utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)

  # prepare diskparams dict
  for templ in constants.DISK_TEMPLATES:
    if templ not in diskparams:
      diskparams[templ] = {}
    # NOTE(review): the second FillDict argument line and its closing
    # parenthesis appear elided from the following call
    diskparams[templ] = objects.FillDict(constants.DISK_DT_DEFAULTS[templ],
    utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)

  # prepare ipolicy dict
  ipolicy = CreateIPolicyFromOpts(
    ispecs_mem_size=opts.ispecs_mem_size,
    ispecs_cpu_count=opts.ispecs_cpu_count,
    ispecs_disk_count=opts.ispecs_disk_count,
    ispecs_disk_size=opts.ispecs_disk_size,
    ispecs_nic_count=opts.ispecs_nic_count,
    minmax_ispecs=opts.ipolicy_bounds_specs,
    std_ispecs=opts.ipolicy_std_specs,
    ipolicy_disk_templates=opts.ipolicy_disk_templates,
    ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
    # NOTE(review): trailing argument(s) and closing parenthesis elided

  if opts.candidate_pool_size is None:
    opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT

  if opts.mac_prefix is None:
    opts.mac_prefix = constants.DEFAULT_MAC_PREFIX

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  if opts.prealloc_wipe_disks is None:
    opts.prealloc_wipe_disks = False

  external_ip_setup_script = opts.use_external_mip_script
  if external_ip_setup_script is None:
    external_ip_setup_script = False

  # NOTE(review): "try:" presumably elided here
    primary_ip_version = int(opts.primary_ip_version)
  except (ValueError, TypeError), err:
    ToStderr("Invalid primary ip version value: %s" % str(err))
    # NOTE(review): "return 1" presumably elided here

  master_netmask = opts.master_netmask
  # NOTE(review): "try:" presumably elided here
    if master_netmask is not None:
      master_netmask = int(master_netmask)
  except (ValueError, TypeError), err:
    ToStderr("Invalid master netmask value: %s" % str(err))
    # NOTE(review): "return 1" presumably elided here

  # NOTE(review): an "if opts.disk_state:" guard (with an "else" branch
  # defaulting to {}) appears elided around the following assignment
    disk_state = utils.FlatToDict(opts.disk_state)

  hv_state = dict(opts.hv_state)

  enabled_disk_templates = opts.enabled_disk_templates
  if enabled_disk_templates:
    enabled_disk_templates = enabled_disk_templates.split(",")
  # NOTE(review): "else:" presumably elided here
    enabled_disk_templates = list(constants.DEFAULT_ENABLED_DISK_TEMPLATES)

  # NOTE(review): several keyword arguments (vg_name, hvparams, beparams,
  # nicparams, ndparams, ipolicy, uid_pool, hv_state, ...) and the closing
  # parenthesis appear elided from the following call
  bootstrap.InitCluster(cluster_name=args[0],
                        secondary_ip=opts.secondary_ip,
                        mac_prefix=opts.mac_prefix,
                        master_netmask=master_netmask,
                        master_netdev=master_netdev,
                        file_storage_dir=opts.file_storage_dir,
                        shared_file_storage_dir=opts.shared_file_storage_dir,
                        enabled_hypervisors=hvlist,
                        diskparams=diskparams,
                        candidate_pool_size=opts.candidate_pool_size,
                        modify_etc_hosts=opts.modify_etc_hosts,
                        modify_ssh_setup=opts.modify_ssh_setup,
                        maintain_node_health=opts.maintain_node_health,
                        drbd_helper=drbd_helper,
                        default_iallocator=opts.default_iallocator,
                        primary_ip_version=primary_ip_version,
                        prealloc_wipe_disks=opts.prealloc_wipe_disks,
                        use_external_mip_script=external_ip_setup_script,
                        disk_state=disk_state,
                        enabled_disk_templates=enabled_disk_templates,
  # run any post-initialization tasks on the freshly created cluster
  op = opcodes.OpClusterPostInit()
  SubmitOpCode(op, opts=opts)
  # NOTE(review): "return 0" presumably elided at the end
def DestroyCluster(opts, args):
  """Destroy the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # destroying a cluster cannot be undone, so require the explicit
  # --yes-do-it confirmation flag
  if not opts.yes_do_it:
    ToStderr("Destroying a cluster is irreversible. If you really want"
             " destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpClusterDestroy()
  master = SubmitOpCode(op, opts=opts)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master)
  return 0
def RenameCluster(opts, args):
  """Rename the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new cluster name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  # fetch the current name so the confirmation message can show both
  (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])

  new_name = args[0]
  if not opts.force:
    usertext = ("This will rename the cluster from '%s' to '%s'. If you are"
                " connected over the network to the cluster name, the"
                " operation is very dangerous as the IP address will be"
                " removed from the node and the change may not go through."
                " Continue?") % (cluster_name, new_name)
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterRename(name=new_name)
  result = SubmitOpCode(op, opts=opts, cl=cl)

  if result:
    ToStdout("Cluster renamed from '%s' to '%s'", cluster_name, result)

  return 0
def ActivateMasterIp(opts, args):
  """Activates the master IP.

  """
  # no confirmation needed: activating the master IP is harmless
  op = opcodes.OpClusterActivateMasterIp()
  SubmitOpCode(op)
  return 0
def DeactivateMasterIp(opts, args):
  """Deactivates the master IP.

  """
  # taking the master IP down cuts every connection made to it, so ask
  # for confirmation unless --yes/--force was given
  if not opts.confirm:
    usertext = ("This will disable the master IP. All the open connections to"
                " the master IP will be closed. To reach the master you will"
                " need to use its node IP."
                " Continue?")
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterDeactivateMasterIp()
  SubmitOpCode(op)
  return 0
def RedistributeConfig(opts, args):
  """Forces push of the cluster configuration.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: empty list
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRedistConf()
  SubmitOrSend(op, opts)
  return 0
def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # a query client is sufficient (read-only operation)
  cl = GetClient(query=True)
  result = cl.QueryClusterInfo()
  ToStdout("Software version: %s", result["software_version"])
  ToStdout("Internode protocol: %s", result["protocol_version"])
  ToStdout("Configuration format: %s", result["config_version"])
  ToStdout("OS api version: %s", result["os_api_version"])
  ToStdout("Export interface: %s", result["export_version"])
  return constants.EXIT_SUCCESS
def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  master = bootstrap.GetMaster()
  ToStdout(master)
  return 0
372 def _FormatGroupedParams(paramsdict, roman=False):
373 """Format Grouped parameters (be, nic, disk) by group.
375 @type paramsdict: dict of dicts
376 @param paramsdict: {group: {param: value, ...}, ...}
377 @rtype: dict of dicts
378 @return: copy of the input dictionaries with strings as values
382 for (item, val) in paramsdict.items():
383 if isinstance(val, dict):
384 ret[item] = _FormatGroupedParams(val, roman=roman)
385 elif roman and isinstance(val, int):
386 ret[item] = compat.TryToRoman(val)
def ShowClusterConfig(opts, args):
  """Shows cluster information.

  @param opts: the command line options selected by the user
  @param args: should be an empty list
  @return: the desired exit code

  """
  # NOTE(review): parts of this function appear elided -- in particular the
  # "info = [" assignment that should open the list of (name, value) entries
  # below, several closing brackets, and the final "return 0"; restore from
  # VCS before relying on this code.
  cl = GetClient(query=True)
  result = cl.QueryClusterInfo()

  # pre-format the values that need special handling before building the
  # big (name, value) list
  tags = utils.CommaJoin(utils.NiceSort(result["tags"]))

  if result["reserved_lvs"]:
    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
  # NOTE(review): "else:" presumably elided here
    reserved_lvs = "(none)"

  enabled_hv = result["enabled_hypervisors"]
  # only show parameters of hypervisors that are actually enabled
  # NOTE(review): the generator's condition line and closing parenthesis
  # appear elided from the following expression
  hvparams = dict((k, v) for k, v in result["hvparams"].iteritems()

  # NOTE(review): "info = [" presumably elided here; the entries below feed
  # PrintGenericInfo() at the end of the function
    ("Cluster name", result["name"]),
    ("Cluster UUID", result["uuid"]),

    ("Creation time", utils.FormatTime(result["ctime"])),
    ("Modification time", utils.FormatTime(result["mtime"])),

    ("Master node", result["master"]),

    ("Architecture (this node)",
     "%s (%s)" % (result["architecture"][0], result["architecture"][1])),

    ("Default hypervisor", result["default_hypervisor"]),
    ("Enabled hypervisors", utils.CommaJoin(enabled_hv)),

    ("Hypervisor parameters", _FormatGroupedParams(hvparams)),

    ("OS-specific hypervisor parameters",
     _FormatGroupedParams(result["os_hvp"])),

    ("OS parameters", _FormatGroupedParams(result["osparams"])),

    ("Hidden OSes", utils.CommaJoin(result["hidden_os"])),
    ("Blacklisted OSes", utils.CommaJoin(result["blacklisted_os"])),

    ("Cluster parameters", [
      ("candidate pool size",
       compat.TryToRoman(result["candidate_pool_size"],
                         convert=opts.roman_integers)),
      ("master netdev", result["master_netdev"]),
      ("master netmask", result["master_netmask"]),
      ("use external master IP address setup script",
       result["use_external_mip_script"]),
      ("lvm volume group", result["volume_group_name"]),
      ("lvm reserved volumes", reserved_lvs),
      ("drbd usermode helper", result["drbd_usermode_helper"]),
      ("file storage path", result["file_storage_dir"]),
      ("shared file storage path", result["shared_file_storage_dir"]),
      ("maintenance of node health", result["maintain_node_health"]),
      ("uid pool", uidpool.FormatUidPool(result["uid_pool"])),
      ("default instance allocator", result["default_iallocator"]),
      ("primary ip version", result["primary_ip_version"]),
      ("preallocation wipe disks", result["prealloc_wipe_disks"]),
      ("OS search path", utils.CommaJoin(pathutils.OS_SEARCH_PATH)),
      ("ExtStorage Providers search path",
       utils.CommaJoin(pathutils.ES_SEARCH_PATH)),
      ("enabled disk templates",
       utils.CommaJoin(result["enabled_disk_templates"])),
      # NOTE(review): closing "])," of this sub-list presumably elided here

    ("Default node parameters",
     _FormatGroupedParams(result["ndparams"], roman=opts.roman_integers)),

    ("Default instance parameters",
     _FormatGroupedParams(result["beparams"], roman=opts.roman_integers)),

    ("Default nic parameters",
     _FormatGroupedParams(result["nicparams"], roman=opts.roman_integers)),

    ("Default disk parameters",
     _FormatGroupedParams(result["diskparams"], roman=opts.roman_integers)),

    ("Instance policy - limits for instances",
     FormatPolicyInfo(result["ipolicy"], None, True)),
    # NOTE(review): closing "]" of the info list presumably elided here

  PrintGenericInfo(info)
  # NOTE(review): "return 0" presumably elided at the end
def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the path of
      the file to be copied
  @rtype: int
  @return: the desired exit code

  """
  filename = args[0]
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename,
                               errors.ECODE_INVAL)

  cl = GetClient()

  cluster_name = cl.QueryConfigValues(["cluster_name"])[0]

  # the master already has the file, so exclude it from the target list
  results = GetOnlineNodes(nodes=opts.nodes, cl=cl, filter_master=True,
                           secondary_ips=opts.use_replication_network,
                           nodegroup=opts.nodegroup)

  srun = ssh.SshRunner(cluster_name)
  for node in results:
    # best-effort copy: report failures but keep going for the other nodes
    if not srun.CopyFileToNode(node, filename):
      ToStderr("Copy of file %s to node %s failed", filename, node)

  return 0
def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the command to be run and its arguments
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  command = " ".join(args)

  nodes = GetOnlineNodes(nodes=opts.nodes, cl=cl, nodegroup=opts.nodegroup)

  cluster_name, master_node = cl.QueryConfigValues(["cluster_name",
                                                    "master_node"])

  srun = ssh.SshRunner(cluster_name=cluster_name)

  # Make sure master node is at list end
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  for name in nodes:
    result = srun.Run(name, constants.SSH_LOGIN_USER, command)

    if opts.failure_only and result.exit_code == constants.EXIT_SUCCESS:
      # Do not output anything for successful commands
      continue

    ToStdout("------------------------------------------------")
    if opts.show_machine_names:
      # prefix every output line with the node it came from
      for line in result.output.splitlines():
        ToStdout("%s: %s", name, line)
    else:
      ToStdout("node: %s", name)
      ToStdout("%s", result.output)
    ToStdout("return code = %s", result.exit_code)

  return 0
def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various test on nodes.

  @param opts: the command line options selected by the user
  @param args: should be an empty list
  @return: the desired exit code

  """
  # NOTE(review): some lines appear elided from this function (the
  # "skip_checks = []" initializer, the client creation, parts of the
  # map()-based error counting chain and the final "return rcode");
  # flagged below -- restore from VCS.
  if opts.skip_nplusone_mem:
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)

  op = opcodes.OpClusterVerify(verbose=opts.verbose,
                               error_codes=opts.error_codes,
                               debug_simulate_errors=opts.simulate_errors,
                               skip_checks=skip_checks,
                               ignore_errors=opts.ignore_errors,
                               group_name=opts.nodegroup)
  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  results = jex.GetResults()

  # count how many jobs failed and how many per-job results were bad;
  # NOTE(review): the outer "map(len, ... map(list," wrappers of this
  # expression appear elided
  (bad_jobs, bad_results) = \
        # Convert iterators to lists
        map(compat.partial(itertools.ifilterfalse, bool),
            # Convert result to booleans in a tuple
            zip(*((job_success, len(op_results) == 1 and op_results[0])
                  for (job_success, op_results) in results)))))

  if bad_jobs == 0 and bad_results == 0:
    rcode = constants.EXIT_SUCCESS
  # NOTE(review): "else:" presumably elided here
    rcode = constants.EXIT_FAILURE
  # NOTE(review): an "if bad_jobs > 0:" guard presumably elided here
    ToStdout("%s job(s) failed while verifying the cluster.", bad_jobs)
  # NOTE(review): "return rcode" presumably elided at the end
def VerifyDisks(opts, args):
  """Verify integrity of cluster disks.

  @param opts: the command line options selected by the user
  @param args: should be an empty list
  @return: the desired exit code

  """
  # NOTE(review): some lines appear elided from this function (client
  # creation, a few "if"/"else:"/"try:" keywords and the final
  # "return retcode"); flagged below -- restore from VCS.
  op = opcodes.OpClusterVerifyDisks()

  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  retcode = constants.EXIT_SUCCESS

  for (status, result) in jex.GetResults():
    # NOTE(review): an "if not status:" guard (with "continue") presumably
    # elided around the following line
      ToStdout("Job failed: %s", result)

    # each job returns a single (bad_nodes, instances, missing) triple
    ((bad_nodes, instances, missing), ) = result

    for node, text in bad_nodes.items():
      ToStdout("Error gathering data on node %s: %s",
               node, utils.SafeEncode(text[-400:]))
      retcode = constants.EXIT_FAILURE
      ToStdout("You need to fix these nodes first before fixing instances")

    for iname in instances:
      # NOTE(review): an "if iname in missing: continue" guard and a "try:"
      # presumably elided here
      op = opcodes.OpInstanceActivateDisks(instance_name=iname)
        ToStdout("Activating disks for instance '%s'", iname)
        SubmitOpCode(op, opts=opts, cl=cl)
      except errors.GenericError, err:
        nret, msg = FormatError(err)
        # NOTE(review): "retcode |= nret" presumably elided here
        ToStderr("Error activating disks for instance %s: %s", iname, msg)

    # NOTE(review): an "if missing:" guard presumably elided around the
    # following block
      for iname, ival in missing.iteritems():
        all_missing = compat.all(x[0] in bad_nodes for x in ival)
        # NOTE(review): "if all_missing:" presumably elided here
          ToStdout("Instance %s cannot be verified as it lives on"
                   " broken nodes", iname)
        # NOTE(review): "else:" presumably elided here
          ToStdout("Instance %s has missing logical volumes:", iname)
          for node, vol in ival:
            if node in bad_nodes:
              ToStdout("\tbroken node %s /dev/%s", node, vol)
            # NOTE(review): "else:" presumably elided here
              ToStdout("\t%s /dev/%s", node, vol)
      ToStdout("You need to replace or recreate disks for all the above"
               " instances if this message persists after fixing broken nodes.")
      retcode = constants.EXIT_FAILURE
    # NOTE(review): "else:" presumably elided here
      ToStdout("No disks need to be activated.")
  # NOTE(review): "return retcode" presumably elided at the end
def RepairDiskSizes(opts, args):
  """Verify sizes of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: optional list of instances to restrict check to
  @rtype: int
  @return: the desired exit code

  """
  # an empty args list means "all instances"
  op = opcodes.OpClusterRepairDiskSizes(instances=args)
  SubmitOpCode(op, opts=opts)
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become new
  master.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # --no-voting can corrupt the cluster if used carelessly, so require
  # either interactive confirmation or the --yes-do-it override
  if opts.no_voting and not opts.yes_do_it:
    usertext = ("This will perform the failover even if most other nodes"
                " are down, or if this node is outdated. This is dangerous"
                " as it can lead to a non-consistent cluster. Check the"
                " gnt-cluster(8) man page before proceeding. Continue?")
    if not AskUser(usertext):
      return 1

  return bootstrap.MasterFailover(no_voting=opts.no_voting)
def MasterPing(opts, args):
  """Checks if the master is alive.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  try:
    # any successful query proves the master daemon is up and responding
    cl = GetClient()
    cl.QueryClusterInfo()
    return 0
  except Exception: # pylint: disable=W0703
    return 1
def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the tag pattern
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpTagsSearch(pattern=args[0])
  result = SubmitOpCode(op, opts=opts)
  if not result:
    return 1
  # sort for stable, readable output
  result = list(result)
  result.sort()
  for path, tag in result:
    ToStdout("%s %s", path, tag)
771 def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
772 """Reads and verifies an X509 certificate.
774 @type cert_filename: string
775 @param cert_filename: the path of the file containing the certificate to
776 verify encoded in PEM format
777 @type verify_private_key: bool
778 @param verify_private_key: whether to verify the private key in addition to
779 the public certificate
781 @return: a string containing the PEM-encoded certificate.
785 pem = utils.ReadFile(cert_filename)
787 raise errors.X509CertError(cert_filename,
788 "Unable to read certificate: %s" % str(err))
791 OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
792 except Exception, err:
793 raise errors.X509CertError(cert_filename,
794 "Unable to load certificate: %s" % str(err))
796 if verify_private_key:
798 OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem)
799 except Exception, err:
800 raise errors.X509CertError(cert_filename,
801 "Unable to load private key: %s" % str(err))
def _RenewCrypto(new_cluster_cert, new_rapi_cert, # pylint: disable=R0911
                 rapi_cert_filename, new_spice_cert, spice_cert_filename,
                 spice_cacert_filename, new_confd_hmac_key, new_cds,
                 cds_filename, force):
  """Renews cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type rapi_cert_filename: string
  @param rapi_cert_filename: Path to file containing new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type spice_cert_filename: string
  @param spice_cert_filename: Path to file containing new SPICE certificate
  @type spice_cacert_filename: string
  @param spice_cacert_filename: Path to file containing the certificate of the
                                CA that signed the SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type cds_filename: string
  @param cds_filename: Path to file containing new cluster domain secret
  @type force: bool
  @param force: Whether to ask user for confirmation

  """
  # NOTE(review): some lines appear elided from this function (error-path
  # "return 1" statements, "try:"/"if"/"else:" keywords and parts of two
  # argument lists); flagged below -- restore from VCS.
  # mutually exclusive option checks first
  if new_rapi_cert and rapi_cert_filename:
    ToStderr("Only one of the --new-rapi-certificate and --rapi-certificate"
             " options can be specified at the same time.")
    # NOTE(review): "return 1" presumably elided here

  if new_cds and cds_filename:
    ToStderr("Only one of the --new-cluster-domain-secret and"
             " --cluster-domain-secret options can be specified at"
             # NOTE(review): continuation string and "return 1" elided here

  if new_spice_cert and (spice_cert_filename or spice_cacert_filename):
    ToStderr("When using --new-spice-certificate, the --spice-certificate"
             " and --spice-ca-certificate must not be used.")
    # NOTE(review): "return 1" presumably elided here

  # the SPICE certificate and its CA certificate must be given together
  if bool(spice_cacert_filename) ^ bool(spice_cert_filename):
    ToStderr("Both --spice-certificate and --spice-ca-certificate must be"
             # NOTE(review): continuation string and "return 1" elided here

  rapi_cert_pem, spice_cert_pem, spice_cacert_pem = (None, None, None)
  # NOTE(review): "try:" presumably elided here
  if rapi_cert_filename:
    rapi_cert_pem = _ReadAndVerifyCert(rapi_cert_filename, True)
  if spice_cert_filename:
    spice_cert_pem = _ReadAndVerifyCert(spice_cert_filename, True)
    spice_cacert_pem = _ReadAndVerifyCert(spice_cacert_filename)
  except errors.X509CertError, err:
    ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
    # NOTE(review): "return 1" presumably elided here

  # NOTE(review): an "if cds_filename:" guard with "try:" presumably elided
  # around the following read (with an else branch setting cds = None)
    cds = utils.ReadFile(cds_filename)
  except Exception, err: # pylint: disable=W0703
    ToStderr("Can't load new cluster domain secret from %s: %s" %
             (cds_filename, str(err)))

  # NOTE(review): an "if not force:" guard presumably elided here
  usertext = ("This requires all daemons on all nodes to be restarted and"
              " may take some time. Continue?")
  if not AskUser(usertext):
    # NOTE(review): "return 1" presumably elided here

  def _RenewCryptoInner(ctx):
    # runs while the cluster is stopped (see RunWhileClusterStopped below)
    ctx.feedback_fn("Updating certificates and keys")
    # NOTE(review): several positional/keyword arguments of this call
    # appear elided
    bootstrap.GenerateClusterCrypto(new_cluster_cert,
                                    rapi_cert_pem=rapi_cert_pem,
                                    spice_cert_pem=spice_cert_pem,
                                    spice_cacert_pem=spice_cacert_pem,

    # collect the files that must be pushed to all non-master nodes;
    # NOTE(review): the "files_to_copy = []" initializer and the
    # "if new_cluster_cert:" guard appear elided here
      files_to_copy.append(pathutils.NODED_CERT_FILE)

    if new_rapi_cert or rapi_cert_pem:
      files_to_copy.append(pathutils.RAPI_CERT_FILE)

    if new_spice_cert or spice_cert_pem:
      files_to_copy.append(pathutils.SPICE_CERT_FILE)
      files_to_copy.append(pathutils.SPICE_CACERT_FILE)

    if new_confd_hmac_key:
      files_to_copy.append(pathutils.CONFD_HMAC_KEY)

    # NOTE(review): an "if new_cds or cds:" guard presumably elided here
      files_to_copy.append(pathutils.CLUSTER_DOMAIN_SECRET_FILE)

    # distribute the regenerated files to every non-master node
    for node_name in ctx.nonmaster_nodes:
      ctx.feedback_fn("Copying %s to %s" %
                      (", ".join(files_to_copy), node_name))
      for file_name in files_to_copy:
        ctx.ssh.CopyFileToNode(node_name, file_name)

  RunWhileClusterStopped(ToStdout, _RenewCryptoInner)

  ToStdout("All requested certificates and keys have been replaced."
           " Running \"gnt-cluster verify\" now is recommended.")
  # NOTE(review): "return 0" presumably elided at the end
def RenewCrypto(opts, args):
  """Renews cluster certificates, keys and secrets.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # simple wrapper: pass the options through positionally, matching the
  # ten parameters of _RenewCrypto
  return _RenewCrypto(opts.new_cluster_cert,
                      opts.new_rapi_cert,
                      opts.rapi_cert,
                      opts.new_spice_cert,
                      opts.spice_cert,
                      opts.spice_cacert,
                      opts.new_confd_hmac_key,
                      opts.new_cluster_domain_secret,
                      opts.cluster_domain_secret,
                      opts.force)
def SetClusterParams(opts, args):
  """Modify the cluster.

  @param opts: the command line options selected by the user
  @param args: should be an empty list
  @return: the desired exit code

  """
  # NOTE(review): some lines appear elided from this function (error-path
  # "return 1" statements, "try:"/"else:" keywords and parts of the
  # OpClusterSetParams argument list); flagged below -- restore from VCS.
  # require at least one modification option, otherwise there is nothing
  # to do
  if not (not opts.lvm_storage or opts.vg_name or
          not opts.drbd_storage or opts.drbd_helper or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams or opts.nicparams or
          opts.ndparams or opts.diskparams or
          opts.candidate_pool_size is not None or
          opts.uid_pool is not None or
          opts.maintain_node_health is not None or
          opts.add_uids is not None or
          opts.remove_uids is not None or
          opts.default_iallocator is not None or
          opts.reserved_lvs is not None or
          opts.master_netdev is not None or
          opts.master_netmask is not None or
          opts.use_external_mip_script is not None or
          opts.prealloc_wipe_disks is not None or
          # NOTE(review): a condition line appears elided here
          opts.enabled_disk_templates or
          # NOTE(review): a condition line appears elided here
          opts.ipolicy_bounds_specs is not None or
          opts.ipolicy_std_specs is not None or
          opts.ipolicy_disk_templates is not None or
          opts.ipolicy_vcpu_ratio is not None or
          opts.ipolicy_spindle_ratio is not None):
    ToStderr("Please give at least one of the parameters.")
    # NOTE(review): "return 1" presumably elided here

  vg_name = opts.vg_name
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    # NOTE(review): "return 1" presumably elided here

  if not opts.lvm_storage:
    # NOTE(review): body of this guard (resetting vg_name) appears elided

  drbd_helper = opts.drbd_helper
  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    # NOTE(review): "return 1" presumably elided here

  if not opts.drbd_storage:
    # NOTE(review): body of this guard (resetting drbd_helper) appears elided

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  enabled_disk_templates = opts.enabled_disk_templates
  if enabled_disk_templates:
    enabled_disk_templates = enabled_disk_templates.split(",")

  # a list of (name, dict) we can pass directly to dict() (or [])
  hvparams = dict(opts.hvparams)
  for hv_params in hvparams.values():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)

  diskparams = dict(opts.diskparams)

  for dt_params in diskparams.values():
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)

  beparams = opts.beparams
  utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)

  nicparams = opts.nicparams
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  ndparams = opts.ndparams
  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  ipolicy = CreateIPolicyFromOpts(
    minmax_ispecs=opts.ipolicy_bounds_specs,
    std_ispecs=opts.ipolicy_std_specs,
    ipolicy_disk_templates=opts.ipolicy_disk_templates,
    ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
    # NOTE(review): closing parenthesis of this call appears elided

  mnh = opts.maintain_node_health

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  add_uids = opts.add_uids
  if add_uids is not None:
    add_uids = uidpool.ParseUidPool(add_uids)

  remove_uids = opts.remove_uids
  if remove_uids is not None:
    remove_uids = uidpool.ParseUidPool(remove_uids)

  if opts.reserved_lvs is not None:
    if opts.reserved_lvs == "":
      opts.reserved_lvs = []
    # NOTE(review): "else:" presumably elided here
      opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")

  if opts.master_netmask is not None:
    # NOTE(review): "try:" presumably elided here
      opts.master_netmask = int(opts.master_netmask)
    # NOTE(review): an "except (ValueError, TypeError):" line presumably
    # elided here, followed by a "return 1"
      ToStderr("The --master-netmask option expects an int parameter.")

  ext_ip_script = opts.use_external_mip_script

  # NOTE(review): an "if opts.disk_state:" guard (with an else defaulting
  # to {}) appears elided around the following assignment
    disk_state = utils.FlatToDict(opts.disk_state)

  hv_state = dict(opts.hv_state)

  # NOTE(review): several keyword arguments (vg_name, hvparams, beparams,
  # ndparams, ipolicy, uid_pool, add_uids, hv_state, ...) and the closing
  # parenthesis appear elided from the following opcode construction
  op = opcodes.OpClusterSetParams(
    drbd_helper=drbd_helper,
    enabled_hypervisors=hvlist,
    nicparams=nicparams,
    diskparams=diskparams,
    candidate_pool_size=opts.candidate_pool_size,
    maintain_node_health=mnh,
    remove_uids=remove_uids,
    default_iallocator=opts.default_iallocator,
    prealloc_wipe_disks=opts.prealloc_wipe_disks,
    master_netdev=opts.master_netdev,
    master_netmask=opts.master_netmask,
    reserved_lvs=opts.reserved_lvs,
    use_external_mip_script=ext_ip_script,
    disk_state=disk_state,
    enabled_disk_templates=enabled_disk_templates,

  SubmitOrSend(op, opts)
  # NOTE(review): "return 0" presumably elided at the end
def QueueOps(opts, args):
  """Queue operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()
  if command in ("drain", "undrain"):
    # "drain" sets the flag, "undrain" clears it
    drain_flag = command == "drain"
    client.SetQueueDrainFlag(drain_flag)
  elif command == "info":
    result = client.QueryConfigValues(["drain_flag"])
    if result[0]:
      val = "set"
    else:
      val = "unset"
    ToStdout("The drain flag is %s" % val)
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
def _ShowWatcherPause(until):
  """Print the watcher pause state.

  @param until: Unix timestamp until which the watcher is paused, or None

  """
  # a missing or already-expired timestamp means the watcher is running;
  # without the else branch the "paused" message would always be printed
  # (and time.ctime(None) would misreport)
  if until is None or until < time.time():
    ToStdout("The watcher is not paused.")
  else:
    ToStdout("The watcher is paused until %s.", time.ctime(until))
def WatcherOps(opts, args):
  """Watcher operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()

  if command == "continue":
    client.SetWatcherPause(None)
    ToStdout("The watcher is no longer paused.")

  elif command == "pause":
    # "pause" needs a duration argument (e.g. "1h")
    if len(args) < 2:
      raise errors.OpPrereqError("Missing pause duration", errors.ECODE_INVAL)

    result = client.SetWatcherPause(time.time() + ParseTimespec(args[1]))
    _ShowWatcherPause(result)

  elif command == "info":
    result = client.QueryConfigValues(["watcher_pause"])
    _ShowWatcherPause(result[0])

  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
def _OobPower(opts, node_list, power):
  """Puts the node in the list to desired power state.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on
  @param power: True if they should be powered on, False otherwise
  @return: The success of the operation (none failed)

  """
  if power:
    command = constants.OOB_POWER_ON
  else:
    command = constants.OOB_POWER_OFF

  op = opcodes.OpOobCommand(node_names=node_list,
                            command=command,
                            ignore_status=True,
                            timeout=opts.oob_timeout,
                            power_delay=opts.power_delay)
  result = SubmitOpCode(op, opts=opts)

  success = True

  for node_result in result:
    (node_tuple, data_tuple) = node_result
    (_, node_name) = node_tuple
    (data_status, _) = data_tuple
    if data_status != constants.RS_NORMAL:
      assert data_status != constants.RS_UNAVAIL
      success = False
      ToStderr("There was a problem changing power for %s, please investigate",
               node_name)

  return success
def _InstanceStart(opts, inst_list, start, no_remember=False):
  """Puts the instances in the list to desired state.

  @param opts: The command line options selected by the user
  @param inst_list: The list of instances to operate on
  @param start: True if they should be started, False for shutdown
  @param no_remember: If the instance state should be remembered
  @return: The success of the operation (none failed)

  """
  if start:
    opcls = opcodes.OpInstanceStartup
    text_submit, text_success, text_failed = ("startup", "started", "starting")
  else:
    opcls = compat.partial(opcodes.OpInstanceShutdown,
                           timeout=opts.shutdown_timeout,
                           no_remember=no_remember)
    text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")

  jex = JobExecutor(opts=opts)

  for inst in inst_list:
    ToStdout("Submit %s of instance %s", text_submit, inst)
    op = opcls(instance_name=inst)
    jex.QueueJob(inst, op)

  results = jex.GetResults()
  bad_cnt = len([1 for (success, _) in results if not success])

  if bad_cnt == 0:
    ToStdout("All instances have been %s successfully", text_success)
  else:
    ToStderr("There were errors while %s instances:\n"
             "%d error(s) out of %d instance(s)", text_failed, bad_cnt,
             len(results))
    return False

  return True
class _RunWhenNodesReachableHelper:
  """Helper class to make shared internal state sharing easier.

  @ivar success: Indicates if all action_cb calls were successful

  """
  def __init__(self, node_list, action_cb, node2ip, port, feedback_fn,
               _ping_fn=netutils.TcpPing, _sleep_fn=time.sleep):
    """Init the object.

    @param node_list: The list of nodes to be reachable
    @param action_cb: Callback called when a new host is reachable
    @type node2ip: dict
    @param node2ip: Node to ip mapping
    @param port: The port to use for the TCP ping
    @param feedback_fn: The function used for feedback
    @param _ping_fn: Function to check reachabilty (for unittest use only)
    @param _sleep_fn: Function to sleep (for unittest use only)

    """
    self.down = set(node_list)
    self.up = set()
    self.node2ip = node2ip
    self.success = True
    self.action_cb = action_cb
    self.port = port
    self.feedback_fn = feedback_fn
    self._ping_fn = _ping_fn
    self._sleep_fn = _sleep_fn

  def __call__(self):
    """When called we run action_cb.

    @raises utils.RetryAgain: When there are still down nodes

    """
    if not self.action_cb(self.up):
      self.success = False

    if self.down:
      raise utils.RetryAgain()
    else:
      return self.success

  def Wait(self, secs):
    """Checks if a host is up or waits remaining seconds.

    @param secs: The secs remaining

    """
    start = time.time()
    for node in self.down:
      if self._ping_fn(self.node2ip[node], self.port, timeout=_EPO_PING_TIMEOUT,
                       live_port_needed=True):
        self.feedback_fn("Node %s became available" % node)
        self.up.add(node)
        self.down -= self.up
        # If we have a node available there is the possibility to run the
        # action callback successfully, therefore we don't wait and return
        return

    self._sleep_fn(max(0.0, start + secs - time.time()))
def _RunWhenNodesReachable(node_list, action_cb, interval):
  """Run action_cb when nodes become reachable.

  @param node_list: The list of nodes to be reachable
  @param action_cb: Callback called when a new host is reachable
  @param interval: The earliest time to retry

  """
  client = GetClient()
  cluster_info = client.QueryClusterInfo()
  if cluster_info["primary_ip_version"] == constants.IP4_VERSION:
    # Resolve node names with the address family the cluster actually uses
    family = netutils.IP4Address.family
  else:
    family = netutils.IP6Address.family

  node2ip = dict((node, netutils.GetHostname(node, family=family).ip)
                 for node in node_list)

  port = netutils.GetDaemonPort(constants.NODED)
  helper = _RunWhenNodesReachableHelper(node_list, action_cb, node2ip, port,
                                        ToStdout)

  try:
    return utils.Retry(helper, interval, _EPO_REACHABLE_TIMEOUT,
                       wait_fn=helper.Wait)
  except utils.RetryTimeout:
    ToStderr("Time exceeded while waiting for nodes to become reachable"
             " again:\n - %s", " - ".join(helper.down))
    return False
def _MaybeInstanceStartup(opts, inst_map, nodes_online,
                          _instance_start_fn=_InstanceStart):
  """Start the instances conditional based on node_states.

  @param opts: The command line options selected by the user
  @param inst_map: A dict of inst -> nodes mapping
  @param nodes_online: A list of nodes online
  @param _instance_start_fn: Callback to start instances (unittest use only)
  @return: Success of the operation on all instances

  """
  start_inst_list = []
  for (inst, nodes) in inst_map.items():
    if not (nodes - nodes_online):
      # All nodes the instance lives on are back online
      start_inst_list.append(inst)

  # Forget the instances we are about to start so they are not retried
  for inst in start_inst_list:
    del inst_map[inst]

  if start_inst_list:
    return _instance_start_fn(opts, start_inst_list, True)

  return True
def _EpoOn(opts, full_node_list, node_list, inst_map):
  """Does the actual power on.

  @param opts: The command line options selected by the user
  @param full_node_list: All nodes to operate on (includes nodes not supporting
                         OOB)
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # This is the power-ON path, so the nodes must be powered on (True)
  if node_list and not _OobPower(opts, node_list, True):
    ToStderr("Not all nodes seem to get back up, investigate and start"
             " manually if needed")

  # Wait for the nodes to be back up
  action_cb = compat.partial(_MaybeInstanceStartup, opts, dict(inst_map))

  ToStdout("Waiting until all nodes are available again")
  if not _RunWhenNodesReachable(full_node_list, action_cb, _EPO_PING_INTERVAL):
    ToStderr("Please investigate and start stopped instances manually")
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS
def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Stop all instances first; their state must not be remembered so that
  # _EpoOn can decide later what to start
  if not _InstanceStart(opts, inst_map.keys(), False, no_remember=True):
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  if not node_list:
    # No OOB-capable nodes to power off
    return constants.EXIT_SUCCESS

  if _OobPower(opts, node_list, False):
    return constants.EXIT_SUCCESS
  else:
    return constants.EXIT_FAILURE
def Epo(opts, args, cl=None, _on_fn=_EpoOn, _off_fn=_EpoOff,
        _confirm_fn=ConfirmOperation,
        _stdout_fn=ToStdout, _stderr_fn=ToStderr):
  """EPO operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  if opts.groups and opts.show_all:
    _stderr_fn("Only one of --groups or --all are allowed")
    return constants.EXIT_FAILURE
  elif args and opts.show_all:
    _stderr_fn("Arguments in combination with --all are not allowed")
    return constants.EXIT_FAILURE

  if cl is None:
    cl = GetClient()

  if opts.groups:
    # Arguments are group names; expand them to their member nodes
    node_query_list = \
      itertools.chain(*cl.QueryGroups(args, ["node_list"], False))
  else:
    node_query_list = args

  result = cl.QueryNodes(node_query_list, ["name", "master", "pinst_list",
                                           "sinst_list", "powered", "offline"],
                         False)

  all_nodes = map(compat.fst, result)
  node_list = []
  inst_map = {}
  for (node, master, pinsts, sinsts, powered, offline) in result:
    if not offline:
      for inst in (pinsts + sinsts):
        if inst in inst_map:
          if not offline:
            inst_map[inst].add(node)
        elif offline:
          inst_map[inst] = set()
        else:
          inst_map[inst] = set([node])

    if master and opts.on:
      # We ignore the master for turning on the machines, in fact we are
      # already operating on the master at this point :)
      continue
    elif master and not opts.show_all:
      _stderr_fn("%s is the master node, please do a master-failover to another"
                 " node not affected by the EPO or use --all if you intend to"
                 " shutdown the whole cluster", node)
      return constants.EXIT_FAILURE
    elif powered is None:
      _stdout_fn("Node %s does not support out-of-band handling, it can not be"
                 " handled in a fully automated manner", node)
    elif powered == opts.on:
      _stdout_fn("Node %s is already in desired power state, skipping", node)
    elif not offline or (offline and powered):
      node_list.append(node)

  if not (opts.force or _confirm_fn(all_nodes, "nodes", "epo")):
    return constants.EXIT_FAILURE

  if opts.on:
    return _on_fn(opts, all_nodes, node_list, inst_map)
  else:
    return _off_fn(opts, node_list, inst_map)
def _GetCreateCommand(info):
  """Builds the "gnt-cluster init" command line from cluster info.

  @type info: dict
  @param info: cluster configuration as returned by L{QueryClusterInfo};
      only the C{ipolicy} and C{name} keys are used
  @rtype: string
  @return: the shell command re-creating the cluster's ipolicy

  """
  buf = StringIO()
  buf.write("gnt-cluster init")
  PrintIPolicyCommand(buf, info["ipolicy"], False)
  buf.write(" ")
  buf.write(info["name"])
  return buf.getvalue()
def ShowCreateCommand(opts, args):
  """Shows the command that can be used to re-create the cluster.

  Currently it works only for ipolicy specs.

  """
  cl = GetClient(query=True)
  result = cl.QueryClusterInfo()
  ToStdout(_GetCreateCommand(result))
commands = {
  "init": (
    InitCluster, [ArgHost(min=1, max=1)],
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, MASTER_NETMASK_OPT,
     NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT,
     NOMODIFY_SSH_SETUP_OPT, SECONDARY_IP_OPT, VG_NAME_OPT,
     MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
     DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT,
     DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT, ENABLED_DISK_TEMPLATES_OPT,
     IPOLICY_STD_SPECS_OPT] + INSTANCE_POLICY_OPTS + SPLIT_ISPECS_OPTS,
    "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
  "destroy": (
    DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
    "", "Destroy cluster"),
  "rename": (
    RenameCluster, [ArgHost(min=1, max=1)],
    [FORCE_OPT, DRY_RUN_OPT],
    "<new_name>",
    "Renames the cluster"),
  "redist-conf": (
    RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "", "Forces a push of the configuration file and ssconf files"
    " to the nodes in the cluster"),
  "verify": (
    VerifyCluster, ARGS_NONE,
    [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT, IGNORE_ERRORS_OPT],
    "", "Does a check on the cluster configuration"),
  "verify-disks": (
    VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
    "", "Does a check on the cluster disk status"),
  "repair-disk-sizes": (
    RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
    "[instance...]", "Updates mismatches in recorded disk sizes"),
  "master-failover": (
    MasterFailover, ARGS_NONE, [NOVOTING_OPT, FORCE_FAILOVER],
    "", "Makes the current node the master"),
  "master-ping": (
    MasterPing, ARGS_NONE, [],
    "", "Checks if the master is alive"),
  "version": (
    ShowClusterVersion, ARGS_NONE, [],
    "", "Shows the cluster version"),
  "getmaster": (
    ShowClusterMaster, ARGS_NONE, [],
    "", "Shows the cluster master"),
  "copyfile": (
    ClusterCopyFile, [ArgFile(min=1, max=1)],
    [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
    "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
  "command": (
    RunClusterCommand, [ArgCommand(min=1)],
    [NODE_LIST_OPT, NODEGROUP_OPT, SHOW_MACHINE_OPT, FAILURE_ONLY_OPT],
    "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
  "info": (
    ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
    "[--roman]", "Show cluster configuration"),
  "list-tags": (
    ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
  "add-tags": (
    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
    "tag...", "Add tags to the cluster"),
  "remove-tags": (
    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
    "tag...", "Remove tags from the cluster"),
  "search-tags": (
    SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
    "Searches the tags on all objects on"
    " the cluster for a given pattern (regex)"),
  "queue": (
    QueueOps,
    [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
    [], "drain|undrain|info", "Change queue properties"),
  "watcher": (
    WatcherOps,
    [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
     ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
    [],
    "{pause <timespec>|continue|info}", "Change watcher properties"),
  "modify": (
    SetClusterParams, ARGS_NONE,
    [FORCE_OPT,
     BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
     MASTER_NETMASK_OPT, NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT,
     MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT,
     DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
     RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT,
     DISK_STATE_OPT, SUBMIT_OPT, ENABLED_DISK_TEMPLATES_OPT,
     IPOLICY_STD_SPECS_OPT] + INSTANCE_POLICY_OPTS,
    "[opts...]",
    "Alters the parameters of the cluster"),
  "renew-crypto": (
    RenewCrypto, ARGS_NONE,
    [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
     NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT,
     NEW_SPICE_CERT_OPT, SPICE_CERT_OPT, SPICE_CACERT_OPT],
    "[opts...]",
    "Renews cluster certificates, keys and secrets"),
  "epo": (
    Epo, [ArgUnknown()],
    [FORCE_OPT, ON_OPT, GROUPS_OPT, ALL_OPT, OOB_TIMEOUT_OPT,
     SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
    "[opts...] [args]",
    "Performs an emergency power-off on given args"),
  "activate-master-ip": (
    ActivateMasterIp, ARGS_NONE, [], "", "Activates the master IP"),
  "deactivate-master-ip": (
    DeactivateMasterIp, ARGS_NONE, [CONFIRM_OPT], "",
    "Deactivates the master IP"),
  "show-ispecs-cmd": (
    ShowCreateCommand, ARGS_NONE, [], "",
    "Show the command line to re-create the cluster"),
}
#: dictionary with aliases for commands
aliases = {
  "masterfailover": "master-failover",
}
1636 return GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER},