4 # Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 """Cluster related commands"""
23 # pylint: disable=W0401,W0613,W0614,C0103
24 # W0401: Wildcard import ganeti.cli
25 # W0613: Unused argument, since all functions follow the same API
26 # W0614: Unused import %s from wildcard import (since we need cli)
27 # C0103: Invalid name gnt-cluster
34 from ganeti.cli import *
35 from ganeti import opcodes
36 from ganeti import constants
37 from ganeti import errors
38 from ganeti import utils
39 from ganeti import bootstrap
40 from ganeti import ssh
41 from ganeti import objects
42 from ganeti import uidpool
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import pathutils
#: Option to recover from an EPO (emergency power off)
ON_OPT = cli_option("--on", default=False,
                    action="store_true", dest="on",
                    help="Recover from an EPO")

#: Option to interpret arguments as node group names instead of node names
GROUPS_OPT = cli_option("--groups", default=False,
                        action="store_true", dest="groups",
                        help="Arguments are node groups instead of nodes")

#: Option to prefix every output line with the originating machine's name
# NOTE(review): the listing dropped the "action" line; without it the option
# would not behave as a boolean flag, so it is restored here.
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
_EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes
66 def InitCluster(opts, args):
67 """Initialize the cluster.
69 @param opts: the command line options selected by the user
71 @param args: should contain only one element, the desired
74 @return: the desired exit code
77 if not opts.lvm_storage and opts.vg_name:
78 ToStderr("Options --no-lvm-storage and --vg-name conflict.")
81 vg_name = opts.vg_name
82 if opts.lvm_storage and not opts.vg_name:
83 vg_name = constants.DEFAULT_VG
85 if not opts.drbd_storage and opts.drbd_helper:
86 ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
89 drbd_helper = opts.drbd_helper
90 if opts.drbd_storage and not opts.drbd_helper:
91 drbd_helper = constants.DEFAULT_DRBD_HELPER
93 master_netdev = opts.master_netdev
94 if master_netdev is None:
95 master_netdev = constants.DEFAULT_BRIDGE
97 hvlist = opts.enabled_hypervisors
99 hvlist = constants.DEFAULT_ENABLED_HYPERVISOR
100 hvlist = hvlist.split(",")
102 hvparams = dict(opts.hvparams)
103 beparams = opts.beparams
104 nicparams = opts.nicparams
106 diskparams = dict(opts.diskparams)
108 # check the disk template types here, as we cannot rely on the type check done
109 # by the opcode parameter types
110 diskparams_keys = set(diskparams.keys())
111 if not (diskparams_keys <= constants.DISK_TEMPLATES):
112 unknown = utils.NiceSort(diskparams_keys - constants.DISK_TEMPLATES)
113 ToStderr("Disk templates unknown: %s" % utils.CommaJoin(unknown))
116 # prepare beparams dict
117 beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
118 utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
120 # prepare nicparams dict
121 nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
122 utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
124 # prepare ndparams dict
125 if opts.ndparams is None:
126 ndparams = dict(constants.NDC_DEFAULTS)
128 ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams)
129 utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
131 # prepare hvparams dict
132 for hv in constants.HYPER_TYPES:
133 if hv not in hvparams:
135 hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
136 utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)
138 # prepare diskparams dict
139 for templ in constants.DISK_TEMPLATES:
140 if templ not in diskparams:
141 diskparams[templ] = {}
142 diskparams[templ] = objects.FillDict(constants.DISK_DT_DEFAULTS[templ],
144 utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)
146 # prepare ipolicy dict
147 ipolicy_raw = CreateIPolicyFromOpts(
148 ispecs_mem_size=opts.ispecs_mem_size,
149 ispecs_cpu_count=opts.ispecs_cpu_count,
150 ispecs_disk_count=opts.ispecs_disk_count,
151 ispecs_disk_size=opts.ispecs_disk_size,
152 ispecs_nic_count=opts.ispecs_nic_count,
153 ipolicy_disk_templates=opts.ipolicy_disk_templates,
154 ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
155 ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
157 ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_raw)
159 if opts.candidate_pool_size is None:
160 opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT
162 if opts.mac_prefix is None:
163 opts.mac_prefix = constants.DEFAULT_MAC_PREFIX
165 uid_pool = opts.uid_pool
166 if uid_pool is not None:
167 uid_pool = uidpool.ParseUidPool(uid_pool)
169 if opts.prealloc_wipe_disks is None:
170 opts.prealloc_wipe_disks = False
172 external_ip_setup_script = opts.use_external_mip_script
173 if external_ip_setup_script is None:
174 external_ip_setup_script = False
177 primary_ip_version = int(opts.primary_ip_version)
178 except (ValueError, TypeError), err:
179 ToStderr("Invalid primary ip version value: %s" % str(err))
182 master_netmask = opts.master_netmask
184 if master_netmask is not None:
185 master_netmask = int(master_netmask)
186 except (ValueError, TypeError), err:
187 ToStderr("Invalid master netmask value: %s" % str(err))
191 disk_state = utils.FlatToDict(opts.disk_state)
195 hv_state = dict(opts.hv_state)
197 bootstrap.InitCluster(cluster_name=args[0],
198 secondary_ip=opts.secondary_ip,
200 mac_prefix=opts.mac_prefix,
201 master_netmask=master_netmask,
202 master_netdev=master_netdev,
203 file_storage_dir=opts.file_storage_dir,
204 shared_file_storage_dir=opts.shared_file_storage_dir,
205 enabled_hypervisors=hvlist,
210 diskparams=diskparams,
212 candidate_pool_size=opts.candidate_pool_size,
213 modify_etc_hosts=opts.modify_etc_hosts,
214 modify_ssh_setup=opts.modify_ssh_setup,
215 maintain_node_health=opts.maintain_node_health,
216 drbd_helper=drbd_helper,
218 default_iallocator=opts.default_iallocator,
219 primary_ip_version=primary_ip_version,
220 prealloc_wipe_disks=opts.prealloc_wipe_disks,
221 use_external_mip_script=external_ip_setup_script,
223 disk_state=disk_state,
225 op = opcodes.OpClusterPostInit()
226 SubmitOpCode(op, opts=opts)
def DestroyCluster(opts, args):
  """Destroy the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if not opts.yes_do_it:
    ToStderr("Destroying a cluster is irreversible. If you really want"
             " destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpClusterDestroy()
  master = SubmitOpCode(op, opts=opts)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master)
  return 0
def RenameCluster(opts, args):
  """Rename the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new cluster name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])

  new_name = args[0]
  if not opts.force:
    usertext = ("This will rename the cluster from '%s' to '%s'. If you are"
                " connected over the network to the cluster name, the"
                " operation is very dangerous as the IP address will be"
                " removed from the node and the change may not go through."
                " Continue?") % (cluster_name, new_name)
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterRename(name=new_name)
  result = SubmitOpCode(op, opts=opts, cl=cl)

  if result:
    ToStdout("Cluster renamed from '%s' to '%s'", cluster_name, result)

  return 0
def ActivateMasterIp(opts, args):
  """Activates the master IP.

  """
  op = opcodes.OpClusterActivateMasterIp()
  SubmitOpCode(op)
  return 0
def DeactivateMasterIp(opts, args):
  """Deactivates the master IP.

  """
  # Ask for confirmation unless --yes was given, as closing the master IP
  # drops all open connections to it.
  if not opts.confirm:
    usertext = ("This will disable the master IP. All the open connections to"
                " the master IP will be closed. To reach the master you will"
                " need to use its node IP."
                " Continue?")
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterDeactivateMasterIp()
  SubmitOpCode(op)
  return 0
def RedistributeConfig(opts, args):
  """Forces push of the cluster configuration.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: empty list
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRedistConf()
  SubmitOrSend(op, opts)
  return 0
def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient(query=True)
  result = cl.QueryClusterInfo()
  ToStdout("Software version: %s", result["software_version"])
  ToStdout("Internode protocol: %s", result["protocol_version"])
  ToStdout("Configuration format: %s", result["config_version"])
  ToStdout("OS api version: %s", result["os_api_version"])
  ToStdout("Export interface: %s", result["export_version"])
  return 0
def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  master = bootstrap.GetMaster()
  ToStdout(master)
  return 0
def _PrintGroupedParams(paramsdict, level=1, roman=False):
  """Print Grouped parameters (be, nic, disk) by group.

  @type paramsdict: dict of dicts
  @param paramsdict: {group: {param: value, ...}, ...}
  @type level: int
  @param level: Level of indention

  """
  indent = "  " * level
  for item, val in sorted(paramsdict.items()):
    if isinstance(val, dict):
      # Nested group: print its name and recurse one level deeper
      ToStdout("%s- %s:", indent, item)
      _PrintGroupedParams(val, level=level + 1, roman=roman)
    elif roman and isinstance(val, int):
      ToStdout("%s  %s: %s", indent, item, compat.TryToRoman(val))
    else:
      ToStdout("%s  %s: %s", indent, item, val)
def ShowClusterConfig(opts, args):
  """Shows cluster information.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient(query=True)
  result = cl.QueryClusterInfo()

  ToStdout("Cluster name: %s", result["name"])
  ToStdout("Cluster UUID: %s", result["uuid"])

  ToStdout("Creation time: %s", utils.FormatTime(result["ctime"]))
  ToStdout("Modification time: %s", utils.FormatTime(result["mtime"]))

  ToStdout("Master node: %s", result["master"])

  ToStdout("Architecture (this node): %s (%s)",
           result["architecture"][0], result["architecture"][1])

  if result["tags"]:
    tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
  else:
    tags = "(none)"

  ToStdout("Tags: %s", tags)

  ToStdout("Default hypervisor: %s", result["default_hypervisor"])
  ToStdout("Enabled hypervisors: %s",
           utils.CommaJoin(result["enabled_hypervisors"]))

  ToStdout("Hypervisor parameters:")
  _PrintGroupedParams(result["hvparams"])

  ToStdout("OS-specific hypervisor parameters:")
  _PrintGroupedParams(result["os_hvp"])

  ToStdout("OS parameters:")
  _PrintGroupedParams(result["osparams"])

  ToStdout("Hidden OSes: %s", utils.CommaJoin(result["hidden_os"]))
  ToStdout("Blacklisted OSes: %s", utils.CommaJoin(result["blacklisted_os"]))

  ToStdout("Cluster parameters:")
  ToStdout("  - candidate pool size: %s",
           compat.TryToRoman(result["candidate_pool_size"],
                             convert=opts.roman_integers))
  ToStdout("  - master netdev: %s", result["master_netdev"])
  ToStdout("  - master netmask: %s", result["master_netmask"])
  ToStdout("  - use external master IP address setup script: %s",
           result["use_external_mip_script"])
  ToStdout("  - lvm volume group: %s", result["volume_group_name"])
  if result["reserved_lvs"]:
    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
  else:
    reserved_lvs = "(none)"
  ToStdout("  - lvm reserved volumes: %s", reserved_lvs)
  ToStdout("  - drbd usermode helper: %s", result["drbd_usermode_helper"])
  ToStdout("  - file storage path: %s", result["file_storage_dir"])
  ToStdout("  - shared file storage path: %s",
           result["shared_file_storage_dir"])
  ToStdout("  - maintenance of node health: %s",
           result["maintain_node_health"])
  ToStdout("  - uid pool: %s",
           uidpool.FormatUidPool(result["uid_pool"],
                                 roman=opts.roman_integers))
  ToStdout("  - default instance allocator: %s", result["default_iallocator"])
  ToStdout("  - primary ip version: %d", result["primary_ip_version"])
  ToStdout("  - preallocation wipe disks: %s", result["prealloc_wipe_disks"])
  ToStdout("  - OS search path: %s", utils.CommaJoin(pathutils.OS_SEARCH_PATH))

  ToStdout("Default node parameters:")
  _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)

  ToStdout("Default instance parameters:")
  _PrintGroupedParams(result["beparams"], roman=opts.roman_integers)

  ToStdout("Default nic parameters:")
  _PrintGroupedParams(result["nicparams"], roman=opts.roman_integers)

  ToStdout("Default disk parameters:")
  _PrintGroupedParams(result["diskparams"], roman=opts.roman_integers)

  ToStdout("Instance policy - limits for instances:")
  for key in constants.IPOLICY_ISPECS:
    ToStdout("  - %s", key)
    _PrintGroupedParams(result["ipolicy"][key], roman=opts.roman_integers)
  ToStdout("  - enabled disk templates: %s",
           utils.CommaJoin(result["ipolicy"][constants.IPOLICY_DTS]))
  for key in constants.IPOLICY_PARAMETERS:
    ToStdout("  - %s: %s", key, result["ipolicy"][key])

  return 0
def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the path of
      the file to be copied
  @rtype: int
  @return: the desired exit code

  """
  filename = args[0]
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename,
                               errors.ECODE_INVAL)

  cl = GetClient()

  cluster_name = cl.QueryConfigValues(["cluster_name"])[0]

  results = GetOnlineNodes(nodes=opts.nodes, cl=cl, filter_master=True,
                           secondary_ips=opts.use_replication_network,
                           nodegroup=opts.nodegroup)

  srun = ssh.SshRunner(cluster_name=cluster_name)
  for node in results:
    if not srun.CopyFileToNode(node, filename):
      ToStderr("Copy of file %s to node %s failed", filename, node)

  return 0
def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the command to be run and its arguments
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  command = " ".join(args)

  nodes = GetOnlineNodes(nodes=opts.nodes, cl=cl, nodegroup=opts.nodegroup)

  cluster_name, master_node = cl.QueryConfigValues(["cluster_name",
                                                    "master_node"])

  srun = ssh.SshRunner(cluster_name=cluster_name)

  # Make sure master node is at list end
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  for name in nodes:
    result = srun.Run(name, constants.SSH_LOGIN_USER, command)
    ToStdout("------------------------------------------------")
    if opts.show_machine_names:
      for line in result.output.splitlines():
        ToStdout("%s: %s", name, line)
    else:
      ToStdout("node: %s", name)
      ToStdout("%s", result.output)
    ToStdout("return code = %s", result.exit_code)

  return 0
def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various test on nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  skip_checks = []

  if opts.skip_nplusone_mem:
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)

  cl = GetClient()

  op = opcodes.OpClusterVerify(verbose=opts.verbose,
                               error_codes=opts.error_codes,
                               debug_simulate_errors=opts.simulate_errors,
                               skip_checks=skip_checks,
                               ignore_errors=opts.ignore_errors,
                               group_name=opts.nodegroup)
  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  results = jex.GetResults()

  (bad_jobs, bad_results) = \
    map(len,
        # Convert iterators to lists
        map(list,
            # Count errors
            map(compat.partial(itertools.ifilterfalse, bool),
                # Convert result to booleans in a tuple
                zip(*((job_success, len(op_results) == 1 and op_results[0])
                      for (job_success, op_results) in results)))))

  if bad_jobs == 0 and bad_results == 0:
    rcode = constants.EXIT_SUCCESS
  else:
    rcode = constants.EXIT_FAILURE
    if bad_jobs > 0:
      ToStdout("%s job(s) failed while verifying the cluster.", bad_jobs)

  return rcode
607 def VerifyDisks(opts, args):
608 """Verify integrity of cluster disks.
610 @param opts: the command line options selected by the user
612 @param args: should be an empty list
614 @return: the desired exit code
619 op = opcodes.OpClusterVerifyDisks()
621 result = SubmitOpCode(op, cl=cl, opts=opts)
623 # Keep track of submitted jobs
624 jex = JobExecutor(cl=cl, opts=opts)
626 for (status, job_id) in result[constants.JOB_IDS_KEY]:
627 jex.AddJobId(None, status, job_id)
629 retcode = constants.EXIT_SUCCESS
631 for (status, result) in jex.GetResults():
633 ToStdout("Job failed: %s", result)
636 ((bad_nodes, instances, missing), ) = result
638 for node, text in bad_nodes.items():
639 ToStdout("Error gathering data on node %s: %s",
640 node, utils.SafeEncode(text[-400:]))
641 retcode = constants.EXIT_FAILURE
642 ToStdout("You need to fix these nodes first before fixing instances")
644 for iname in instances:
647 op = opcodes.OpInstanceActivateDisks(instance_name=iname)
649 ToStdout("Activating disks for instance '%s'", iname)
650 SubmitOpCode(op, opts=opts, cl=cl)
651 except errors.GenericError, err:
652 nret, msg = FormatError(err)
654 ToStderr("Error activating disks for instance %s: %s", iname, msg)
657 for iname, ival in missing.iteritems():
658 all_missing = compat.all(x[0] in bad_nodes for x in ival)
660 ToStdout("Instance %s cannot be verified as it lives on"
661 " broken nodes", iname)
663 ToStdout("Instance %s has missing logical volumes:", iname)
665 for node, vol in ival:
666 if node in bad_nodes:
667 ToStdout("\tbroken node %s /dev/%s", node, vol)
669 ToStdout("\t%s /dev/%s", node, vol)
671 ToStdout("You need to replace or recreate disks for all the above"
672 " instances if this message persists after fixing broken nodes.")
673 retcode = constants.EXIT_FAILURE
675 ToStdout("No disks need to be activated.")
def RepairDiskSizes(opts, args):
  """Verify sizes of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: optional list of instances to restrict check to
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRepairDiskSizes(instances=args)
  SubmitOpCode(op, opts=opts)
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become new
  master.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # Failing over without voting is dangerous; require explicit confirmation
  if opts.no_voting:
    usertext = ("This will perform the failover even if most other nodes"
                " are down, or if this node is outdated. This is dangerous"
                " as it can lead to a non-consistent cluster. Check the"
                " gnt-cluster(8) man page before proceeding. Continue?")
    if not AskUser(usertext):
      return 1

  return bootstrap.MasterFailover(no_voting=opts.no_voting)
def MasterPing(opts, args):
  """Checks if the master is alive.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  try:
    cl = GetClient()
    cl.QueryClusterInfo()
    return 0
  except Exception: # pylint: disable=W0703
    return 1
def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the tag pattern
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpTagsSearch(pattern=args[0])
  result = SubmitOpCode(op, opts=opts)
  if not result:
    return 1
  result = list(result)
  result.sort()
  for path, tag in result:
    ToStdout("%s %s", path, tag)
758 def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
759 """Reads and verifies an X509 certificate.
761 @type cert_filename: string
762 @param cert_filename: the path of the file containing the certificate to
763 verify encoded in PEM format
764 @type verify_private_key: bool
765 @param verify_private_key: whether to verify the private key in addition to
766 the public certificate
768 @return: a string containing the PEM-encoded certificate.
772 pem = utils.ReadFile(cert_filename)
774 raise errors.X509CertError(cert_filename,
775 "Unable to read certificate: %s" % str(err))
778 OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
779 except Exception, err:
780 raise errors.X509CertError(cert_filename,
781 "Unable to load certificate: %s" % str(err))
783 if verify_private_key:
785 OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem)
786 except Exception, err:
787 raise errors.X509CertError(cert_filename,
788 "Unable to load private key: %s" % str(err))
793 def _RenewCrypto(new_cluster_cert, new_rapi_cert, # pylint: disable=R0911
794 rapi_cert_filename, new_spice_cert, spice_cert_filename,
795 spice_cacert_filename, new_confd_hmac_key, new_cds,
796 cds_filename, force):
797 """Renews cluster certificates, keys and secrets.
799 @type new_cluster_cert: bool
800 @param new_cluster_cert: Whether to generate a new cluster certificate
801 @type new_rapi_cert: bool
802 @param new_rapi_cert: Whether to generate a new RAPI certificate
803 @type rapi_cert_filename: string
804 @param rapi_cert_filename: Path to file containing new RAPI certificate
805 @type new_spice_cert: bool
806 @param new_spice_cert: Whether to generate a new SPICE certificate
807 @type spice_cert_filename: string
808 @param spice_cert_filename: Path to file containing new SPICE certificate
809 @type spice_cacert_filename: string
810 @param spice_cacert_filename: Path to file containing the certificate of the
811 CA that signed the SPICE certificate
812 @type new_confd_hmac_key: bool
813 @param new_confd_hmac_key: Whether to generate a new HMAC key
815 @param new_cds: Whether to generate a new cluster domain secret
816 @type cds_filename: string
817 @param cds_filename: Path to file containing new cluster domain secret
819 @param force: Whether to ask user for confirmation
822 if new_rapi_cert and rapi_cert_filename:
823 ToStderr("Only one of the --new-rapi-certificate and --rapi-certificate"
824 " options can be specified at the same time.")
827 if new_cds and cds_filename:
828 ToStderr("Only one of the --new-cluster-domain-secret and"
829 " --cluster-domain-secret options can be specified at"
833 if new_spice_cert and (spice_cert_filename or spice_cacert_filename):
834 ToStderr("When using --new-spice-certificate, the --spice-certificate"
835 " and --spice-ca-certificate must not be used.")
838 if bool(spice_cacert_filename) ^ bool(spice_cert_filename):
839 ToStderr("Both --spice-certificate and --spice-ca-certificate must be"
843 rapi_cert_pem, spice_cert_pem, spice_cacert_pem = (None, None, None)
845 if rapi_cert_filename:
846 rapi_cert_pem = _ReadAndVerifyCert(rapi_cert_filename, True)
847 if spice_cert_filename:
848 spice_cert_pem = _ReadAndVerifyCert(spice_cert_filename, True)
849 spice_cacert_pem = _ReadAndVerifyCert(spice_cacert_filename)
850 except errors.X509CertError, err:
851 ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
856 cds = utils.ReadFile(cds_filename)
857 except Exception, err: # pylint: disable=W0703
858 ToStderr("Can't load new cluster domain secret from %s: %s" %
859 (cds_filename, str(err)))
865 usertext = ("This requires all daemons on all nodes to be restarted and"
866 " may take some time. Continue?")
867 if not AskUser(usertext):
870 def _RenewCryptoInner(ctx):
871 ctx.feedback_fn("Updating certificates and keys")
872 bootstrap.GenerateClusterCrypto(new_cluster_cert,
877 rapi_cert_pem=rapi_cert_pem,
878 spice_cert_pem=spice_cert_pem,
879 spice_cacert_pem=spice_cacert_pem,
885 files_to_copy.append(pathutils.NODED_CERT_FILE)
887 if new_rapi_cert or rapi_cert_pem:
888 files_to_copy.append(pathutils.RAPI_CERT_FILE)
890 if new_spice_cert or spice_cert_pem:
891 files_to_copy.append(pathutils.SPICE_CERT_FILE)
892 files_to_copy.append(pathutils.SPICE_CACERT_FILE)
894 if new_confd_hmac_key:
895 files_to_copy.append(pathutils.CONFD_HMAC_KEY)
898 files_to_copy.append(pathutils.CLUSTER_DOMAIN_SECRET_FILE)
901 for node_name in ctx.nonmaster_nodes:
902 ctx.feedback_fn("Copying %s to %s" %
903 (", ".join(files_to_copy), node_name))
904 for file_name in files_to_copy:
905 ctx.ssh.CopyFileToNode(node_name, file_name)
907 RunWhileClusterStopped(ToStdout, _RenewCryptoInner)
909 ToStdout("All requested certificates and keys have been replaced."
910 " Running \"gnt-cluster verify\" now is recommended.")
def RenewCrypto(opts, args):
  """Renews cluster certificates, keys and secrets.

  """
  return _RenewCrypto(opts.new_cluster_cert,
                      opts.new_rapi_cert,
                      opts.rapi_cert,
                      opts.new_spice_cert,
                      opts.spice_cert,
                      opts.spice_cacert,
                      opts.new_confd_hmac_key,
                      opts.new_cluster_domain_secret,
                      opts.cluster_domain_secret,
                      opts.force)
def SetClusterParams(opts, args):
  """Modify the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # Bail out early if no parameter at all was given
  if not (not opts.lvm_storage or opts.vg_name or
          not opts.drbd_storage or opts.drbd_helper or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams or opts.nicparams or
          opts.ndparams or opts.diskparams or
          opts.candidate_pool_size is not None or
          opts.uid_pool is not None or
          opts.maintain_node_health is not None or
          opts.add_uids is not None or
          opts.remove_uids is not None or
          opts.default_iallocator is not None or
          opts.reserved_lvs is not None or
          opts.master_netdev is not None or
          opts.master_netmask is not None or
          opts.use_external_mip_script is not None or
          opts.prealloc_wipe_disks is not None or
          opts.hv_state or
          opts.disk_state or
          opts.ispecs_mem_size or
          opts.ispecs_cpu_count or
          opts.ispecs_disk_count or
          opts.ispecs_disk_size or
          opts.ispecs_nic_count or
          opts.ipolicy_disk_templates is not None or
          opts.ipolicy_vcpu_ratio is not None or
          opts.ipolicy_spindle_ratio is not None):
    ToStderr("Please give at least one of the parameters.")
    return 1

  vg_name = opts.vg_name
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  if not opts.lvm_storage:
    vg_name = ""

  drbd_helper = opts.drbd_helper
  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    return 1

  if not opts.drbd_storage:
    drbd_helper = ""

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  # a list of (name, dict) we can pass directly to dict() (or [])
  hvparams = dict(opts.hvparams)
  for hv_params in hvparams.values():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)

  diskparams = dict(opts.diskparams)

  for dt_params in diskparams.values():
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)

  beparams = opts.beparams
  utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)

  nicparams = opts.nicparams
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  ndparams = opts.ndparams
  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  ipolicy = CreateIPolicyFromOpts(
    ispecs_mem_size=opts.ispecs_mem_size,
    ispecs_cpu_count=opts.ispecs_cpu_count,
    ispecs_disk_count=opts.ispecs_disk_count,
    ispecs_disk_size=opts.ispecs_disk_size,
    ispecs_nic_count=opts.ispecs_nic_count,
    ipolicy_disk_templates=opts.ipolicy_disk_templates,
    ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
    )

  mnh = opts.maintain_node_health

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  add_uids = opts.add_uids
  if add_uids is not None:
    add_uids = uidpool.ParseUidPool(add_uids)

  remove_uids = opts.remove_uids
  if remove_uids is not None:
    remove_uids = uidpool.ParseUidPool(remove_uids)

  if opts.reserved_lvs is not None:
    if opts.reserved_lvs == "":
      opts.reserved_lvs = []
    else:
      opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")

  if opts.master_netmask is not None:
    try:
      opts.master_netmask = int(opts.master_netmask)
    except ValueError:
      ToStderr("The --master-netmask option expects an int parameter.")
      return 1

  ext_ip_script = opts.use_external_mip_script

  if opts.disk_state:
    disk_state = utils.FlatToDict(opts.disk_state)
  else:
    disk_state = {}

  hv_state = dict(opts.hv_state)

  op = opcodes.OpClusterSetParams(vg_name=vg_name,
                                  drbd_helper=drbd_helper,
                                  enabled_hypervisors=hvlist,
                                  hvparams=hvparams,
                                  os_hvp=None,
                                  beparams=beparams,
                                  nicparams=nicparams,
                                  ndparams=ndparams,
                                  diskparams=diskparams,
                                  ipolicy=ipolicy,
                                  candidate_pool_size=opts.candidate_pool_size,
                                  maintain_node_health=mnh,
                                  uid_pool=uid_pool,
                                  add_uids=add_uids,
                                  remove_uids=remove_uids,
                                  default_iallocator=opts.default_iallocator,
                                  prealloc_wipe_disks=opts.prealloc_wipe_disks,
                                  master_netdev=opts.master_netdev,
                                  master_netmask=opts.master_netmask,
                                  reserved_lvs=opts.reserved_lvs,
                                  use_external_mip_script=ext_ip_script,
                                  hv_state=hv_state,
                                  disk_state=disk_state,
                                  )
  SubmitOrSend(op, opts)
  return 0
def QueueOps(opts, args):
  """Queue operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()
  if command in ("drain", "undrain"):
    drain_flag = command == "drain"
    client.SetQueueDrainFlag(drain_flag)
  elif command == "info":
    result = client.QueryConfigValues(["drain_flag"])
    if result[0]:
      val = "set"
    else:
      val = "unset"
    ToStdout("The drain flag is %s" % val)
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
def _ShowWatcherPause(until):
  """Prints the watcher pause state (paused until a time, or not paused)."""
  # A pause timestamp in the past means the pause has already expired
  if until is None or until < time.time():
    ToStdout("The watcher is not paused.")
  else:
    ToStdout("The watcher is paused until %s.", time.ctime(until))
def WatcherOps(opts, args):
  """Watcher operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()

  if command == "continue":
    client.SetWatcherPause(None)
    ToStdout("The watcher is no longer paused.")

  elif command == "pause":
    if len(args) < 2:
      raise errors.OpPrereqError("Missing pause duration", errors.ECODE_INVAL)

    result = client.SetWatcherPause(time.time() + ParseTimespec(args[1]))
    _ShowWatcherPause(result)

  elif command == "info":
    result = client.QueryConfigValues(["watcher_pause"])
    _ShowWatcherPause(result[0])

  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
def _OobPower(opts, node_list, power):
  """Puts the node in the list to desired power state.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on
  @param power: True if they should be powered on, False otherwise
  @return: The success of the operation (none failed)

  """
  if power:
    command = constants.OOB_POWER_ON
  else:
    command = constants.OOB_POWER_OFF

  op = opcodes.OpOobCommand(node_names=node_list,
                            command=command,
                            ignore_status=True,
                            timeout=opts.oob_timeout,
                            power_delay=opts.power_delay)
  result = SubmitOpCode(op, opts=opts)
  errs = 0
  for node_result in result:
    (node_tuple, data_tuple) = node_result
    (_, node_name) = node_tuple
    (data_status, _) = data_tuple
    if data_status != constants.RS_NORMAL:
      assert data_status != constants.RS_UNAVAIL
      errs += 1
      ToStderr("There was a problem changing power for %s, please investigate",
               node_name)

  if errs > 0:
    return False

  return True
def _InstanceStart(opts, inst_list, start, no_remember=False):
  """Puts the instances in the list into the desired run state.

  @param opts: The command line options selected by the user
  @param inst_list: The list of instances to operate on
  @param start: True if they should be started, False for shutdown
  @param no_remember: If the instance state should be remembered
  @return: The success of the operation (none failed)

  """
  if start:
    opcls = opcodes.OpInstanceStartup
    text_submit, text_success, text_failed = ("startup", "started", "starting")
  else:
    opcls = compat.partial(opcodes.OpInstanceShutdown,
                           timeout=opts.shutdown_timeout,
                           no_remember=no_remember)
    text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")

  jex = JobExecutor(opts=opts)

  # Queue one job per instance and let the executor run them in parallel
  for inst in inst_list:
    ToStdout("Submit %s of instance %s", text_submit, inst)
    jex.QueueJob(inst, opcls(instance_name=inst))

  results = jex.GetResults()
  bad_cnt = len([1 for (success, _) in results if not success])

  if bad_cnt == 0:
    ToStdout("All instances have been %s successfully", text_success)
    return True

  ToStderr("There were errors while %s instances:\n"
           "%d error(s) out of %d instance(s)", text_failed, bad_cnt,
           len(results))
  return False
class _RunWhenNodesReachableHelper:
  """Helper class to make shared internal state sharing easier.

  @ivar success: Indicates if all action_cb calls were successful

  """
  def __init__(self, node_list, action_cb, node2ip, port, feedback_fn,
               _ping_fn=netutils.TcpPing, _sleep_fn=time.sleep):
    """Init the object.

    @param node_list: The list of nodes to be reachable
    @param action_cb: Callback called when a new host is reachable
    @type node2ip: dict
    @param node2ip: Node to ip mapping
    @param port: The port to use for the TCP ping
    @param feedback_fn: The function used for feedback
    @param _ping_fn: Function to check reachabilty (for unittest use only)
    @param _sleep_fn: Function to sleep (for unittest use only)

    """
    # Nodes not yet seen alive; shrinks as pings succeed
    self.down = set(node_list)
    # Nodes already seen alive
    self.up = set()
    self.node2ip = node2ip
    # Sticky failure flag: once an action_cb call fails it stays False
    self.success = True
    self.action_cb = action_cb
    self.port = port
    self.feedback_fn = feedback_fn
    self._ping_fn = _ping_fn
    self._sleep_fn = _sleep_fn

  def __call__(self):
    """When called we run action_cb.

    @raises utils.RetryAgain: When there are still down nodes

    """
    if not self.action_cb(self.up):
      self.success = False

    # NOTE(review): retry-while-down / return-success-when-done behaviour
    # reconstructed; confirm against utils.Retry usage
    if self.down:
      raise utils.RetryAgain()

    return self.success

  def Wait(self, secs):
    """Checks if a host is up or waits remaining seconds.

    @param secs: The secs remaining

    """
    start = time.time()
    for node in self.down:
      if self._ping_fn(self.node2ip[node], self.port, timeout=_EPO_PING_TIMEOUT,
                       live_port_needed=True):
        self.feedback_fn("Node %s became available" % node)
        self.up.add(node)
        self.down -= self.up
        # If we have a node available there is the possibility to run the
        # action callback successfully, therefore we don't wait and return
        return

    self._sleep_fn(max(0.0, start + secs - time.time()))
def _RunWhenNodesReachable(node_list, action_cb, interval):
  """Run action_cb when nodes become reachable.

  @param node_list: The list of nodes to be reachable
  @param action_cb: Callback called when a new host is reachable
  @param interval: The earliest time to retry
  @return: Whatever action_cb finally returned, or False on timeout

  """
  client = GetClient()
  cluster_info = client.QueryClusterInfo()
  if cluster_info["primary_ip_version"] == constants.IP4_VERSION:
    # Fix: use the concrete IPv4 address class here; the generic
    # netutils.IPAddress carries no usable address family, which broke
    # name resolution on IPv4 clusters
    family = netutils.IP4Address.family
  else:
    family = netutils.IP6Address.family

  node2ip = dict((node, netutils.GetHostname(node, family=family).ip)
                 for node in node_list)

  port = netutils.GetDaemonPort(constants.NODED)
  helper = _RunWhenNodesReachableHelper(node_list, action_cb, node2ip, port,
                                        ToStdout)

  try:
    return utils.Retry(helper, interval, _EPO_REACHABLE_TIMEOUT,
                       wait_fn=helper.Wait)
  except utils.RetryTimeout:
    ToStderr("Time exceeded while waiting for nodes to become reachable"
             " again:\n - %s", " - ".join(helper.down))
    return False
def _MaybeInstanceStartup(opts, inst_map, nodes_online,
                          _instance_start_fn=_InstanceStart):
  """Start the instances conditional based on node_states.

  @param opts: The command line options selected by the user
  @param inst_map: A dict of inst -> nodes mapping
  @param nodes_online: A list of nodes online
  @param _instance_start_fn: Callback to start instances (unittest use only)
  @return: Success of the operation on all instances

  """
  start_inst_list = [inst for (inst, nodes) in inst_map.items()
                     # All nodes the instance lives on are back online
                     if not (nodes - nodes_online)]

  # Drop started instances from the map so later calls skip them
  for inst in start_inst_list:
    del inst_map[inst]

  if not start_inst_list:
    return True

  return _instance_start_fn(opts, start_inst_list, True)
def _EpoOn(opts, full_node_list, node_list, inst_map):
  """Does the actual power on.

  @param opts: The command line options selected by the user
  @param full_node_list: All nodes to operate on (includes nodes not supporting
                         OOB)
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Fix: this is the power-ON path, so request power on (True); the code
  # previously passed False, which issued an OOB power-off command even
  # though the error message below expects nodes to come back up
  if node_list and not _OobPower(opts, node_list, True):
    ToStderr("Not all nodes seem to get back up, investigate and start"
             " manually if needed")

  # Wait for the nodes to be back up
  action_cb = compat.partial(_MaybeInstanceStartup, opts, dict(inst_map))

  ToStdout("Waiting until all nodes are available again")

  if not _RunWhenNodesReachable(full_node_list, action_cb, _EPO_PING_INTERVAL):
    ToStderr("Please investigate and start stopped instances manually")
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS
def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Instances must be down before the nodes lose power
  if not _InstanceStart(opts, inst_map.keys(), False, no_remember=True):
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  # Without OOB-capable nodes there is nothing left to power off
  if not node_list:
    return constants.EXIT_SUCCESS

  if _OobPower(opts, node_list, False):
    return constants.EXIT_SUCCESS

  return constants.EXIT_FAILURE
def Epo(opts, args, cl=None, _on_fn=_EpoOn, _off_fn=_EpoOff,
        _confirm_fn=ConfirmOperation,
        _stdout_fn=ToStdout, _stderr_fn=ToStderr):
  """EPO operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  if opts.groups and opts.show_all:
    _stderr_fn("Only one of --groups or --all are allowed")
    return constants.EXIT_FAILURE
  elif args and opts.show_all:
    _stderr_fn("Arguments in combination with --all are not allowed")
    return constants.EXIT_FAILURE

  if cl is None:
    cl = GetClient()

  if opts.groups:
    node_query_list = \
      itertools.chain(*cl.QueryGroups(args, ["node_list"], False))
  else:
    node_query_list = args

  result = cl.QueryNodes(node_query_list, ["name", "master", "pinst_list",
                                           "sinst_list", "powered", "offline"],
                         False)

  # Fix: materialize the node names instead of keeping a lazy map() object;
  # all_nodes is consumed twice (by _confirm_fn and again by _on_fn), and a
  # one-shot iterator would be empty the second time under Python 3
  all_nodes = [compat.fst(row) for row in result]

  inst_map = {}
  node_list = []
  for (node, master, pinsts, sinsts, powered, offline) in result:
    for inst in (pinsts + sinsts):
      # Track on which (online) nodes each instance lives, so the power-on
      # path can start it once all of them are reachable again
      if inst in inst_map:
        if not offline:
          inst_map[inst].add(node)
      elif offline:
        inst_map[inst] = set()
      else:
        inst_map[inst] = set([node])

    if master and opts.on:
      # We ignore the master for turning on the machines, in fact we are
      # already operating on the master at this point :)
      continue
    elif master and not opts.show_all:
      _stderr_fn("%s is the master node, please do a master-failover to another"
                 " node not affected by the EPO or use --all if you intend to"
                 " shutdown the whole cluster", node)
      return constants.EXIT_FAILURE
    elif powered is None:
      _stdout_fn("Node %s does not support out-of-band handling, it can not be"
                 " handled in a fully automated manner", node)
    elif powered == opts.on:
      _stdout_fn("Node %s is already in desired power state, skipping", node)
    elif not offline or (offline and powered):
      node_list.append(node)

  if not (opts.force or _confirm_fn(all_nodes, "nodes", "epo")):
    return constants.EXIT_FAILURE

  if opts.on:
    return _on_fn(opts, all_nodes, node_list, inst_map)

  return _off_fn(opts, node_list, inst_map)
1476 InitCluster, [ArgHost(min=1, max=1)],
1477 [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
1478 HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, MASTER_NETMASK_OPT,
1479 NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT,
1480 NOMODIFY_SSH_SETUP_OPT, SECONDARY_IP_OPT, VG_NAME_OPT,
1481 MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
1482 DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
1483 NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT,
1484 DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT] + INSTANCE_POLICY_OPTS,
1485 "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
1487 DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
1488 "", "Destroy cluster"),
1490 RenameCluster, [ArgHost(min=1, max=1)],
1491 [FORCE_OPT, DRY_RUN_OPT],
1493 "Renames the cluster"),
1495 RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
1496 "", "Forces a push of the configuration file and ssconf files"
1497 " to the nodes in the cluster"),
1499 VerifyCluster, ARGS_NONE,
1500 [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
1501 DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT, IGNORE_ERRORS_OPT],
1502 "", "Does a check on the cluster configuration"),
1504 VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
1505 "", "Does a check on the cluster disk status"),
1506 "repair-disk-sizes": (
1507 RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
1508 "[instance...]", "Updates mismatches in recorded disk sizes"),
1509 "master-failover": (
1510 MasterFailover, ARGS_NONE, [NOVOTING_OPT],
1511 "", "Makes the current node the master"),
1513 MasterPing, ARGS_NONE, [],
1514 "", "Checks if the master is alive"),
1516 ShowClusterVersion, ARGS_NONE, [],
1517 "", "Shows the cluster version"),
1519 ShowClusterMaster, ARGS_NONE, [],
1520 "", "Shows the cluster master"),
1522 ClusterCopyFile, [ArgFile(min=1, max=1)],
1523 [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
1524 "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
1526 RunClusterCommand, [ArgCommand(min=1)],
1527 [NODE_LIST_OPT, NODEGROUP_OPT, SHOW_MACHINE_OPT],
1528 "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
1530 ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
1531 "[--roman]", "Show cluster configuration"),
1533 ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
1535 AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
1536 "tag...", "Add tags to the cluster"),
1538 RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
1539 "tag...", "Remove tags from the cluster"),
1541 SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
1542 "Searches the tags on all objects on"
1543 " the cluster for a given pattern (regex)"),
1546 [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
1547 [], "drain|undrain|info", "Change queue properties"),
1550 [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
1551 ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
1553 "{pause <timespec>|continue|info}", "Change watcher properties"),
1555 SetClusterParams, ARGS_NONE,
1556 [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
1557 MASTER_NETMASK_OPT, NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT,
1558 MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT,
1559 DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
1560 RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
1561 NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT,
1562 DISK_STATE_OPT, SUBMIT_OPT] +
1563 INSTANCE_POLICY_OPTS,
1565 "Alters the parameters of the cluster"),
1567 RenewCrypto, ARGS_NONE,
1568 [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
1569 NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
1570 NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT,
1571 NEW_SPICE_CERT_OPT, SPICE_CERT_OPT, SPICE_CACERT_OPT],
1573 "Renews cluster certificates, keys and secrets"),
1575 Epo, [ArgUnknown()],
1576 [FORCE_OPT, ON_OPT, GROUPS_OPT, ALL_OPT, OOB_TIMEOUT_OPT,
1577 SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
1579 "Performs an emergency power-off on given args"),
1580 "activate-master-ip": (
1581 ActivateMasterIp, ARGS_NONE, [], "", "Activates the master IP"),
1582 "deactivate-master-ip": (
1583 DeactivateMasterIp, ARGS_NONE, [CONFIRM_OPT], "",
1584 "Deactivates the master IP"),
1588 #: dictionary with aliases for commands
1590 "masterfailover": "master-failover",
1596 return GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER},