4 # Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 """Cluster related commands"""
23 # pylint: disable-msg=W0401,W0613,W0614,C0103
24 # W0401: Wildcard import ganeti.cli
25 # W0613: Unused argument, since all functions follow the same API
26 # W0614: Unused import %s from wildcard import (since we need cli)
27 # C0103: Invalid name gnt-cluster
import itertools
import os.path
import time

import OpenSSL

from ganeti.cli import *
from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import bootstrap
from ganeti import ssh
from ganeti import objects
from ganeti import uidpool
from ganeti import compat
from ganeti import netutils
#: Option for "gnt-cluster epo": recover from an EPO (power the cluster
#: back on) instead of powering it off
ON_OPT = cli_option("--on", default=False,
                    action="store_true", dest="on",
                    help="Recover from an EPO")

#: Option for "gnt-cluster epo": interpret the arguments as node group
#: names rather than node names
GROUPS_OPT = cli_option("--groups", default=False,
                        action="store_true", dest="groups",
                        help="Arguments are node groups instead of nodes")

# Timing constants for the EPO node-reachability polling loop
_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
_EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes
61 def InitCluster(opts, args):
62 """Initialize the cluster.
64 @param opts: the command line options selected by the user
66 @param args: should contain only one element, the desired
69 @return: the desired exit code
72 if not opts.lvm_storage and opts.vg_name:
73 ToStderr("Options --no-lvm-storage and --vg-name conflict.")
76 vg_name = opts.vg_name
77 if opts.lvm_storage and not opts.vg_name:
78 vg_name = constants.DEFAULT_VG
80 if not opts.drbd_storage and opts.drbd_helper:
81 ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
84 drbd_helper = opts.drbd_helper
85 if opts.drbd_storage and not opts.drbd_helper:
86 drbd_helper = constants.DEFAULT_DRBD_HELPER
88 master_netdev = opts.master_netdev
89 if master_netdev is None:
90 master_netdev = constants.DEFAULT_BRIDGE
92 hvlist = opts.enabled_hypervisors
94 hvlist = constants.DEFAULT_ENABLED_HYPERVISOR
95 hvlist = hvlist.split(",")
97 hvparams = dict(opts.hvparams)
98 beparams = opts.beparams
99 nicparams = opts.nicparams
101 # prepare beparams dict
102 beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
103 utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
105 # prepare nicparams dict
106 nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
107 utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
109 # prepare ndparams dict
110 if opts.ndparams is None:
111 ndparams = dict(constants.NDC_DEFAULTS)
113 ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams)
114 utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
116 # prepare hvparams dict
117 for hv in constants.HYPER_TYPES:
118 if hv not in hvparams:
120 hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
121 utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)
123 if opts.candidate_pool_size is None:
124 opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT
126 if opts.mac_prefix is None:
127 opts.mac_prefix = constants.DEFAULT_MAC_PREFIX
129 uid_pool = opts.uid_pool
130 if uid_pool is not None:
131 uid_pool = uidpool.ParseUidPool(uid_pool)
133 if opts.prealloc_wipe_disks is None:
134 opts.prealloc_wipe_disks = False
137 primary_ip_version = int(opts.primary_ip_version)
138 except (ValueError, TypeError), err:
139 ToStderr("Invalid primary ip version value: %s" % str(err))
142 bootstrap.InitCluster(cluster_name=args[0],
143 secondary_ip=opts.secondary_ip,
145 mac_prefix=opts.mac_prefix,
146 master_netdev=master_netdev,
147 file_storage_dir=opts.file_storage_dir,
148 shared_file_storage_dir=opts.shared_file_storage_dir,
149 enabled_hypervisors=hvlist,
154 candidate_pool_size=opts.candidate_pool_size,
155 modify_etc_hosts=opts.modify_etc_hosts,
156 modify_ssh_setup=opts.modify_ssh_setup,
157 maintain_node_health=opts.maintain_node_health,
158 drbd_helper=drbd_helper,
160 default_iallocator=opts.default_iallocator,
161 primary_ip_version=primary_ip_version,
162 prealloc_wipe_disks=opts.prealloc_wipe_disks,
164 op = opcodes.OpClusterPostInit()
165 SubmitOpCode(op, opts=opts)
def DestroyCluster(opts, args):
  """Destroy the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # destroying is irreversible, so require the explicit safety option
  if not opts.yes_do_it:
    ToStderr("Destroying a cluster is irreversible. If you really want"
             " destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpClusterDestroy()
  master = SubmitOpCode(op, opts=opts)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master)
  return 0
def RenameCluster(opts, args):
  """Rename the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new cluster name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])

  new_name = args[0]
  if not opts.force:
    usertext = ("This will rename the cluster from '%s' to '%s'. If you are"
                " connected over the network to the cluster name, the"
                " operation is very dangerous as the IP address will be"
                " removed from the node and the change may not go through."
                " Continue?") % (cluster_name, new_name)
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterRename(name=new_name)
  result = SubmitOpCode(op, opts=opts, cl=cl)

  if result:
    ToStdout("Cluster renamed from '%s' to '%s'", cluster_name, result)

  return 0
def RedistributeConfig(opts, args):
  """Forces push of the cluster configuration.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: empty list
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRedistConf()
  # SubmitOrSend honours --submit, unlike plain SubmitOpCode
  SubmitOrSend(op, opts)
  return 0
def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  result = cl.QueryClusterInfo()
  ToStdout("Software version: %s", result["software_version"])
  ToStdout("Internode protocol: %s", result["protocol_version"])
  ToStdout("Configuration format: %s", result["config_version"])
  ToStdout("OS api version: %s", result["os_api_version"])
  ToStdout("Export interface: %s", result["export_version"])
  return 0
def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # read from the local ssconf files rather than asking the master daemon,
  # so this works even when the master daemon is down
  master = bootstrap.GetMaster()
  ToStdout(master)
  return 0
def _PrintGroupedParams(paramsdict, level=1, roman=False):
  """Print Grouped parameters (be, nic, disk) by group.

  @type paramsdict: dict of dicts
  @param paramsdict: {group: {param: value, ...}, ...}
  @type level: int
  @param level: Level of indention
  @type roman: bool
  @param roman: Whether to print integer values as Roman numerals

  """
  indent = "  " * level
  for item, val in sorted(paramsdict.items()):
    if isinstance(val, dict):
      # nested group: print the header, then recurse one level deeper
      ToStdout("%s- %s:", indent, item)
      _PrintGroupedParams(val, level=level + 1, roman=roman)
    elif roman and isinstance(val, int):
      ToStdout("%s %s: %s", indent, item, compat.TryToRoman(val))
    else:
      ToStdout("%s %s: %s", indent, item, val)
def ShowClusterConfig(opts, args):
  """Shows cluster information.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  result = cl.QueryClusterInfo()

  ToStdout("Cluster name: %s", result["name"])
  ToStdout("Cluster UUID: %s", result["uuid"])

  ToStdout("Creation time: %s", utils.FormatTime(result["ctime"]))
  ToStdout("Modification time: %s", utils.FormatTime(result["mtime"]))

  ToStdout("Master node: %s", result["master"])

  ToStdout("Architecture (this node): %s (%s)",
           result["architecture"][0], result["architecture"][1])

  if result["tags"]:
    tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
  else:
    tags = "(none)"

  ToStdout("Tags: %s", tags)

  ToStdout("Default hypervisor: %s", result["default_hypervisor"])
  ToStdout("Enabled hypervisors: %s",
           utils.CommaJoin(result["enabled_hypervisors"]))

  ToStdout("Hypervisor parameters:")
  _PrintGroupedParams(result["hvparams"])

  ToStdout("OS-specific hypervisor parameters:")
  _PrintGroupedParams(result["os_hvp"])

  ToStdout("OS parameters:")
  _PrintGroupedParams(result["osparams"])

  ToStdout("Hidden OSes: %s", utils.CommaJoin(result["hidden_os"]))
  ToStdout("Blacklisted OSes: %s", utils.CommaJoin(result["blacklisted_os"]))

  ToStdout("Cluster parameters:")
  ToStdout(" - candidate pool size: %s",
           compat.TryToRoman(result["candidate_pool_size"],
                             convert=opts.roman_integers))
  ToStdout(" - master netdev: %s", result["master_netdev"])
  ToStdout(" - lvm volume group: %s", result["volume_group_name"])
  if result["reserved_lvs"]:
    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
  else:
    reserved_lvs = "(none)"
  ToStdout(" - lvm reserved volumes: %s", reserved_lvs)
  ToStdout(" - drbd usermode helper: %s", result["drbd_usermode_helper"])
  ToStdout(" - file storage path: %s", result["file_storage_dir"])
  ToStdout(" - shared file storage path: %s",
           result["shared_file_storage_dir"])
  ToStdout(" - maintenance of node health: %s",
           result["maintain_node_health"])
  ToStdout(" - uid pool: %s",
           uidpool.FormatUidPool(result["uid_pool"],
                                 roman=opts.roman_integers))
  ToStdout(" - default instance allocator: %s", result["default_iallocator"])
  ToStdout(" - primary ip version: %d", result["primary_ip_version"])
  ToStdout(" - preallocation wipe disks: %s", result["prealloc_wipe_disks"])

  ToStdout("Default node parameters:")
  _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)

  ToStdout("Default instance parameters:")
  _PrintGroupedParams(result["beparams"], roman=opts.roman_integers)

  ToStdout("Default nic parameters:")
  _PrintGroupedParams(result["nicparams"], roman=opts.roman_integers)

  return 0
def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the path of
      the file to be copied
  @rtype: int
  @return: the desired exit code

  """
  filename = args[0]
  # fail early, before contacting the master, if the file is missing locally
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename,
                               errors.ECODE_INVAL)

  cl = GetClient()

  cluster_name = cl.QueryConfigValues(["cluster_name"])[0]

  # the master already has the file, so exclude it from the target list
  results = GetOnlineNodes(nodes=opts.nodes, cl=cl, filter_master=True,
                           secondary_ips=opts.use_replication_network)

  srun = ssh.SshRunner(cluster_name=cluster_name)
  for node in results:
    if not srun.CopyFileToNode(node, filename):
      # best-effort: report the failure but keep copying to the other nodes
      ToStderr("Copy of file %s to node %s failed", filename, node)

  return 0
def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the command to be run and its arguments
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  command = " ".join(args)

  nodes = GetOnlineNodes(nodes=opts.nodes, cl=cl)

  cluster_name, master_node = cl.QueryConfigValues(["cluster_name",
                                                    "master_node"])

  srun = ssh.SshRunner(cluster_name=cluster_name)

  # Make sure master node is at list end: if the command affects the node
  # (e.g. a shutdown), the master must be touched last
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  for name in nodes:
    result = srun.Run(name, "root", command)
    ToStdout("------------------------------------------------")
    ToStdout("node: %s", name)
    ToStdout("%s", result.output)
    ToStdout("return code = %s", result.exit_code)

  return 0
def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various test on nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  skip_checks = []

  simulate = opts.simulate_errors

  if opts.nodegroup is None:
    # Verify cluster config.
    op = opcodes.OpClusterVerifyConfig(verbose=opts.verbose,
                                       error_codes=opts.error_codes,
                                       debug_simulate_errors=simulate)

    success, all_groups = SubmitOpCode(op, opts=opts)
  else:
    # only verify the requested group; config verification is skipped
    success = True
    all_groups = [opts.nodegroup]

  if opts.skip_nplusone_mem:
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)

  jex = JobExecutor(opts=opts, verbose=False)

  for group in all_groups:
    op = opcodes.OpClusterVerifyGroup(group_name=group,
                                      skip_checks=skip_checks,
                                      verbose=opts.verbose,
                                      error_codes=opts.error_codes,
                                      debug_simulate_errors=simulate)
    jex.QueueJob("group " + group, op)

  results = jex.GetResults()
  # each result is (job success, (verify status, )); the whole run is
  # successful only if every group verification reported success
  success &= compat.all(r[1][0] for r in results)

  if success:
    return constants.EXIT_SUCCESS
  else:
    return constants.EXIT_FAILURE
492 def VerifyDisks(opts, args):
493 """Verify integrity of cluster disks.
495 @param opts: the command line options selected by the user
497 @param args: should be an empty list
499 @return: the desired exit code
504 op = opcodes.OpClusterVerifyDisks()
505 result = SubmitOpCode(op, opts=opts, cl=cl)
506 if not isinstance(result, (list, tuple)) or len(result) != 3:
507 raise errors.ProgrammerError("Unknown result type for OpClusterVerifyDisks")
509 bad_nodes, instances, missing = result
511 retcode = constants.EXIT_SUCCESS
514 for node, text in bad_nodes.items():
515 ToStdout("Error gathering data on node %s: %s",
516 node, utils.SafeEncode(text[-400:]))
518 ToStdout("You need to fix these nodes first before fixing instances")
521 for iname in instances:
524 op = opcodes.OpInstanceActivateDisks(instance_name=iname)
526 ToStdout("Activating disks for instance '%s'", iname)
527 SubmitOpCode(op, opts=opts, cl=cl)
528 except errors.GenericError, err:
529 nret, msg = FormatError(err)
531 ToStderr("Error activating disks for instance %s: %s", iname, msg)
534 for iname, ival in missing.iteritems():
535 all_missing = compat.all(x[0] in bad_nodes for x in ival)
537 ToStdout("Instance %s cannot be verified as it lives on"
538 " broken nodes", iname)
540 ToStdout("Instance %s has missing logical volumes:", iname)
542 for node, vol in ival:
543 if node in bad_nodes:
544 ToStdout("\tbroken node %s /dev/%s", node, vol)
546 ToStdout("\t%s /dev/%s", node, vol)
548 ToStdout("You need to run replace or recreate disks for all the above"
549 " instances, if this message persist after fixing nodes.")
def RepairDiskSizes(opts, args):
  """Verify sizes of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: optional list of instances to restrict check to
  @rtype: int
  @return: the desired exit code

  """
  # an empty args list means "all instances"
  op = opcodes.OpClusterRepairDiskSizes(instances=args)
  SubmitOpCode(op, opts=opts)
  return 0
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become new
  master.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # skipping the vote is dangerous, so make the user confirm explicitly
  if opts.no_voting:
    usertext = ("This will perform the failover even if most other nodes"
                " are down, or if this node is outdated. This is dangerous"
                " as it can lead to a non-consistent cluster. Check the"
                " gnt-cluster(8) man page before proceeding. Continue?")
    if not AskUser(usertext):
      return 1

  return bootstrap.MasterFailover(no_voting=opts.no_voting)
def MasterPing(opts, args):
  """Checks if the master is alive.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  try:
    cl = GetClient()
    cl.QueryClusterInfo()
    return 0
  # any failure to reach/query the master means it is not alive
  except Exception: # pylint: disable-msg=W0703
    return 1
def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the tag pattern
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpTagsSearch(pattern=args[0])
  result = SubmitOpCode(op, opts=opts)
  if not result:
    return 1
  result = list(result)
  result.sort()
  for path, tag in result:
    ToStdout("%s %s", path, tag)
633 def _RenewCrypto(new_cluster_cert, new_rapi_cert, rapi_cert_filename,
634 new_confd_hmac_key, new_cds, cds_filename,
636 """Renews cluster certificates, keys and secrets.
638 @type new_cluster_cert: bool
639 @param new_cluster_cert: Whether to generate a new cluster certificate
640 @type new_rapi_cert: bool
641 @param new_rapi_cert: Whether to generate a new RAPI certificate
642 @type rapi_cert_filename: string
643 @param rapi_cert_filename: Path to file containing new RAPI certificate
644 @type new_confd_hmac_key: bool
645 @param new_confd_hmac_key: Whether to generate a new HMAC key
647 @param new_cds: Whether to generate a new cluster domain secret
648 @type cds_filename: string
649 @param cds_filename: Path to file containing new cluster domain secret
651 @param force: Whether to ask user for confirmation
654 if new_rapi_cert and rapi_cert_filename:
655 ToStderr("Only one of the --new-rapi-certficate and --rapi-certificate"
656 " options can be specified at the same time.")
659 if new_cds and cds_filename:
660 ToStderr("Only one of the --new-cluster-domain-secret and"
661 " --cluster-domain-secret options can be specified at"
665 if rapi_cert_filename:
666 # Read and verify new certificate
668 rapi_cert_pem = utils.ReadFile(rapi_cert_filename)
670 OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
672 except Exception, err: # pylint: disable-msg=W0703
673 ToStderr("Can't load new RAPI certificate from %s: %s" %
674 (rapi_cert_filename, str(err)))
678 OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, rapi_cert_pem)
679 except Exception, err: # pylint: disable-msg=W0703
680 ToStderr("Can't load new RAPI private key from %s: %s" %
681 (rapi_cert_filename, str(err)))
689 cds = utils.ReadFile(cds_filename)
690 except Exception, err: # pylint: disable-msg=W0703
691 ToStderr("Can't load new cluster domain secret from %s: %s" %
692 (cds_filename, str(err)))
698 usertext = ("This requires all daemons on all nodes to be restarted and"
699 " may take some time. Continue?")
700 if not AskUser(usertext):
703 def _RenewCryptoInner(ctx):
704 ctx.feedback_fn("Updating certificates and keys")
705 bootstrap.GenerateClusterCrypto(new_cluster_cert, new_rapi_cert,
708 rapi_cert_pem=rapi_cert_pem,
714 files_to_copy.append(constants.NODED_CERT_FILE)
716 if new_rapi_cert or rapi_cert_pem:
717 files_to_copy.append(constants.RAPI_CERT_FILE)
719 if new_confd_hmac_key:
720 files_to_copy.append(constants.CONFD_HMAC_KEY)
723 files_to_copy.append(constants.CLUSTER_DOMAIN_SECRET_FILE)
726 for node_name in ctx.nonmaster_nodes:
727 ctx.feedback_fn("Copying %s to %s" %
728 (", ".join(files_to_copy), node_name))
729 for file_name in files_to_copy:
730 ctx.ssh.CopyFileToNode(node_name, file_name)
732 RunWhileClusterStopped(ToStdout, _RenewCryptoInner)
734 ToStdout("All requested certificates and keys have been replaced."
735 " Running \"gnt-cluster verify\" now is recommended.")
def RenewCrypto(opts, args):
  """Renews cluster certificates, keys and secrets.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  return _RenewCrypto(opts.new_cluster_cert,
                      opts.new_rapi_cert,
                      opts.rapi_cert,
                      opts.new_confd_hmac_key,
                      opts.new_cluster_domain_secret,
                      opts.cluster_domain_secret,
                      opts.force)
def SetClusterParams(opts, args):
  """Modify the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # at least one modification must have been requested
  if not (not opts.lvm_storage or opts.vg_name or
          not opts.drbd_storage or opts.drbd_helper or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams or opts.nicparams or opts.ndparams or
          opts.candidate_pool_size is not None or
          opts.uid_pool is not None or
          opts.maintain_node_health is not None or
          opts.add_uids is not None or
          opts.remove_uids is not None or
          opts.default_iallocator is not None or
          opts.reserved_lvs is not None or
          opts.master_netdev is not None or
          opts.prealloc_wipe_disks is not None):
    ToStderr("Please give at least one of the parameters.")
    return 1

  vg_name = opts.vg_name
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  if not opts.lvm_storage:
    # an empty vg name disables LVM storage on the cluster
    vg_name = ""

  drbd_helper = opts.drbd_helper
  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    return 1

  if not opts.drbd_storage:
    # an empty helper disables DRBD storage on the cluster
    drbd_helper = ""

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  # a list of (name, dict) we can pass directly to dict() (or [])
  hvparams = dict(opts.hvparams)
  for hv_params in hvparams.values():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)

  beparams = opts.beparams
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  nicparams = opts.nicparams
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  ndparams = opts.ndparams
  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  mnh = opts.maintain_node_health

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  add_uids = opts.add_uids
  if add_uids is not None:
    add_uids = uidpool.ParseUidPool(add_uids)

  remove_uids = opts.remove_uids
  if remove_uids is not None:
    remove_uids = uidpool.ParseUidPool(remove_uids)

  if opts.reserved_lvs is not None:
    if opts.reserved_lvs == "":
      # remove the reserved-lvs list entirely
      opts.reserved_lvs = []
    else:
      opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")

  op = opcodes.OpClusterSetParams(vg_name=vg_name,
                                  drbd_helper=drbd_helper,
                                  enabled_hypervisors=hvlist,
                                  hvparams=hvparams,
                                  os_hvp=None,
                                  beparams=beparams,
                                  nicparams=nicparams,
                                  ndparams=ndparams,
                                  candidate_pool_size=opts.candidate_pool_size,
                                  maintain_node_health=mnh,
                                  uid_pool=uid_pool,
                                  add_uids=add_uids,
                                  remove_uids=remove_uids,
                                  default_iallocator=opts.default_iallocator,
                                  prealloc_wipe_disks=opts.prealloc_wipe_disks,
                                  master_netdev=opts.master_netdev,
                                  reserved_lvs=opts.reserved_lvs)
  SubmitOpCode(op, opts=opts)
  return 0
def QueueOps(opts, args):
  """Queue operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()
  if command in ("drain", "undrain"):
    drain_flag = command == "drain"
    client.SetQueueDrainFlag(drain_flag)
  elif command == "info":
    result = client.QueryConfigValues(["drain_flag"])
    if result[0]:
      val = "set"
    else:
      val = "unset"
    ToStdout("The drain flag is %s" % val)
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
def _ShowWatcherPause(until):
  """Prints a human-readable description of the watcher pause state.

  @param until: Unix timestamp until which the watcher is paused, or None

  """
  # a pause timestamp in the past is equivalent to not being paused
  if until is None or until < time.time():
    ToStdout("The watcher is not paused.")
  else:
    ToStdout("The watcher is paused until %s.", time.ctime(until))
def WatcherOps(opts, args):
  """Watcher operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()

  if command == "continue":
    client.SetWatcherPause(None)
    ToStdout("The watcher is no longer paused.")

  elif command == "pause":
    if len(args) < 2:
      raise errors.OpPrereqError("Missing pause duration", errors.ECODE_INVAL)

    result = client.SetWatcherPause(time.time() + ParseTimespec(args[1]))
    _ShowWatcherPause(result)

  elif command == "info":
    result = client.QueryConfigValues(["watcher_pause"])
    _ShowWatcherPause(result[0])

  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
def _OobPower(opts, node_list, power):
  """Puts the node in the list to desired power state.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on
  @param power: True if they should be powered on, False otherwise
  @return: The success of the operation (none failed)

  """
  if power:
    command = constants.OOB_POWER_ON
  else:
    command = constants.OOB_POWER_OFF

  op = opcodes.OpOobCommand(node_names=node_list,
                            command=command,
                            ignore_status=True,
                            timeout=opts.oob_timeout,
                            power_delay=opts.power_delay)
  result = SubmitOpCode(op, opts=opts)

  errs = 0
  for node_result in result:
    (node_tuple, data_tuple) = node_result
    (_, node_name) = node_tuple
    (data_status, _) = data_tuple
    if data_status != constants.RS_NORMAL:
      # ignore_status=True above means "unavailable" cannot be returned
      assert data_status != constants.RS_UNAVAIL
      errs += 1
      ToStderr("There was a problem changing power for %s, please investigate",
               node_name)

  if errs > 0:
    return False

  return True
def _InstanceStart(opts, inst_list, start):
  """Puts the instances in the list to desired state.

  @param opts: The command line options selected by the user
  @param inst_list: The list of instances to operate on
  @param start: True if they should be started, False for shutdown
  @return: The success of the operation (none failed)

  """
  if start:
    opcls = opcodes.OpInstanceStartup
    text_submit, text_success, text_failed = ("startup", "started", "starting")
  else:
    # bind the shutdown timeout now; the loop below only supplies the name
    opcls = compat.partial(opcodes.OpInstanceShutdown,
                           timeout=opts.shutdown_timeout)
    text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")

  jex = JobExecutor(opts=opts)

  for inst in inst_list:
    ToStdout("Submit %s of instance %s", text_submit, inst)
    op = opcls(instance_name=inst)
    jex.QueueJob(inst, op)

  results = jex.GetResults()
  bad_cnt = len([1 for (success, _) in results if not success])

  if bad_cnt == 0:
    ToStdout("All instances have been %s successfully", text_success)
  else:
    ToStderr("There were errors while %s instances:\n"
             "%d error(s) out of %d instance(s)", text_failed, bad_cnt,
             len(results))
    return False

  return True
class _RunWhenNodesReachableHelper:
  """Helper class to make shared internal state sharing easier.

  @ivar success: Indicates if all action_cb calls were successful

  """
  def __init__(self, node_list, action_cb, node2ip, port, feedback_fn,
               _ping_fn=netutils.TcpPing, _sleep_fn=time.sleep):
    """Init the object.

    @param node_list: The list of nodes to be reachable
    @param action_cb: Callback called when a new host is reachable
    @type node2ip: dict
    @param node2ip: Node to ip mapping
    @param port: The port to use for the TCP ping
    @param feedback_fn: The function used for feedback
    @param _ping_fn: Function to check reachabilty (for unittest use only)
    @param _sleep_fn: Function to sleep (for unittest use only)

    """
    self.down = set(node_list)
    self.up = set()
    self.node2ip = node2ip
    self.success = True
    self.action_cb = action_cb
    self.port = port
    self.feedback_fn = feedback_fn
    self._ping_fn = _ping_fn
    self._sleep_fn = _sleep_fn

  def __call__(self):
    """When called we run action_cb.

    @raises utils.RetryAgain: When there are still down nodes

    """
    if not self.action_cb(self.up):
      self.success = False

    if self.down:
      raise utils.RetryAgain()
    else:
      return self.success

  def Wait(self, secs):
    """Checks if a host is up or waits remaining seconds.

    @param secs: The secs remaining

    """
    start = time.time()
    for node in self.down:
      if self._ping_fn(self.node2ip[node], self.port, timeout=_EPO_PING_TIMEOUT,
                       live_port_needed=True):
        self.feedback_fn("Node %s became available" % node)
        self.up.add(node)
        self.down -= self.up
        # If we have a node available there is the possibility to run the
        # action callback successfully, therefore we don't wait and return
        return

    self._sleep_fn(max(0.0, start + secs - time.time()))
def _RunWhenNodesReachable(node_list, action_cb, interval):
  """Run action_cb when nodes become reachable.

  @param node_list: The list of nodes to be reachable
  @param action_cb: Callback called when a new host is reachable
  @param interval: The earliest time to retry

  """
  client = GetClient()
  cluster_info = client.QueryClusterInfo()
  # FIX: resolve node names with the address family matching the cluster's
  # primary IP version (was netutils.IPAddress, which has no concrete family)
  if cluster_info["primary_ip_version"] == constants.IP4_VERSION:
    family = netutils.IP4Address.family
  else:
    family = netutils.IP6Address.family

  node2ip = dict((node, netutils.GetHostname(node, family=family).ip)
                 for node in node_list)

  port = netutils.GetDaemonPort(constants.NODED)
  helper = _RunWhenNodesReachableHelper(node_list, action_cb, node2ip, port,
                                        ToStdout)

  try:
    return utils.Retry(helper, interval, _EPO_REACHABLE_TIMEOUT,
                       wait_fn=helper.Wait)
  except utils.RetryTimeout:
    ToStderr("Time exceeded while waiting for nodes to become reachable"
             " again:\n - %s", "\n - ".join(helper.down))
    return False
def _MaybeInstanceStartup(opts, inst_map, nodes_online,
                          _instance_start_fn=_InstanceStart):
  """Start the instances conditional based on node_states.

  @param opts: The command line options selected by the user
  @param inst_map: A dict of inst -> nodes mapping
  @param nodes_online: A list of nodes online
  @param _instance_start_fn: Callback to start instances (unittest use only)
  @return: Success of the operation on all instances

  """
  start_inst_list = []
  for (inst, nodes) in inst_map.items():
    if not (nodes - nodes_online):
      # All nodes the instance lives on are back online
      start_inst_list.append(inst)

  # forget started instances so later calls don't try to start them again
  for inst in start_inst_list:
    del inst_map[inst]

  if start_inst_list:
    return _instance_start_fn(opts, start_inst_list, True)

  return True
def _EpoOn(opts, full_node_list, node_list, inst_map):
  """Does the actual power on.

  @param opts: The command line options selected by the user
  @param full_node_list: All nodes to operate on (includes nodes not supporting
                         OOB)
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # FIX: this is the power-ON path, so request power on (True); the previous
  # False argument asked the OOB helper to power the nodes OFF
  if node_list and not _OobPower(opts, node_list, True):
    ToStderr("Not all nodes seem to get back up, investigate and start"
             " manually if needed")

  # Wait for the nodes to be back up
  action_cb = compat.partial(_MaybeInstanceStartup, opts, dict(inst_map))

  ToStdout("Waiting until all nodes are available again")
  if not _RunWhenNodesReachable(full_node_list, action_cb, _EPO_PING_INTERVAL):
    ToStderr("Please investigate and start stopped instances manually")
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS
def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # instances must be stopped before their nodes are powered off
  if not _InstanceStart(opts, inst_map.keys(), False):
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  if not node_list:
    return constants.EXIT_SUCCESS

  if _OobPower(opts, node_list, False):
    return constants.EXIT_SUCCESS
  else:
    return constants.EXIT_FAILURE
def Epo(opts, args):
  """EPO operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  if opts.groups and opts.show_all:
    ToStderr("Only one of --groups or --all are allowed")
    return constants.EXIT_FAILURE
  elif args and opts.show_all:
    ToStderr("Arguments in combination with --all are not allowed")
    return constants.EXIT_FAILURE

  client = GetClient()

  if opts.groups:
    # expand group names to the union of their member nodes
    node_query_list = itertools.chain(*client.QueryGroups(names=args,
                                                          fields=["node_list"],
                                                          use_locking=False))
  else:
    node_query_list = args

  result = client.QueryNodes(names=node_query_list,
                             fields=["name", "master", "pinst_list",
                                     "sinst_list", "powered", "offline"],
                             use_locking=False)
  node_list = []
  inst_map = {}
  for (idx, (node, master, pinsts, sinsts, powered,
             offline)) in enumerate(result):
    # Normalize the node_query_list as well
    if not opts.show_all:
      node_query_list[idx] = node
    for inst in (pinsts + sinsts):
      if inst in inst_map:
        if not offline:
          inst_map[inst].add(node)
      elif offline:
        # instances on offline nodes only cannot be handled
        inst_map[inst] = set()
      else:
        inst_map[inst] = set([node])

    if master and opts.on:
      # We ignore the master for turning on the machines, in fact we are
      # already operating on the master at this point :)
      continue
    elif master and not opts.show_all:
      ToStderr("%s is the master node, please do a master-failover to another"
               " node not affected by the EPO or use --all if you intend to"
               " shutdown the whole cluster", node)
      return constants.EXIT_FAILURE
    elif powered is None:
      ToStdout("Node %s does not support out-of-band handling, it can not be"
               " handled in a fully automated manner", node)
    elif powered == opts.on:
      ToStdout("Node %s is already in desired power state, skipping", node)
    elif not offline or (offline and powered):
      node_list.append(node)

  if not opts.force and not ConfirmOperation(node_query_list, "nodes", "epo"):
    return constants.EXIT_FAILURE

  if opts.on:
    return _EpoOn(opts, node_query_list, node_list, inst_map)
  else:
    return _EpoOff(opts, node_list, inst_map)
commands = {
  'init': (
    InitCluster, [ArgHost(min=1, max=1)],
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT,
     NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, NOMODIFY_SSH_SETUP_OPT,
     SECONDARY_IP_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
     UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
     DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT],
    "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
  'destroy': (
    DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
    "", "Destroy cluster"),
  'rename': (
    RenameCluster, [ArgHost(min=1, max=1)],
    [FORCE_OPT, DRY_RUN_OPT],
    "<new_name>",
    "Renames the cluster"),
  'redist-conf': (
    RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "", "Forces a push of the configuration file and ssconf files"
    " to the nodes in the cluster"),
  'verify': (
    VerifyCluster, ARGS_NONE,
    [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT],
    "", "Does a check on the cluster configuration"),
  'verify-disks': (
    VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
    "", "Does a check on the cluster disk status"),
  'repair-disk-sizes': (
    RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
    "", "Updates mismatches in recorded disk sizes"),
  'master-failover': (
    MasterFailover, ARGS_NONE, [NOVOTING_OPT],
    "", "Makes the current node the master"),
  'master-ping': (
    MasterPing, ARGS_NONE, [],
    "", "Checks if the master is alive"),
  'version': (
    ShowClusterVersion, ARGS_NONE, [],
    "", "Shows the cluster version"),
  'getmaster': (
    ShowClusterMaster, ARGS_NONE, [],
    "", "Shows the cluster master"),
  'copyfile': (
    ClusterCopyFile, [ArgFile(min=1, max=1)],
    [NODE_LIST_OPT, USE_REPL_NET_OPT],
    "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
  'command': (
    RunClusterCommand, [ArgCommand(min=1)],
    [NODE_LIST_OPT],
    "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
  'info': (
    ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
    "[--roman]", "Show cluster configuration"),
  'list-tags': (
    ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
  'add-tags': (
    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "tag...", "Add tags to the cluster"),
  'remove-tags': (
    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "tag...", "Remove tags from the cluster"),
  'search-tags': (
    SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
    "Searches the tags on all objects on"
    " the cluster for a given pattern (regex)"),
  'queue': (
    QueueOps,
    [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
    [], "drain|undrain|info", "Change queue properties"),
  'watcher': (
    WatcherOps,
    [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
     ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
    [],
    "{pause <timespec>|continue|info}", "Change watcher properties"),
  'modify': (
    SetClusterParams, ARGS_NONE,
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
     NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
     UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT, DRBD_HELPER_OPT,
     NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT, RESERVED_LVS_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT],
    "[opts...]",
    "Alters the parameters of the cluster"),
  'renew-crypto': (
    RenewCrypto, ARGS_NONE,
    [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
     NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT],
    "[opts...]",
    "Renews cluster certificates, keys and secrets"),
  'epo': (
    Epo, [ArgUnknown()],
    [FORCE_OPT, ON_OPT, GROUPS_OPT, ALL_OPT, OOB_TIMEOUT_OPT,
     SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
    "[opts...] [args]",
    "Performs an emergency power-off on given args"),
  }
#: dictionary with aliases for commands
aliases = {
  'masterfailover': 'master-failover',
}
1354 return GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER},