4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
47 from optparse import (OptionParser, TitledHelpFormatter,
48 Option, OptionValueError)
52 # Command line options
66 "CLUSTER_DOMAIN_SECRET_OPT",
84 "FILESTORE_DRIVER_OPT",
90 "GLOBAL_SHARED_FILEDIR_OPT",
95 "DEFAULT_IALLOCATOR_OPT",
96 "IDENTIFY_DEFAULTS_OPT",
99 "IGNORE_FAILURES_OPT",
100 "IGNORE_OFFLINE_OPT",
101 "IGNORE_REMOVE_FAILURES_OPT",
102 "IGNORE_SECONDARIES_OPT",
106 "MAINTAIN_NODE_HEALTH_OPT",
108 "MASTER_NETMASK_OPT",
110 "MIGRATION_MODE_OPT",
112 "NEW_CLUSTER_CERT_OPT",
113 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
114 "NEW_CONFD_HMAC_KEY_OPT",
117 "NEW_SPICE_CERT_OPT",
119 "NODE_FORCE_JOIN_OPT",
121 "NODE_PLACEMENT_OPT",
125 "NODRBD_STORAGE_OPT",
131 "NOMODIFY_ETCHOSTS_OPT",
132 "NOMODIFY_SSH_SETUP_OPT",
136 "NORUNTIME_CHGS_OPT",
139 "NOSSH_KEYCHECK_OPT",
153 "PREALLOC_WIPE_DISKS_OPT",
154 "PRIMARY_IP_VERSION_OPT",
160 "REMOVE_INSTANCE_OPT",
166 "SECONDARY_ONLY_OPT",
170 "SHUTDOWN_TIMEOUT_OPT",
172 "SPECS_CPU_COUNT_OPT",
173 "SPECS_DISK_COUNT_OPT",
174 "SPECS_DISK_SIZE_OPT",
175 "SPECS_MEM_SIZE_OPT",
176 "SPECS_NIC_COUNT_OPT",
177 "IPOLICY_DISK_TEMPLATES",
178 "IPOLICY_VCPU_RATIO",
184 "STARTUP_PAUSED_OPT",
193 "USE_EXTERNAL_MIP_SCRIPT",
200 "IGNORE_IPOLICY_OPT",
201 "INSTANCE_POLICY_OPTS",
202 # Generic functions for CLI programs
205 "GenericInstanceCreate",
211 "JobSubmittedException",
213 "RunWhileClusterStopped",
217 # Formatting functions
218 "ToStderr", "ToStdout",
221 "FormatParameterDict",
230 # command line options support infrastructure
231 "ARGS_MANY_INSTANCES",
250 "OPT_COMPL_INST_ADD_NODES",
251 "OPT_COMPL_MANY_NODES",
252 "OPT_COMPL_ONE_IALLOCATOR",
253 "OPT_COMPL_ONE_INSTANCE",
254 "OPT_COMPL_ONE_NODE",
255 "OPT_COMPL_ONE_NODEGROUP",
261 "COMMON_CREATE_OPTS",
267 #: Priorities (sorted)
269 ("low", constants.OP_PRIO_LOW),
270 ("normal", constants.OP_PRIO_NORMAL),
271 ("high", constants.OP_PRIO_HIGH),
274 #: Priority dictionary for easier lookup
275 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
276 # we migrate to Python 2.6
277 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
279 # Query result status for clients
282 QR_INCOMPLETE) = range(3)
284 #: Maximum batch size for ChooseJob
289 def __init__(self, min=0, max=None): # pylint: disable=W0622
294 return ("<%s min=%s max=%s>" %
295 (self.__class__.__name__, self.min, self.max))
298 class ArgSuggest(_Argument):
299 """Suggesting argument.
301 Value can be any of the ones passed to the constructor.
304 # pylint: disable=W0622
305 def __init__(self, min=0, max=None, choices=None):
306 _Argument.__init__(self, min=min, max=max)
307 self.choices = choices
310 return ("<%s min=%s max=%s choices=%r>" %
311 (self.__class__.__name__, self.min, self.max, self.choices))
314 class ArgChoice(ArgSuggest):
317 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
318 but value must be one of the choices.
323 class ArgUnknown(_Argument):
324 """Unknown argument to program (e.g. determined at runtime).
329 class ArgInstance(_Argument):
330 """Instances argument.
335 class ArgNode(_Argument):
341 class ArgGroup(_Argument):
342 """Node group argument.
347 class ArgJobId(_Argument):
353 class ArgFile(_Argument):
354 """File path argument.
359 class ArgCommand(_Argument):
365 class ArgHost(_Argument):
371 class ArgOs(_Argument):
# Canned positional-argument specifications used by command definition
# tables: "MANY_*" accept any number of names, "ONE_*" require exactly one.
378 ARGS_MANY_INSTANCES = [ArgInstance()]
379 ARGS_MANY_NODES = [ArgNode()]
380 ARGS_MANY_GROUPS = [ArgGroup()]
381 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
382 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
384 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
385 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
# Resolve the tag "kind" (cluster/nodegroup/node/instance) and target name
# from the parsed options and positional args.
# NOTE(review): several interior lines of this listing are elided (the
# assignment of `kind` from opts.tag_type and the per-kind result branches
# are not visible) — confirm against the full source.
388 def _ExtractTagsObject(opts, args):
389   """Extract the tag type object.
391   Note that this function will modify its args parameter.
# Programmer error: the command table must always supply tag_type.
394   if not hasattr(opts, "tag_type"):
395     raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
397   if kind == constants.TAG_CLUSTER:
399   elif kind in (constants.TAG_NODEGROUP,
401                 constants.TAG_INSTANCE):
# Non-cluster kinds need an object name as the first positional argument.
403       raise errors.OpPrereqError("no arguments passed to the command")
407     raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
# NOTE(review): interior lines elided (the early return when no source file
# was given, the '-'/stdin substitution and the readline loop header are not
# visible) — confirm against the full source.
411 def _ExtendTags(opts, args):
412   """Extend the args if a source file has been given.
414   This function will extend the tags with the contents of the file
415   passed in the 'tags_source' attribute of the opts parameter. A file
416   named '-' will be replaced by stdin.
419   fname = opts.tags_source
425   new_fh = open(fname, "r")
428     # we don't use the nice 'new_data = [line.strip() for line in fh]'
429     # because of python bug 1633941
431       line = new_fh.readline()
# Each non-empty line of the source file becomes one additional tag.
434       new_data.append(line.strip())
# Tags read from the file are appended to the ones given on the command line.
437   args.extend(new_data)
# NOTE(review): lines elided (client construction and the output loop are
# not visible) — confirm against the full source.
440 def ListTags(opts, args):
441   """List the tags on a given object.
443   This is a generic implementation that knows how to deal with all
444   three cases of tag objects (cluster, node, instance). The opts
445   argument is expected to contain a tag_type field denoting what
446   object type we work on.
449   kind, name = _ExtractTagsObject(opts, args)
# Query the tags over the luxi client and materialize for sorting/printing.
451   result = cl.QueryTags(kind, name)
452   result = list(result)
# NOTE(review): the guard condition before the OpPrereqError (presumably
# "if not args:") is elided from this listing — confirm against the full
# source.
458 def AddTags(opts, args):
459   """Add tags on a given object.
461   This is a generic implementation that knows how to deal with all
462   three cases of tag objects (cluster, node, instance). The opts
463   argument is expected to contain a tag_type field denoting what
464   object type we work on.
467   kind, name = _ExtractTagsObject(opts, args)
# May append tags read from opts.tags_source to args.
468   _ExtendTags(opts, args)
470     raise errors.OpPrereqError("No tags to be added")
471   op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
# Honors --submit: either waits for the job or just returns the job ID.
472   SubmitOrSend(op, opts)
# Mirror image of AddTags, using OpTagsDel.
# NOTE(review): the guard condition before the OpPrereqError (presumably
# "if not args:") is elided from this listing — confirm against the full
# source.
475 def RemoveTags(opts, args):
476   """Remove tags from a given object.
478   This is a generic implementation that knows how to deal with all
479   three cases of tag objects (cluster, node, instance). The opts
480   argument is expected to contain a tag_type field denoting what
481   object type we work on.
484   kind, name = _ExtractTagsObject(opts, args)
# May append tags read from opts.tags_source to args.
485   _ExtendTags(opts, args)
487     raise errors.OpPrereqError("No tags to be removed")
488   op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
# Honors --submit: either waits for the job or just returns the job ID.
489   SubmitOrSend(op, opts)
# optparse type-checker callback: converts a size string with an optional
# unit suffix into an integer (via utils.ParseUnit), translating parse
# failures into optparse's own OptionValueError so the parser reports them.
# NOTE(review): the "try:" line is elided from this listing.
492 def check_unit(option, opt, value): # pylint: disable=W0613
493   """OptParsers custom converter for units.
497     return utils.ParseUnit(value)
498   except errors.UnitParseError, err:
499     raise OptionValueError("option %s: %s" % (opt, err))
# Parser for "key=val,key=val" option payloads.  Bare keys get special
# values: "no_<key>" -> {key: False}, "-<key>" (UN_PREFIX) -> {key: None},
# plain "<key>" -> {key: True}.
# NOTE(review): interior lines elided (dict initialization, the "=" presence
# test, assignment into the dict and the return) — confirm against the full
# source.
502 def _SplitKeyVal(opt, data):
503   """Convert a KeyVal string into a dict.
505   This function will convert a key=val[,...] string into a dict. Empty
506   values will be converted specially: keys which have the prefix 'no_'
507   will have the value=False and the prefix stripped, the others will
511   @param opt: a string holding the option name for which we process the
512       data, used in building error messages
514   @param data: a string of the format key=val,key=val,...
516   @return: {key=val, key=val}
517   @raises errors.ParameterError: if there are duplicate keys
# UnescapeAndSplit honors escaped commas inside values.
522   for elem in utils.UnescapeAndSplit(data, sep=","):
524       key, val = elem.split("=", 1)
526       if elem.startswith(NO_PREFIX):
527         key, val = elem[len(NO_PREFIX):], False
528       elif elem.startswith(UN_PREFIX):
529         key, val = elem[len(UN_PREFIX):], None
531         key, val = elem, True
# Duplicate keys in one option value are a user error, not last-wins.
533       raise errors.ParameterError("Duplicate key '%s' in option %s" %
# optparse type-checker for "ident:key=val,..." payloads.  The ident itself
# may carry the removal prefixes: "no_<ident>" -> (ident, False) and
# UN_PREFIX "<ident>" -> (ident, None), in which case no key=val part may
# follow.  The UN_PREFIX check excludes idents whose remainder starts with
# a digit (so negative-looking values are not treated as removals).
# NOTE(review): interior lines elided (the ":" presence test, the "if rest:"
# guards and the return statement) — confirm against the full source.
539 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
540   """Custom parser for ident:key=val,key=val options.
542   This will store the parsed values as a tuple (ident, {key: val}). As such,
543   multiple uses of this option via action=append is possible.
547     ident, rest = value, ""
549     ident, rest = value.split(":", 1)
551   if ident.startswith(NO_PREFIX):
553       msg = "Cannot pass options when removing parameter groups: %s" % value
554       raise errors.ParameterError(msg)
555     retval = (ident[len(NO_PREFIX):], False)
556   elif (ident.startswith(UN_PREFIX) and
557         (len(ident) <= len(UN_PREFIX) or
558          not ident[len(UN_PREFIX)][0].isdigit())):
560       msg = "Cannot pass options when removing parameter groups: %s" % value
561       raise errors.ParameterError(msg)
562     retval = (ident[len(UN_PREFIX):], None)
564     kv_dict = _SplitKeyVal(opt, rest)
565     retval = (ident, kv_dict)
# optparse type-checker for plain "key=val,key=val" options; thin wrapper
# around _SplitKeyVal (no leading ident, unlike check_ident_key_val).
569 def check_key_val(option, opt, value): # pylint: disable=W0613
570   """Custom parser class for key=val,key=val options.
572   This will store the parsed values as a dict {key: val}.
575   return _SplitKeyVal(opt, value)
# optparse type-checker for yes/no ("bool" type) options; accepts the
# constants.VALUE_TRUE/VALUE_FALSE spellings as well as "yes"/"no",
# case-insensitively.
# NOTE(review): the "return False"/"return True" lines and the final "else:"
# are elided from this listing — confirm against the full source.
578 def check_bool(option, opt, value): # pylint: disable=W0613
579   """Custom parser for yes/no options.
581   This will store the parsed value as either True or False.
584   value = value.lower()
585   if value == constants.VALUE_FALSE or value == "no":
587   elif value == constants.VALUE_TRUE or value == "yes":
# Anything else is rejected rather than defaulting.
590     raise errors.ParameterError("Invalid boolean value '%s'" % value)
# optparse type-checker for comma-separated list options.
# NOTE(review): the empty-string special case branch is elided from this
# listing (only its explanatory comment survives) — confirm against the
# full source.
593 def check_list(option, opt, value): # pylint: disable=W0613
594   """Custom parser for comma-separated lists.
597   # we have to make this explicit check since "".split(",") is [""],
598   # not an empty list :(
602     return utils.UnescapeAndSplit(value)
605 # completion_suggestion is normally a list. Using numeric values not evaluating
606 # to False for dynamic completion.
607 (OPT_COMPL_MANY_NODES,
609 OPT_COMPL_ONE_INSTANCE,
611 OPT_COMPL_ONE_IALLOCATOR,
612 OPT_COMPL_INST_ADD_NODES,
613 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
615 OPT_COMPL_ALL = frozenset([
616 OPT_COMPL_MANY_NODES,
618 OPT_COMPL_ONE_INSTANCE,
620 OPT_COMPL_ONE_IALLOCATOR,
621 OPT_COMPL_INST_ADD_NODES,
622 OPT_COMPL_ONE_NODEGROUP,
# Custom optparse Option subclass: adds the "completion_suggest" attribute
# (consumed by the shell-completion machinery) and registers the extra
# value types parsed by the check_* callbacks above.
# NOTE(review): the contents of the TYPES tuple are elided from this
# listing — confirm against the full source.
626 class CliOption(Option):
627   """Custom option class for optparse.
630   ATTRS = Option.ATTRS + [
631     "completion_suggest",
633   TYPES = Option.TYPES + (
640   TYPE_CHECKER = Option.TYPE_CHECKER.copy()
641   TYPE_CHECKER["identkeyval"] = check_ident_key_val
642   TYPE_CHECKER["keyval"] = check_key_val
643   TYPE_CHECKER["unit"] = check_unit
644   TYPE_CHECKER["bool"] = check_bool
645   TYPE_CHECKER["list"] = check_list
648 # optparse.py sets make_option, so we do it for our own option class, too
649 cli_option = CliOption
654 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
655 help="Increase debugging level")
657 NOHDR_OPT = cli_option("--no-headers", default=False,
658 action="store_true", dest="no_headers",
659 help="Don't display column headers")
661 SEP_OPT = cli_option("--separator", default=None,
662 action="store", dest="separator",
663 help=("Separator between output fields"
664 " (defaults to one space)"))
666 USEUNITS_OPT = cli_option("--units", default=None,
667 dest="units", choices=("h", "m", "g", "t"),
668 help="Specify units for output (one of h/m/g/t)")
670 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
671 type="string", metavar="FIELDS",
672 help="Comma separated list of output fields")
674 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
675 default=False, help="Force the operation")
677 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
678 default=False, help="Do not require confirmation")
680 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
681 action="store_true", default=False,
682 help=("Ignore offline nodes and do as much"
685 TAG_ADD_OPT = cli_option("--tags", dest="tags",
686 default=None, help="Comma-separated list of instance"
689 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
690 default=None, help="File with tag names")
692 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
693 default=False, action="store_true",
694 help=("Submit the job and return the job ID, but"
695 " don't wait for the job to finish"))
697 SYNC_OPT = cli_option("--sync", dest="do_locking",
698 default=False, action="store_true",
699 help=("Grab locks while doing the queries"
700 " in order to ensure more consistent results"))
702 DRY_RUN_OPT = cli_option("--dry-run", default=False,
704 help=("Do not execute the operation, just run the"
705 " check steps and verify it it could be"
708 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
710 help="Increase the verbosity of the operation")
712 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
713 action="store_true", dest="simulate_errors",
714 help="Debugging option that makes the operation"
715 " treat most runtime checks as failed")
717 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
718 default=True, action="store_false",
719 help="Don't wait for sync (DANGEROUS!)")
721 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
722 action="store_true", default=False,
723 help="Enable offline instance")
725 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
726 action="store_true", default=False,
727 help="Disable down instance")
729 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
730 help=("Custom disk setup (%s)" %
731 utils.CommaJoin(constants.DISK_TEMPLATES)),
732 default=None, metavar="TEMPL",
733 choices=list(constants.DISK_TEMPLATES))
735 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
736 help="Do not create any network cards for"
739 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
740 help="Relative path under default cluster-wide"
741 " file storage dir to store file-based disks",
742 default=None, metavar="<DIR>")
744 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
745 help="Driver to use for image files",
746 default="loop", metavar="<DRIVER>",
747 choices=list(constants.FILE_DRIVER))
749 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
750 help="Select nodes for the instance automatically"
751 " using the <NAME> iallocator plugin",
752 default=None, type="string",
753 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
755 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
757 help="Set the default instance allocator plugin",
758 default=None, type="string",
759 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
761 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
763 completion_suggest=OPT_COMPL_ONE_OS)
765 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
766 type="keyval", default={},
767 help="OS parameters")
769 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
770 action="store_true", default=False,
771 help="Force an unknown variant")
773 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
774 action="store_true", default=False,
775 help="Do not install the OS (will"
778 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
779 dest="allow_runtime_chgs",
780 default=True, action="store_false",
781 help="Don't allow runtime changes")
783 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
784 type="keyval", default={},
785 help="Backend parameters")
787 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
788 default={}, dest="hvparams",
789 help="Hypervisor parameters")
791 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
792 help="Disk template parameters, in the format"
793 " template:option=value,option=value,...",
794 type="identkeyval", action="append", default=[])
796 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
797 type="keyval", default={},
798 help="Memory count specs: min, max, std"
801 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
802 type="keyval", default={},
803 help="CPU count specs: min, max, std")
805 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
806 dest="ispecs_disk_count",
807 type="keyval", default={},
808 help="Disk count specs: min, max, std")
810 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
811 type="keyval", default={},
812 help="Disk size specs: min, max, std (in MB)")
814 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
815 type="keyval", default={},
816 help="NIC count specs: min, max, std")
818 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
819 dest="ipolicy_disk_templates",
820 type="list", default=None,
821 help="Comma-separated list of"
822 " enabled disk templates")
824 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
825 dest="ipolicy_vcpu_ratio",
826 type="float", default=None,
827 help="The maximum allowed vcpu-to-cpu ratio")
829 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
830 help="Hypervisor and hypervisor options, in the"
831 " format hypervisor:option=value,option=value,...",
832 default=None, type="identkeyval")
834 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
835 help="Hypervisor and hypervisor options, in the"
836 " format hypervisor:option=value,option=value,...",
837 default=[], action="append", type="identkeyval")
839 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
840 action="store_false",
841 help="Don't check that the instance's IP"
844 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
845 default=True, action="store_false",
846 help="Don't check that the instance's name"
849 NET_OPT = cli_option("--net",
850 help="NIC parameters", default=[],
851 dest="nics", action="append", type="identkeyval")
853 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
854 dest="disks", action="append", type="identkeyval")
856 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
857 help="Comma-separated list of disks"
858 " indices to act on (e.g. 0,2) (optional,"
859 " defaults to all disks)")
861 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
862 help="Enforces a single-disk configuration using the"
863 " given disk size, in MiB unless a suffix is used",
864 default=None, type="unit", metavar="<size>")
866 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
867 dest="ignore_consistency",
868 action="store_true", default=False,
869 help="Ignore the consistency of the disks on"
872 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
873 dest="allow_failover",
874 action="store_true", default=False,
875 help="If migration is not possible fallback to"
878 NONLIVE_OPT = cli_option("--non-live", dest="live",
879 default=True, action="store_false",
880 help="Do a non-live migration (this usually means"
881 " freeze the instance, save the state, transfer and"
882 " only then resume running on the secondary node)")
884 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
886 choices=list(constants.HT_MIGRATION_MODES),
887 help="Override default migration mode (choose"
888 " either live or non-live")
890 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
891 help="Target node and optional secondary node",
892 metavar="<pnode>[:<snode>]",
893 completion_suggest=OPT_COMPL_INST_ADD_NODES)
895 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
896 action="append", metavar="<node>",
897 help="Use only this node (can be used multiple"
898 " times, if not given defaults to all nodes)",
899 completion_suggest=OPT_COMPL_ONE_NODE)
901 NODEGROUP_OPT_NAME = "--node-group"
902 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
904 help="Node group (name or uuid)",
905 metavar="<nodegroup>",
906 default=None, type="string",
907 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
909 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
911 completion_suggest=OPT_COMPL_ONE_NODE)
913 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
914 action="store_false",
915 help="Don't start the instance after creation")
917 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
918 action="store_true", default=False,
919 help="Show command instead of executing it")
921 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
922 default=False, action="store_true",
923 help="Instead of performing the migration, try to"
924 " recover from a failed cleanup. This is safe"
925 " to run even if the instance is healthy, but it"
926 " will create extra replication traffic and "
927 " disrupt briefly the replication (like during the"
930 STATIC_OPT = cli_option("-s", "--static", dest="static",
931 action="store_true", default=False,
932 help="Only show configuration data, not runtime data")
934 ALL_OPT = cli_option("--all", dest="show_all",
935 default=False, action="store_true",
936 help="Show info on all instances on the cluster."
937 " This can take a long time to run, use wisely")
939 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
940 action="store_true", default=False,
941 help="Interactive OS reinstall, lists available"
942 " OS templates for selection")
944 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
945 action="store_true", default=False,
946 help="Remove the instance from the cluster"
947 " configuration even if there are failures"
948 " during the removal process")
950 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
951 dest="ignore_remove_failures",
952 action="store_true", default=False,
953 help="Remove the instance from the"
954 " cluster configuration even if there"
955 " are failures during the removal"
958 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
959 action="store_true", default=False,
960 help="Remove the instance from the cluster")
962 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
963 help="Specifies the new node for the instance",
964 metavar="NODE", default=None,
965 completion_suggest=OPT_COMPL_ONE_NODE)
967 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
968 help="Specifies the new secondary node",
969 metavar="NODE", default=None,
970 completion_suggest=OPT_COMPL_ONE_NODE)
972 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
973 default=False, action="store_true",
974 help="Replace the disk(s) on the primary"
975 " node (applies only to internally mirrored"
976 " disk templates, e.g. %s)" %
977 utils.CommaJoin(constants.DTS_INT_MIRROR))
979 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
980 default=False, action="store_true",
981 help="Replace the disk(s) on the secondary"
982 " node (applies only to internally mirrored"
983 " disk templates, e.g. %s)" %
984 utils.CommaJoin(constants.DTS_INT_MIRROR))
986 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
987 default=False, action="store_true",
988 help="Lock all nodes and auto-promote as needed"
991 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
992 default=False, action="store_true",
993 help="Automatically replace faulty disks"
994 " (applies only to internally mirrored"
995 " disk templates, e.g. %s)" %
996 utils.CommaJoin(constants.DTS_INT_MIRROR))
998 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
999 default=False, action="store_true",
1000 help="Ignore current recorded size"
1001 " (useful for forcing activation when"
1002 " the recorded size is wrong)")
1004 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1006 completion_suggest=OPT_COMPL_ONE_NODE)
1008 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1011 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1012 help="Specify the secondary ip for the node",
1013 metavar="ADDRESS", default=None)
1015 READD_OPT = cli_option("--readd", dest="readd",
1016 default=False, action="store_true",
1017 help="Readd old node after replacing it")
1019 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1020 default=True, action="store_false",
1021 help="Disable SSH key fingerprint checking")
1023 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1024 default=False, action="store_true",
1025 help="Force the joining of a node")
1027 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1028 type="bool", default=None, metavar=_YORNO,
1029 help="Set the master_candidate flag on the node")
1031 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1032 type="bool", default=None,
1033 help=("Set the offline flag on the node"
1034 " (cluster does not communicate with offline"
1037 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1038 type="bool", default=None,
1039 help=("Set the drained flag on the node"
1040 " (excluded from allocation operations)"))
1042 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1043 type="bool", default=None, metavar=_YORNO,
1044 help="Set the master_capable flag on the node")
1046 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1047 type="bool", default=None, metavar=_YORNO,
1048 help="Set the vm_capable flag on the node")
1050 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1051 type="bool", default=None, metavar=_YORNO,
1052 help="Set the allocatable flag on a volume")
1054 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1055 help="Disable support for lvm based instances"
1057 action="store_false", default=True)
1059 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1060 dest="enabled_hypervisors",
1061 help="Comma-separated list of hypervisors",
1062 type="string", default=None)
1064 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1065 type="keyval", default={},
1066 help="NIC parameters")
1068 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1069 dest="candidate_pool_size", type="int",
1070 help="Set the candidate pool size")
1072 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1073 help=("Enables LVM and specifies the volume group"
1074 " name (cluster-wide) for disk allocation"
1075 " [%s]" % constants.DEFAULT_VG),
1076 metavar="VG", default=None)
1078 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1079 help="Destroy cluster", action="store_true")
1081 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1082 help="Skip node agreement check (dangerous)",
1083 action="store_true", default=False)
1085 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1086 help="Specify the mac prefix for the instance IP"
1087 " addresses, in the format XX:XX:XX",
1091 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1092 help="Specify the node interface (cluster-wide)"
1093 " on which the master IP address will be added"
1094 " (cluster init default: %s)" %
1095 constants.DEFAULT_BRIDGE,
1099 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1100 help="Specify the netmask of the master IP",
1104 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1105 dest="use_external_mip_script",
1106 help="Specify whether to run a user-provided"
1107 " script for the master IP address turnup and"
1108 " turndown operations",
1109 type="bool", metavar=_YORNO, default=None)
1111 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1112 help="Specify the default directory (cluster-"
1113 "wide) for storing the file-based disks [%s]" %
1114 constants.DEFAULT_FILE_STORAGE_DIR,
1116 default=constants.DEFAULT_FILE_STORAGE_DIR)
1118 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1119 dest="shared_file_storage_dir",
1120 help="Specify the default directory (cluster-"
1121 "wide) for storing the shared file-based"
1123 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1124 metavar="SHAREDDIR",
1125 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1127 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1128 help="Don't modify /etc/hosts",
1129 action="store_false", default=True)
1131 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1132 help="Don't initialize SSH keys",
1133 action="store_false", default=True)
1135 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1136 help="Enable parseable error messages",
1137 action="store_true", default=False)
1139 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1140 help="Skip N+1 memory redundancy tests",
1141 action="store_true", default=False)
1143 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1144 help="Type of reboot: soft/hard/full",
1145 default=constants.INSTANCE_REBOOT_HARD,
1147 choices=list(constants.REBOOT_TYPES))
1149 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1150 dest="ignore_secondaries",
1151 default=False, action="store_true",
1152 help="Ignore errors from secondaries")
1154 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1155 action="store_false", default=True,
1156 help="Don't shutdown the instance (unsafe)")
1158 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1159 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1160 help="Maximum time to wait")
# Command-line option definitions shared by the gnt-* scripts. Each
# cli_option(...) call builds an optparse Option; the *_OPT constants are
# referenced from the per-command option lists (e.g. COMMON_OPTS below).
# NOTE(review): this region is an elided extract — several help strings and
# argument lists are truncated mid-statement; the visible text is kept as-is.
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance shutdown")

# NOTE(review): "repetions" in the help text below is a typo for
# "repetitions" (runtime string; left untouched in this doc-only pass)
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          help=("Number of seconds between repetions of the"

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"

# --- options for gnt-cluster renew-crypto (certificates / keys) ---
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %

# NOTE(review): duplicated word "new new" in the help string below
# (runtime string; left untouched in this doc-only pass)
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       help=("Load new new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

# --- user-id pool management (gnt-cluster modify) ---
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

# --- OS definition flags (gnt-os modify) ---
HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

# --- out-of-band management options ---
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

# --- node evacuation scoping ---
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            help=("Specify disk state information in the format"
                                  " storage_type/identifier:option=value,..."),

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,..."),

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# options
# NOTE(review): the list contents below are truncated by the extraction;
# only one member of COMMON_CREATE_OPTS is visible here.
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,

# common instance policy options
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_DISK_TEMPLATES,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of env variables allowed for default args
  @return: C{(func, options, args)} on success; C{(None, None, None)}
      after printing usage/version output or on an argument error

  """
  # Env-override names must all be real commands, never aliases
  assert not (env_override - set(commands))

  # NOTE(review): elided extract — the if/else around the two "binary ="
  # assignments (empty argv vs. argv[0]) is missing from this chunk
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  # No command given, or one that is neither a command nor an alias:
  # print a usage summary with the sorted command list and bail out
  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen)  # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      # wrap each command's help so the listing stays within 79 columns
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
  # NOTE(review): the alias-resolution block is partially elided here;
  # the first raise below belongs to a missing "cmd in commands" check
    raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # Allow default arguments to be injected from the environment, e.g.
  # GNT_INSTANCE_LIST for "gnt-instance list"
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  # options after the first positional argument belong to the command
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)

      1. Keep running count of maximum number of values (max_count)

      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  # NOTE(review): elided extract — the "return False/True" statements and
  # the initial min_count/max_count assignments are missing from this chunk
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    # accumulate the minimum number of required values
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    # accumulate the maximum number of accepted values
    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

    # only the final argument may accept unlimited values
      check_max = (arg.max is not None)
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "pnode:snode" is split at the first colon into a
  two-element list; any other value (including C{None} and the empty
  string) is paired with C{None} as the secondary node.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Without variants the OS answers only to its base name
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @param default: Default fields

  """
  # No user selection: the defaults stand as-is
  if selected is None:
    return default

  # A leading "+" means "append these to the defaults" instead of
  # replacing them
  additive = selected.startswith("+")
  fields = (selected[1:] if additive else selected).split(",")
  return default + fields if additive else fields
# Decorator alias: wraps a function so the RPC infrastructure is set up
# before the call and torn down afterwards (see ganeti.rpc.RunWithRPC)
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  # NOTE(review): elided extract — the "if choices is None:" guard, the
  # "new_text = []" initialisation, the try/except around opening the tty
  # and the input loop are missing from this chunk
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    # each entry must be (char, value, description) and may not use '?'
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer if no tty is available: last entry's return value
  answer = choices[-1][1]
  # re-wrap the question text to 70 columns, preserving line breaks
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # interact directly with the controlling terminal, not stdin/stdout
  f = file("/dev/tty", "a+")
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]  # mark the default choice
    maps = dict([(entry[0], entry[1]) for entry in choices])
      f.write("/".join(chars))
      # read at most one character plus newline
      line = f.readline(2).strip().lower()
        # '?' help: list every choice with its description
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Raised to signal that a job was submitted and the client should exit.

  Carries a single argument, the ID of the submitted job; the handler is
  expected to print that ID. This is not an error condition, merely a
  structured way for clients to exit after a fire-and-forget submission.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the job ID assigned by the master

  """
  # NOTE(review): elided extract — the "if cl is None:" client creation
  # and the final "return job_id" are missing from this chunk
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for job changes via the data callbacks, forwards log
  messages to the reporting callbacks, and once the job reaches a final
  state fetches its opcode results. Returns the opcode results on
  success, raises L{errors.OpExecError} (or L{errors.JobLost}) otherwise.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  # NOTE(review): elided extract — the polling "while True:" loop header
  # and several branch/return lines are missing from this chunk
  prev_job_info = None
  prev_logmsg_serial = None

  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
    # job not found, go away!
    raise errors.JobLost("Job with id %s lost" % job_id)
  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

    for log_entry in log_entries:
      (serial, timestamp, log_type, message) = log_entry
      report_cbs.ReportLogMessage(job_id, serial, timestamp,
      # remember the highest serial so already-seen messages are skipped
      prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):
  prev_job_info = job_info

  # job reached a final state: fetch the per-opcode status and results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # find the first failed opcode and surface its error
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      # re-raise a serialized Ganeti error if possible
      errors.MaybeRaise(msg)

        raise errors.OpExecError("partial failure (opcode %d): %s" %

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses supply the data-access side of job polling: waiting for a
  job to change and querying job fields.

  """
  # NOTE(review): elided extract — the "def __init__(self):" line for the
  # docstring below is missing from this chunk
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how polling progress (log messages, "still waiting"
  notifications) is presented to the user.

  """
  # NOTE(review): elided extract — the "def __init__(self):" line for the
  # docstring below is missing from this chunk
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client: every call is forwarded to
  # the corresponding method on the wrapped client.
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): elided extract — the "self.cl = cl" assignment is
    # missing from this chunk; the methods below read self.cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that deliver log messages to a caller-supplied
  # feedback function; "not changed" notifications are ignored.
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    # forward as a (timestamp, type, message) tuple, the format the
    # feedback functions expect
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that print to stdout/stderr; used when no
  # explicit feedback function was given.
  # NOTE(review): elided extract — the "def __init__(self):" line for the
  # docstring below is missing from this chunk
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # one-shot flags so each waiting state is reported only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Anything that is not an ELOG_MESSAGE payload is stringified first; the
  result is always passed through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): elided extract — the client creation and the if/elif
  # structure selecting the reporter are partially missing from this chunk
  if reporter is None:
    # pick a reporter: wrap the feedback function if one was given,
    # otherwise fall back to plain stdio reporting
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      reporter = StdioJobPollReportCb()
    # a reporter and a feedback function are mutually exclusive
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # NOTE(review): elided extract — the "if cl is None:" client creation
  # is missing from this chunk
  # apply generic options (debug, dry-run, priority) before submitting
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  # wait for the single-opcode job and return that opcode's result
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    # fire-and-forget: submit the job and exit via the structured
    # JobSubmittedException so the caller prints the job ID
    # NOTE(review): elided extract — the "job = [op]" assignment is
    # missing from this chunk
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # NOTE(review): elided extract — the early "if not options: return"
  # guard is missing from this chunk
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    # only set a priority when the user explicitly chose one
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # NOTE(review): elided extract — the enclosing "def" header (a luxi
  # client construction helper, presumably GetClient) and its try: line
  # are missing from this chunk; only part of the body is visible.
  # TODO: Cache object?
    client = luxi.Client()
  except luxi.NoMasterError:
    # No master daemon reachable: inspect ssconf to produce a precise
    # error message for the user
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      # the cluster exists but this node is not the master
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): elided extract — the initialisation of the output
  # buffer (obuf), msg and retcode, plus several else-branch lines, are
  # missing from this chunk
  # One isinstance branch per known exception type, most specific first
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is the list of (node, script, output) failures
    for node, script, out in err.args[0]:
      obuf.write(" node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # distinguish "can't resolve myself" from a generic resolution failure
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      # two-argument form carries a structured (message, errcode) pair
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: the --submit path exits through this exception
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands
  @return: the exit code of the executed command

  """
  # NOTE(review): elided extract — the try/except around argv handling,
  # several else branches and the final return are missing from this chunk

  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0])
  binary = sys.argv[0]

  if len(sys.argv) >= 2:
    # log under "<binary> <command>" when a command was given
    logname = utils.ShellQuoteArgs([binary, sys.argv[1]])

  cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])

  binary = "<unknown program>"
  cmdline = "<unknown>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None:  # parse error

  # script-supplied overrides win over the parsed command line
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, settings-dict) pairs as produced by
      the option parser
  @return: list of per-NIC parameter dictionaries, indexed by NIC number

  """
  # NOTE(review): elided extract — the try: line, the "nics[nidx] = ndict"
  # assignment and the final return are missing from this chunk

  # highest NIC index + 1 determines how many NIC slots are needed
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # NOTE(review): [{}] * nic_max makes every unset slot alias the SAME
  # empty dict — mutating one would affect all; confirm downstream code
  # never mutates these placeholder entries
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    # validate/coerce the per-NIC parameter types
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  # NOTE(review): elided extract — instance-name extraction, the NIC
  # default branches, several try: lines and else branches are missing
  # from this chunk
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  # --- NIC handling ---
  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # --- disk handling ---
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    # --disk and the legacy -s/--os-size are mutually exclusive
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # translate the legacy single-size option into a one-disk spec
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

      # highest disk index + 1 determines how many disk slots are needed
      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max

    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        # size and adoption are mutually exclusive per disk
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        # adopted volumes keep their existing size
        ddict[constants.IDISK_SIZE] = 0
        raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  # validate/coerce backend and hypervisor parameter types
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # --- mode-specific settings ---
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; C{None} (or the master node's name) means
      the command is run locally instead of over SSH
    @type cmd: list
    @param cmd: Command and arguments
    @raise errors.OpExecError: if the command fails

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception as err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher by releasing the lock
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # Avoid unnecessary spaces at the end of a left-aligned last column
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2609 def _FormatBool(value):
2610 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status to calling function
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn(object):
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
2933 def _GetColFormatString(width, align_right):
2934 """Returns the format string for a field.
2942 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # Not a valid (seconds, microseconds) pair
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3131 def _ToStream(stream, txt, *args):
3132 """Write a message to a stream, bypassing the logging system
3134 @type stream: file object
3135 @param stream: the file to which we should write
3137 @param txt: the message
3143 stream.write(txt % args)
3148 except IOError, err:
3149 if err.errno == errno.EPIPE:
3150 # our terminal went away, we'll exit
3151 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # queued-but-not-submitted jobs: (index, name, ops)
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # submitted jobs: (index, submit status, job id/error, name)
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # monotonically-increasing index to restore submission order in results
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((next(self._counter), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((next(self._counter), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: submit each job separately instead of using SubmitManyJobs

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found; fall back to polling the oldest submitted job
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))
    if isinstance(data, dict) and data:
      # recurse for nested parameter groups, one level deeper
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      # values not explicitly set are shown as coming from the defaults
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)
3359 def ConfirmOperation(names, list_type, text, extra=""):
3360 """Ask the user to confirm an operation on a list of list_type.
3362 This function is used to request confirmation for doing an operation
3363 on a given list of list_type.
3366 @param names: the list of names that we display when
3367 we ask for confirmation
3368 @type list_type: str
3369 @param list_type: Human readable name for elements in the list (e.g. nodes)
3371 @param text: the operation that the user should confirm
3373 @return: True or False depending on user's confirmation.
3377 msg = ("The %s will operate on %d %s.\n%s"
3378 "Do you want to continue?" % (text, count, list_type, extra))
3379 affected = (("\nAffected %s:\n" % list_type) +
3380 "\n".join([" %s" % name for name in names]))
3382 choices = [("y", True, "Yes, execute the %s" % text),
3383 ("n", False, "No, abort the %s" % text)]
3386 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3389 question = msg + affected
3391 choice = AskUser(question, choices)
3394 choice = AskUser(msg + affected, choices)