4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
47 from optparse import (OptionParser, TitledHelpFormatter,
48 Option, OptionValueError)
52 # Command line options
65 "CLUSTER_DOMAIN_SECRET_OPT",
83 "FILESTORE_DRIVER_OPT",
89 "GLOBAL_SHARED_FILEDIR_OPT",
94 "DEFAULT_IALLOCATOR_OPT",
95 "IDENTIFY_DEFAULTS_OPT",
98 "IGNORE_FAILURES_OPT",
100 "IGNORE_REMOVE_FAILURES_OPT",
101 "IGNORE_SECONDARIES_OPT",
105 "MAINTAIN_NODE_HEALTH_OPT",
107 "MASTER_NETMASK_OPT",
109 "MIGRATION_MODE_OPT",
111 "NEW_CLUSTER_CERT_OPT",
112 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
113 "NEW_CONFD_HMAC_KEY_OPT",
116 "NEW_SPICE_CERT_OPT",
118 "NODE_FORCE_JOIN_OPT",
120 "NODE_PLACEMENT_OPT",
124 "NODRBD_STORAGE_OPT",
130 "NOMODIFY_ETCHOSTS_OPT",
131 "NOMODIFY_SSH_SETUP_OPT",
135 "NORUNTIME_CHGS_OPT",
138 "NOSSH_KEYCHECK_OPT",
152 "PREALLOC_WIPE_DISKS_OPT",
153 "PRIMARY_IP_VERSION_OPT",
159 "REMOVE_INSTANCE_OPT",
165 "SECONDARY_ONLY_OPT",
169 "SHUTDOWN_TIMEOUT_OPT",
171 "SPECS_CPU_COUNT_OPT",
172 "SPECS_DISK_COUNT_OPT",
173 "SPECS_DISK_SIZE_OPT",
174 "SPECS_MEM_SIZE_OPT",
175 "SPECS_NIC_COUNT_OPT",
176 "IPOLICY_DISK_TEMPLATES",
177 "IPOLICY_VCPU_RATIO",
183 "STARTUP_PAUSED_OPT",
192 "USE_EXTERNAL_MIP_SCRIPT",
199 "IGNORE_IPOLICY_OPT",
200 "INSTANCE_POLICY_OPTS",
201 # Generic functions for CLI programs
204 "GenericInstanceCreate",
210 "JobSubmittedException",
212 "RunWhileClusterStopped",
216 # Formatting functions
217 "ToStderr", "ToStdout",
220 "FormatParameterDict",
229 # command line options support infrastructure
230 "ARGS_MANY_INSTANCES",
249 "OPT_COMPL_INST_ADD_NODES",
250 "OPT_COMPL_MANY_NODES",
251 "OPT_COMPL_ONE_IALLOCATOR",
252 "OPT_COMPL_ONE_INSTANCE",
253 "OPT_COMPL_ONE_NODE",
254 "OPT_COMPL_ONE_NODEGROUP",
260 "COMMON_CREATE_OPTS",
266 #: Priorities (sorted)
268 ("low", constants.OP_PRIO_LOW),
269 ("normal", constants.OP_PRIO_NORMAL),
270 ("high", constants.OP_PRIO_HIGH),
#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
# Maps a priority name (e.g. "low") to its constants.OP_PRIO_* value.
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
278 # Query result status for clients
281 QR_INCOMPLETE) = range(3)
283 #: Maximum batch size for ChooseJob
288 def __init__(self, min=0, max=None): # pylint: disable=W0622
293 return ("<%s min=%s max=%s>" %
294 (self.__class__.__name__, self.min, self.max))
297 class ArgSuggest(_Argument):
298 """Suggesting argument.
300 Value can be any of the ones passed to the constructor.
303 # pylint: disable=W0622
304 def __init__(self, min=0, max=None, choices=None):
305 _Argument.__init__(self, min=min, max=max)
306 self.choices = choices
309 return ("<%s min=%s max=%s choices=%r>" %
310 (self.__class__.__name__, self.min, self.max, self.choices))
313 class ArgChoice(ArgSuggest):
316 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
317 but value must be one of the choices.
322 class ArgUnknown(_Argument):
323 """Unknown argument to program (e.g. determined at runtime).
328 class ArgInstance(_Argument):
329 """Instances argument.
334 class ArgNode(_Argument):
340 class ArgGroup(_Argument):
341 """Node group argument.
346 class ArgJobId(_Argument):
352 class ArgFile(_Argument):
353 """File path argument.
358 class ArgCommand(_Argument):
364 class ArgHost(_Argument):
370 class ArgOs(_Argument):
377 ARGS_MANY_INSTANCES = [ArgInstance()]
378 ARGS_MANY_NODES = [ArgNode()]
379 ARGS_MANY_GROUPS = [ArgGroup()]
380 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
381 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
383 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
384 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
387 def _ExtractTagsObject(opts, args):
388 """Extract the tag type object.
390 Note that this function will modify its args parameter.
393 if not hasattr(opts, "tag_type"):
394 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
396 if kind == constants.TAG_CLUSTER:
398 elif kind in (constants.TAG_NODEGROUP,
400 constants.TAG_INSTANCE):
402 raise errors.OpPrereqError("no arguments passed to the command")
406 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
410 def _ExtendTags(opts, args):
411 """Extend the args if a source file has been given.
413 This function will extend the tags with the contents of the file
414 passed in the 'tags_source' attribute of the opts parameter. A file
415 named '-' will be replaced by stdin.
418 fname = opts.tags_source
424 new_fh = open(fname, "r")
427 # we don't use the nice 'new_data = [line.strip() for line in fh]'
428 # because of python bug 1633941
430 line = new_fh.readline()
433 new_data.append(line.strip())
436 args.extend(new_data)
439 def ListTags(opts, args):
440 """List the tags on a given object.
442 This is a generic implementation that knows how to deal with all
443 three cases of tag objects (cluster, node, instance). The opts
444 argument is expected to contain a tag_type field denoting what
445 object type we work on.
448 kind, name = _ExtractTagsObject(opts, args)
450 result = cl.QueryTags(kind, name)
451 result = list(result)
457 def AddTags(opts, args):
458 """Add tags on a given object.
460 This is a generic implementation that knows how to deal with all
461 three cases of tag objects (cluster, node, instance). The opts
462 argument is expected to contain a tag_type field denoting what
463 object type we work on.
466 kind, name = _ExtractTagsObject(opts, args)
467 _ExtendTags(opts, args)
469 raise errors.OpPrereqError("No tags to be added")
470 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
471 SubmitOpCode(op, opts=opts)
474 def RemoveTags(opts, args):
475 """Remove tags from a given object.
477 This is a generic implementation that knows how to deal with all
478 three cases of tag objects (cluster, node, instance). The opts
479 argument is expected to contain a tag_type field denoting what
480 object type we work on.
483 kind, name = _ExtractTagsObject(opts, args)
484 _ExtendTags(opts, args)
486 raise errors.OpPrereqError("No tags to be removed")
487 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
488 SubmitOpCode(op, opts=opts)
491 def check_unit(option, opt, value): # pylint: disable=W0613
492 """OptParsers custom converter for units.
496 return utils.ParseUnit(value)
497 except errors.UnitParseError, err:
498 raise OptionValueError("option %s: %s" % (opt, err))
501 def _SplitKeyVal(opt, data):
502 """Convert a KeyVal string into a dict.
504 This function will convert a key=val[,...] string into a dict. Empty
505 values will be converted specially: keys which have the prefix 'no_'
506 will have the value=False and the prefix stripped, the others will
510 @param opt: a string holding the option name for which we process the
511 data, used in building error messages
513 @param data: a string of the format key=val,key=val,...
515 @return: {key=val, key=val}
516 @raises errors.ParameterError: if there are duplicate keys
521 for elem in utils.UnescapeAndSplit(data, sep=","):
523 key, val = elem.split("=", 1)
525 if elem.startswith(NO_PREFIX):
526 key, val = elem[len(NO_PREFIX):], False
527 elif elem.startswith(UN_PREFIX):
528 key, val = elem[len(UN_PREFIX):], None
530 key, val = elem, True
532 raise errors.ParameterError("Duplicate key '%s' in option %s" %
538 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
539 """Custom parser for ident:key=val,key=val options.
541 This will store the parsed values as a tuple (ident, {key: val}). As such,
542 multiple uses of this option via action=append is possible.
546 ident, rest = value, ""
548 ident, rest = value.split(":", 1)
550 if ident.startswith(NO_PREFIX):
552 msg = "Cannot pass options when removing parameter groups: %s" % value
553 raise errors.ParameterError(msg)
554 retval = (ident[len(NO_PREFIX):], False)
555 elif (ident.startswith(UN_PREFIX) and
556 (len(ident) <= len(UN_PREFIX) or
557 not ident[len(UN_PREFIX)][0].isdigit())):
559 msg = "Cannot pass options when removing parameter groups: %s" % value
560 raise errors.ParameterError(msg)
561 retval = (ident[len(UN_PREFIX):], None)
563 kv_dict = _SplitKeyVal(opt, rest)
564 retval = (ident, kv_dict)
568 def check_key_val(option, opt, value): # pylint: disable=W0613
569 """Custom parser class for key=val,key=val options.
571 This will store the parsed values as a dict {key: val}.
574 return _SplitKeyVal(opt, value)
577 def check_bool(option, opt, value): # pylint: disable=W0613
578 """Custom parser for yes/no options.
580 This will store the parsed value as either True or False.
583 value = value.lower()
584 if value == constants.VALUE_FALSE or value == "no":
586 elif value == constants.VALUE_TRUE or value == "yes":
589 raise errors.ParameterError("Invalid boolean value '%s'" % value)
592 def check_list(option, opt, value): # pylint: disable=W0613
593 """Custom parser for comma-separated lists.
596 # we have to make this explicit check since "".split(",") is [""],
597 # not an empty list :(
601 return utils.UnescapeAndSplit(value)
604 # completion_suggestion is normally a list. Using numeric values not evaluating
605 # to False for dynamic completion.
606 (OPT_COMPL_MANY_NODES,
608 OPT_COMPL_ONE_INSTANCE,
610 OPT_COMPL_ONE_IALLOCATOR,
611 OPT_COMPL_INST_ADD_NODES,
612 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
614 OPT_COMPL_ALL = frozenset([
615 OPT_COMPL_MANY_NODES,
617 OPT_COMPL_ONE_INSTANCE,
619 OPT_COMPL_ONE_IALLOCATOR,
620 OPT_COMPL_INST_ADD_NODES,
621 OPT_COMPL_ONE_NODEGROUP,
625 class CliOption(Option):
626 """Custom option class for optparse.
629 ATTRS = Option.ATTRS + [
630 "completion_suggest",
632 TYPES = Option.TYPES + (
639 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
640 TYPE_CHECKER["identkeyval"] = check_ident_key_val
641 TYPE_CHECKER["keyval"] = check_key_val
642 TYPE_CHECKER["unit"] = check_unit
643 TYPE_CHECKER["bool"] = check_bool
644 TYPE_CHECKER["list"] = check_list
# optparse.py sets make_option, so we do it for our own option class, too.
# All command line options in this module are declared via this alias.
cli_option = CliOption
# Generic output/behaviour options shared by most CLI scripts.

DEBUG_OPT = cli_option(
  "-d", "--debug", action="count", default=0,
  help="Increase debugging level")

NOHDR_OPT = cli_option(
  "--no-headers", dest="no_headers", action="store_true", default=False,
  help="Don't display column headers")

SEP_OPT = cli_option(
  "--separator", dest="separator", action="store", default=None,
  help="Separator between output fields (defaults to one space)")

USEUNITS_OPT = cli_option(
  "--units", dest="units", choices=("h", "m", "g", "t"), default=None,
  help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option(
  "-o", "--output", dest="output", action="store", type="string",
  metavar="FIELDS", help="Comma separated list of output fields")

FORCE_OPT = cli_option(
  "-f", "--force", dest="force", action="store_true", default=False,
  help="Force the operation")

CONFIRM_OPT = cli_option(
  "--yes", dest="confirm", action="store_true", default=False,
  help="Do not require confirmation")
679 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
680 action="store_true", default=False,
681 help=("Ignore offline nodes and do as much"
684 TAG_ADD_OPT = cli_option("--tags", dest="tags",
685 default=None, help="Comma-separated list of instance"
TAG_SRC_OPT = cli_option(
  "--from", dest="tags_source", default=None,
  help="File with tag names")

SUBMIT_OPT = cli_option(
  "--submit", dest="submit_only", action="store_true", default=False,
  help="Submit the job and return the job ID, but don't wait for the job"
       " to finish")

SYNC_OPT = cli_option(
  "--sync", dest="do_locking", action="store_true", default=False,
  help="Grab locks while doing the queries in order to ensure more"
       " consistent results")
701 DRY_RUN_OPT = cli_option("--dry-run", default=False,
703 help=("Do not execute the operation, just run the"
704 " check steps and verify it it could be"
707 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
709 help="Increase the verbosity of the operation")
DEBUG_SIMERR_OPT = cli_option(
  "--debug-simulate-errors", dest="simulate_errors", action="store_true",
  default=False,
  help="Debugging option that makes the operation treat most runtime"
       " checks as failed")

NWSYNC_OPT = cli_option(
  "--no-wait-for-sync", dest="wait_for_sync", action="store_false",
  default=True, help="Don't wait for sync (DANGEROUS!)")
ONLINE_INST_OPT = cli_option(
  "--online", dest="online_inst", action="store_true", default=False,
  help="Enable offline instance")

OFFLINE_INST_OPT = cli_option(
  "--offline", dest="offline_inst", action="store_true", default=False,
  help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option(
  "-t", "--disk-template", dest="disk_template", metavar="TEMPL",
  default=None, choices=list(constants.DISK_TEMPLATES),
  help="Custom disk setup (%s)" % utils.CommaJoin(constants.DISK_TEMPLATES))
734 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
735 help="Do not create any network cards for"
FILESTORE_DIR_OPT = cli_option(
  "--file-storage-dir", dest="file_storage_dir", default=None,
  metavar="<DIR>",
  help="Relative path under default cluster-wide file storage dir to"
       " store file-based disks")

FILESTORE_DRIVER_OPT = cli_option(
  "--file-driver", dest="file_driver", default="loop", metavar="<DRIVER>",
  choices=list(constants.FILE_DRIVER),
  help="Driver to use for image files")

IALLOCATOR_OPT = cli_option(
  "-I", "--iallocator", metavar="<NAME>", default=None, type="string",
  completion_suggest=OPT_COMPL_ONE_IALLOCATOR,
  help="Select nodes for the instance automatically using the <NAME>"
       " iallocator plugin")
754 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
756 help="Set the default instance allocator plugin",
757 default=None, type="string",
758 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
760 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
762 completion_suggest=OPT_COMPL_ONE_OS)
OSPARAMS_OPT = cli_option(
  "-O", "--os-parameters", dest="osparams", type="keyval", default={},
  help="OS parameters")

FORCE_VARIANT_OPT = cli_option(
  "--force-variant", dest="force_variant", action="store_true",
  default=False, help="Force an unknown variant")
772 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
773 action="store_true", default=False,
774 help="Do not install the OS (will"
NORUNTIME_CHGS_OPT = cli_option(
  "--no-runtime-changes", dest="allow_runtime_chgs", action="store_false",
  default=True, help="Don't allow runtime changes")

BACKEND_OPT = cli_option(
  "-B", "--backend-parameters", dest="beparams", type="keyval", default={},
  help="Backend parameters")

HVOPTS_OPT = cli_option(
  "-H", "--hypervisor-parameters", dest="hvparams", type="keyval",
  default={}, help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option(
  "-D", "--disk-parameters", dest="diskparams", type="identkeyval",
  action="append", default=[],
  help="Disk template parameters, in the format"
       " template:option=value,option=value,...")
795 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
796 type="keyval", default={},
797 help="Memory count specs: min, max, std"
# Instance specs limits (min/max/std) used by the instance policy.

SPECS_CPU_COUNT_OPT = cli_option(
  "--specs-cpu-count", dest="ispecs_cpu_count", type="keyval", default={},
  help="CPU count specs: min, max, std")

SPECS_DISK_COUNT_OPT = cli_option(
  "--specs-disk-count", dest="ispecs_disk_count", type="keyval", default={},
  help="Disk count specs: min, max, std")

SPECS_DISK_SIZE_OPT = cli_option(
  "--specs-disk-size", dest="ispecs_disk_size", type="keyval", default={},
  help="Disk size specs: min, max, std (in MB)")

SPECS_NIC_COUNT_OPT = cli_option(
  "--specs-nic-count", dest="ispecs_nic_count", type="keyval", default={},
  help="NIC count specs: min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option(
  "--ipolicy-disk-templates", dest="ipolicy_disk_templates", type="list",
  default=None, help="Comma-separated list of enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option(
  "--ipolicy-vcpu-ratio", dest="ipolicy_vcpu_ratio", type="float",
  default=None, help="The maximum allowed vcpu-to-cpu ratio")
HYPERVISOR_OPT = cli_option(
  "-H", "--hypervisor-parameters", dest="hypervisor", type="identkeyval",
  default=None,
  help="Hypervisor and hypervisor options, in the format"
       " hypervisor:option=value,option=value,...")

HVLIST_OPT = cli_option(
  "-H", "--hypervisor-parameters", dest="hvparams", type="identkeyval",
  action="append", default=[],
  help="Hypervisor and hypervisor options, in the format"
       " hypervisor:option=value,option=value,...")
838 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
839 action="store_false",
840 help="Don't check that the instance's IP"
843 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
844 default=True, action="store_false",
845 help="Don't check that the instance's name"
NET_OPT = cli_option(
  "--net", dest="nics", action="append", type="identkeyval", default=[],
  help="NIC parameters")

DISK_OPT = cli_option(
  "--disk", dest="disks", action="append", type="identkeyval", default=[],
  help="Disk parameters")

DISKIDX_OPT = cli_option(
  "--disks", dest="disks", default=None,
  help="Comma-separated list of disks indices to act on (e.g. 0,2)"
       " (optional, defaults to all disks)")

OS_SIZE_OPT = cli_option(
  "-s", "--os-size", dest="sd_size", type="unit", metavar="<size>",
  default=None,
  help="Enforces a single-disk configuration using the given disk size,"
       " in MiB unless a suffix is used")
865 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
866 dest="ignore_consistency",
867 action="store_true", default=False,
868 help="Ignore the consistency of the disks on"
871 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
872 dest="allow_failover",
873 action="store_true", default=False,
874 help="If migration is not possible fallback to"
NONLIVE_OPT = cli_option(
  "--non-live", dest="live", action="store_false", default=True,
  help="Do a non-live migration (this usually means freeze the instance,"
       " save the state, transfer and only then resume running on the"
       " secondary node)")
883 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
885 choices=list(constants.HT_MIGRATION_MODES),
886 help="Override default migration mode (choose"
887 " either live or non-live")
NODE_PLACEMENT_OPT = cli_option(
  "-n", "--node", dest="node", metavar="<pnode>[:<snode>]",
  completion_suggest=OPT_COMPL_INST_ADD_NODES,
  help="Target node and optional secondary node")

NODE_LIST_OPT = cli_option(
  "-n", "--node", dest="nodes", action="append", default=[],
  metavar="<node>", completion_suggest=OPT_COMPL_ONE_NODE,
  help="Use only this node (can be used multiple times, if not given"
       " defaults to all nodes)")
900 NODEGROUP_OPT_NAME = "--node-group"
901 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
903 help="Node group (name or uuid)",
904 metavar="<nodegroup>",
905 default=None, type="string",
906 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
908 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
910 completion_suggest=OPT_COMPL_ONE_NODE)
NOSTART_OPT = cli_option(
  "--no-start", dest="start", action="store_false", default=True,
  help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option(
  "--show-cmd", dest="show_command", action="store_true", default=False,
  help="Show command instead of executing it")
920 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
921 default=False, action="store_true",
922 help="Instead of performing the migration, try to"
923 " recover from a failed cleanup. This is safe"
924 " to run even if the instance is healthy, but it"
925 " will create extra replication traffic and "
926 " disrupt briefly the replication (like during the"
STATIC_OPT = cli_option(
  "-s", "--static", dest="static", action="store_true", default=False,
  help="Only show configuration data, not runtime data")

ALL_OPT = cli_option(
  "--all", dest="show_all", action="store_true", default=False,
  help="Show info on all instances on the cluster. This can take a long"
       " time to run, use wisely")

SELECT_OS_OPT = cli_option(
  "--select-os", dest="select_os", action="store_true", default=False,
  help="Interactive OS reinstall, lists available OS templates for"
       " selection")
IGNORE_FAILURES_OPT = cli_option(
  "--ignore-failures", dest="ignore_failures", action="store_true",
  default=False,
  help="Remove the instance from the cluster configuration even if there"
       " are failures during the removal process")
949 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
950 dest="ignore_remove_failures",
951 action="store_true", default=False,
952 help="Remove the instance from the"
953 " cluster configuration even if there"
954 " are failures during the removal"
REMOVE_INSTANCE_OPT = cli_option(
  "--remove-instance", dest="remove_instance", action="store_true",
  default=False, help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option(
  "-n", "--target-node", dest="dst_node", metavar="NODE", default=None,
  completion_suggest=OPT_COMPL_ONE_NODE,
  help="Specifies the new node for the instance")

NEW_SECONDARY_OPT = cli_option(
  "-n", "--new-secondary", dest="dst_node", metavar="NODE", default=None,
  completion_suggest=OPT_COMPL_ONE_NODE,
  help="Specifies the new secondary node")

ON_PRIMARY_OPT = cli_option(
  "-p", "--on-primary", dest="on_primary", action="store_true",
  default=False,
  help="Replace the disk(s) on the primary node (applies only to"
       " internally mirrored disk templates, e.g. %s)" %
       utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option(
  "-s", "--on-secondary", dest="on_secondary", action="store_true",
  default=False,
  help="Replace the disk(s) on the secondary node (applies only to"
       " internally mirrored disk templates, e.g. %s)" %
       utils.CommaJoin(constants.DTS_INT_MIRROR))
985 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
986 default=False, action="store_true",
987 help="Lock all nodes and auto-promote as needed"
AUTO_REPLACE_OPT = cli_option(
  "-a", "--auto", dest="auto", action="store_true", default=False,
  help="Automatically replace faulty disks (applies only to internally"
       " mirrored disk templates, e.g. %s)" %
       utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option(
  "--ignore-size", dest="ignore_size", action="store_true", default=False,
  help="Ignore current recorded size (useful for forcing activation when"
       " the recorded size is wrong)")
1003 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1005 completion_suggest=OPT_COMPL_ONE_NODE)
1007 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
SECONDARY_IP_OPT = cli_option(
  "-s", "--secondary-ip", dest="secondary_ip", metavar="ADDRESS",
  default=None, help="Specify the secondary ip for the node")

READD_OPT = cli_option(
  "--readd", dest="readd", action="store_true", default=False,
  help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option(
  "--no-ssh-key-check", dest="ssh_key_check", action="store_false",
  default=True, help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option(
  "--force-join", dest="force_join", action="store_true", default=False,
  help="Force the joining of a node")

MC_OPT = cli_option(
  "-C", "--master-candidate", dest="master_candidate", type="bool",
  default=None, metavar=_YORNO,
  help="Set the master_candidate flag on the node")
1030 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1031 type="bool", default=None,
1032 help=("Set the offline flag on the node"
1033 " (cluster does not communicate with offline"
DRAINED_OPT = cli_option(
  "-D", "--drained", dest="drained", type="bool", default=None,
  metavar=_YORNO,
  help="Set the drained flag on the node (excluded from allocation"
       " operations)")

CAPAB_MASTER_OPT = cli_option(
  "--master-capable", dest="master_capable", type="bool", default=None,
  metavar=_YORNO, help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option(
  "--vm-capable", dest="vm_capable", type="bool", default=None,
  metavar=_YORNO, help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option(
  "--allocatable", dest="allocatable", type="bool", default=None,
  metavar=_YORNO, help="Set the allocatable flag on a volume")
1053 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1054 help="Disable support for lvm based instances"
1056 action="store_false", default=True)
ENABLED_HV_OPT = cli_option(
  "--enabled-hypervisors", dest="enabled_hypervisors", type="string",
  default=None, help="Comma-separated list of hypervisors")

NIC_PARAMS_OPT = cli_option(
  "-N", "--nic-parameters", dest="nicparams", type="keyval", default={},
  help="NIC parameters")

CP_SIZE_OPT = cli_option(
  "-C", "--candidate-pool-size", dest="candidate_pool_size", type="int",
  default=None, help="Set the candidate pool size")

VG_NAME_OPT = cli_option(
  "--vg-name", dest="vg_name", metavar="VG", default=None,
  help="Enables LVM and specifies the volume group name (cluster-wide)"
       " for disk allocation [%s]" % constants.DEFAULT_VG)

YES_DOIT_OPT = cli_option(
  "--yes-do-it", "--ya-rly", dest="yes_do_it", action="store_true",
  help="Destroy cluster")

NOVOTING_OPT = cli_option(
  "--no-voting", dest="no_voting", action="store_true", default=False,
  help="Skip node agreement check (dangerous)")
1084 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1085 help="Specify the mac prefix for the instance IP"
1086 " addresses, in the format XX:XX:XX",
1090 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1091 help="Specify the node interface (cluster-wide)"
1092 " on which the master IP address will be added"
1093 " (cluster init default: %s)" %
1094 constants.DEFAULT_BRIDGE,
1098 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1099 help="Specify the netmask of the master IP",
USE_EXTERNAL_MIP_SCRIPT = cli_option(
  "--use-external-mip-script", dest="use_external_mip_script", type="bool",
  metavar=_YORNO, default=None,
  help="Specify whether to run a user-provided script for the master IP"
       " address turnup and turndown operations")
1110 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1111 help="Specify the default directory (cluster-"
1112 "wide) for storing the file-based disks [%s]" %
1113 constants.DEFAULT_FILE_STORAGE_DIR,
1115 default=constants.DEFAULT_FILE_STORAGE_DIR)
1117 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1118 dest="shared_file_storage_dir",
1119 help="Specify the default directory (cluster-"
1120 "wide) for storing the shared file-based"
1122 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1123 metavar="SHAREDDIR",
1124 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
NOMODIFY_ETCHOSTS_OPT = cli_option(
  "--no-etc-hosts", dest="modify_etc_hosts", action="store_false",
  default=True, help="Don't modify /etc/hosts")

NOMODIFY_SSH_SETUP_OPT = cli_option(
  "--no-ssh-init", dest="modify_ssh_setup", action="store_false",
  default=True, help="Don't initialize SSH keys")

ERROR_CODES_OPT = cli_option(
  "--error-codes", dest="error_codes", action="store_true", default=False,
  help="Enable parseable error messages")

NONPLUS1_OPT = cli_option(
  "--no-nplus1-mem", dest="skip_nplusone_mem", action="store_true",
  default=False, help="Skip N+1 memory redundancy tests")
1142 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1143 help="Type of reboot: soft/hard/full",
1144 default=constants.INSTANCE_REBOOT_HARD,
1146 choices=list(constants.REBOOT_TYPES))
IGNORE_SECONDARIES_OPT = cli_option(
  "--ignore-secondaries", dest="ignore_secondaries", action="store_true",
  default=False, help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option(
  "--noshutdown", dest="shutdown", action="store_false", default=True,
  help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option(
  "--timeout", dest="timeout", type="int",
  default=constants.DEFAULT_SHUTDOWN_TIMEOUT, help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option(
  "--shutdown-timeout", dest="shutdown_timeout", type="int",
  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
  help="Maximum time to wait for instance shutdown")
1166 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1168 help=("Number of seconds between repetions of the"
1171 EARLY_RELEASE_OPT = cli_option("--early-release",
1172 dest="early_release", default=False,
1173 action="store_true",
1174 help="Release the locks on the secondary"
NEW_CLUSTER_CERT_OPT = cli_option(
  "--new-cluster-certificate", dest="new_cluster_cert",
  action="store_true", default=False,
  help="Generate a new cluster certificate")
1182 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1184 help="File containing new RAPI certificate")
1186 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1187 default=None, action="store_true",
1188 help=("Generate a new self-signed RAPI"
1191 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1193 help="File containing new SPICE certificate")
1195 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1197 help="File containing the certificate of the CA"
1198 " which signed the SPICE certificate")
1200 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1201 dest="new_spice_cert", default=None,
1202 action="store_true",
1203 help=("Generate a new self-signed SPICE"
1206 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1207 dest="new_confd_hmac_key",
1208 default=False, action="store_true",
1209 help=("Create a new HMAC key for %s" %
# Fix: help text previously read "Load new new cluster domain secret"
# (duplicated word).
CLUSTER_DOMAIN_SECRET_OPT = cli_option(
  "--cluster-domain-secret", dest="cluster_domain_secret",
  help="Load new cluster domain secret from file")
1218 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1219 dest="new_cluster_domain_secret",
1220 default=False, action="store_true",
1221 help=("Create a new cluster domain"
1224 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1225 dest="use_replication_network",
1226 help="Whether to use the replication network"
1227 " for talking to the nodes",
1228 action="store_true", default=False)
1230 MAINTAIN_NODE_HEALTH_OPT = \
1231 cli_option("--maintain-node-health", dest="maintain_node_health",
1232 metavar=_YORNO, default=None, type="bool",
1233 help="Configure the cluster to automatically maintain node"
1234 " health, by shutting down unknown instances, shutting down"
1235 " unknown DRBD devices, etc.")
1237 IDENTIFY_DEFAULTS_OPT = \
1238 cli_option("--identify-defaults", dest="identify_defaults",
1239 default=False, action="store_true",
1240 help="Identify which saved instance parameters are equal to"
1241 " the current cluster defaults and set them as such, instead"
1242 " of marking them as overridden")
1244 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1245 action="store", dest="uid_pool",
1246 help=("A list of user-ids or user-id"
1247 " ranges separated by commas"))
1249 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1250 action="store", dest="add_uids",
1251 help=("A list of user-ids or user-id"
1252 " ranges separated by commas, to be"
1253 " added to the user-id pool"))
1255 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1256 action="store", dest="remove_uids",
1257 help=("A list of user-ids or user-id"
1258 " ranges separated by commas, to be"
1259 " removed from the user-id pool"))
1261 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1262 action="store", dest="reserved_lvs",
1263 help=("A comma-separated list of reserved"
1264 " logical volumes names, that will be"
1265 " ignored by cluster verify"))
1267 ROMAN_OPT = cli_option("--roman",
1268 dest="roman_integers", default=False,
1269 action="store_true",
1270 help="Use roman numbers for positive integers")
1272 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1273 action="store", default=None,
1274 help="Specifies usermode helper for DRBD")
1276 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1277 action="store_false", default=True,
1278 help="Disable support for DRBD")
1280 PRIMARY_IP_VERSION_OPT = \
1281 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1282 action="store", dest="primary_ip_version",
1283 metavar="%d|%d" % (constants.IP4_VERSION,
1284 constants.IP6_VERSION),
1285 help="Cluster-wide IP version for primary IP")
1287 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1288 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1289 choices=_PRIONAME_TO_VALUE.keys(),
1290 help="Priority for opcode processing")
1292 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1293 type="bool", default=None, metavar=_YORNO,
1294 help="Sets the hidden flag on the OS")
1296 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1297 type="bool", default=None, metavar=_YORNO,
1298 help="Sets the blacklisted flag on the OS")
1300 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1301 type="bool", metavar=_YORNO,
1302 dest="prealloc_wipe_disks",
1303 help=("Wipe disks prior to instance"
1306 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1307 type="keyval", default=None,
1308 help="Node parameters")
1310 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1311 action="store", metavar="POLICY", default=None,
1312 help="Allocation policy for the node group")
1314 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1315 type="bool", metavar=_YORNO,
1316 dest="node_powered",
1317 help="Specify if the SoR for node is powered")
1319 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1320 default=constants.OOB_TIMEOUT,
1321 help="Maximum time to wait for out-of-band helper")
1323 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1324 default=constants.OOB_POWER_DELAY,
1325 help="Time in seconds to wait between power-ons")
1327 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1328 action="store_true", default=False,
1329 help=("Whether command argument should be treated"
1332 NO_REMEMBER_OPT = cli_option("--no-remember",
1334 action="store_true", default=False,
1335 help="Perform but do not record the change"
1336 " in the configuration")
1338 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1339 default=False, action="store_true",
1340 help="Evacuate primary instances only")
1342 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1343 default=False, action="store_true",
1344 help="Evacuate secondary instances only"
1345 " (applies only to internally mirrored"
1346 " disk templates, e.g. %s)" %
1347 utils.CommaJoin(constants.DTS_INT_MIRROR))
1349 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1350 action="store_true", default=False,
1351 help="Pause instance at startup")
1353 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1354 help="Destination node group (name or uuid)",
1355 default=None, action="append",
1356 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1358 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1359 action="append", dest="ignore_errors",
1360 choices=list(constants.CV_ALL_ECODES_STRINGS),
1361 help="Error code to be ignored")
1363 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1365 help=("Specify disk state information in the format"
1366 " storage_type/identifier:option=value,..."),
1369 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1371 help=("Specify hypervisor state information in the"
1372 " format hypervisor:option=value,..."),
1375 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1376 action="store_true", default=False,
1377 help="Ignore instance policy violations")
1379 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1380 help="Sets the instance's runtime memory,"
1381 " ballooning it up or down to the new value",
1382 default=None, type="unit", metavar="<size>")
1384 #: Options provided by all commands
1385 COMMON_OPTS = [DEBUG_OPT]
1387 # common options for creating instances. add and import then add their own
1389 COMMON_CREATE_OPTS = [
1394 FILESTORE_DRIVER_OPT,
1411 # common instance policy options
1412 INSTANCE_POLICY_OPTS = [
1413 SPECS_CPU_COUNT_OPT,
1414 SPECS_DISK_COUNT_OPT,
1415 SPECS_DISK_SIZE_OPT,
1417 SPECS_NIC_COUNT_OPT,
1418 IPOLICY_DISK_TEMPLATES,
# NOTE(review): this dump is line-elided; code kept byte-identical, comments only added.
1423 def _ParseArgs(argv, commands, aliases, env_override):
1424 """Parser for the command line arguments.
1426 This function parses the arguments and returns the function which
1427 must be executed together with its (modified) arguments.
1429 @param argv: the command line
1430 @param commands: dictionary with special contents, see the design
1431 doc for cmdline handling
1432 @param aliases: dictionary with command aliases {'alias': 'target, ...}
1433 @param env_override: list of env variables allowed for default args
# Every env-override name must correspond to a known command
1436 assert not (env_override - set(commands))
1439 binary = "<command>"
1441 binary = argv[0].split("/")[-1]
1443 if len(argv) > 1 and argv[1] == "--version":
1444 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1445 constants.RELEASE_VERSION)
1446 # Quit right away. That way we don't have to care about this special
1447 # argument. optparse.py does it the same.
# Unknown or missing command: print a usage summary listing all commands
1450 if len(argv) < 2 or not (argv[1] in commands or
1451 argv[1] in aliases):
1452 # let's do a nice thing
1453 sortedcmds = commands.keys()
1456 ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1457 ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1460 # compute the max line length for cmd + usage
1461 mlen = max([len(" %s" % cmd) for cmd in commands])
1462 mlen = min(60, mlen) # should not get here...
1464 # and format a nice command list
1465 ToStdout("Commands:")
1466 for cmd in sortedcmds:
1467 cmdstr = " %s" % (cmd,)
# commands[cmd] is a tuple; index 4 is the short help text
1468 help_text = commands[cmd][4]
1469 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1470 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1471 for line in help_lines:
1472 ToStdout("%-*s %s", mlen, "", line)
1476 return None, None, None
1478 # get command, unalias it, and look it up in commands
1482 raise errors.ProgrammerError("Alias '%s' overrides an existing"
1485 if aliases[cmd] not in commands:
1486 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1487 " command '%s'" % (cmd, aliases[cmd]))
# Per-command environment override, e.g. FOO_CMD=... supplies default args
1491 if cmd in env_override:
1492 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1493 env_args = os.environ.get(args_env_name)
1495 argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))
1497 func, args_def, parser_opts, usage, description = commands[cmd]
1498 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1499 description=description,
1500 formatter=TitledHelpFormatter(),
1501 usage="%%prog %s %s" % (cmd, usage))
1502 parser.disable_interspersed_args()
1503 options, args = parser.parse_args(args=argv[1:])
1505 if not _CheckArguments(cmd, args_def, args):
1506 return None, None, None
1508 return func, options, args
# NOTE(review): this dump is line-elided; code kept byte-identical, comments only added.
1511 def _CheckArguments(cmd, args_def, args):
1512 """Verifies the arguments using the argument definition.
1516 1. Abort with error if values specified by user but none expected.
1518 1. For each argument in definition
1520 1. Keep running count of minimum number of values (min_count)
1521 1. Keep running count of maximum number of values (max_count)
1522 1. If it has an unlimited number of values
1524 1. Abort with error if it's not the last argument in the definition
1526 1. If last argument has limited number of values
1528 1. Abort with error if number of values doesn't match or is too large
1530 1. Abort with error if user didn't pass enough values (min_count)
1533 if args and not args_def:
1534 ToStderr("Error: Command %s expects no arguments", cmd)
1541 last_idx = len(args_def) - 1
# Accumulate the min/max counts over all argument definitions
1543 for idx, arg in enumerate(args_def):
1544 if min_count is None:
1546 elif arg.min is not None:
1547 min_count += arg.min
1549 if max_count is None:
1551 elif arg.max is not None:
1552 max_count += arg.max
1555 check_max = (arg.max is not None)
# Only the last argument may be unbounded, otherwise counting is ambiguous
1557 elif arg.max is None:
1558 raise errors.ProgrammerError("Only the last argument can have max=None")
1561 # Command with exact number of arguments
1562 if (min_count is not None and max_count is not None and
1563 min_count == max_count and len(args) != min_count):
1564 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1567 # Command with limited number of arguments
1568 if max_count is not None and len(args) > max_count:
1569 ToStderr("Error: Command %s expects only %d argument(s)",
1573 # Command with some required arguments
1574 if min_count is not None and len(args) < min_count:
1575 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: the user-supplied node option, optionally in the
      "primary:secondary" form

  @return: a (primary, secondary) pair; the secondary is C{None} when
      no ":" separator is present

  """
  if value and ":" in value:
    # Split only on the first colon; the rest belongs to the secondary node
    return value.split(":", 1)
  else:
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    # No variants (None or empty list): the base name is the only valid name
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  @rtype: list
  @return: the default fields when nothing was selected; the defaults
      plus the user-given fields when the selection starts with "+";
      otherwise only the user-given fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    # A leading "+" means: extend the default fields instead of replacing them
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias: wraps a CLI function so it runs with RPC set up
# (see rpc.RunWithRPC for the actual semantics)
1627 UsesRPC = rpc.RunWithRPC
# NOTE(review): this dump is line-elided; code kept byte-identical, comments only added.
1630 def AskUser(text, choices=None):
1631 """Ask the user a question.
1633 @param text: the question to ask
1635 @param choices: list with elements tuples (input_char, return_value,
1636 description); if not given, it will default to: [('y', True,
1637 'Perform the operation'), ('n', False, 'Do no do the operation')];
1638 note that the '?' char is reserved for help
1640 @return: one of the return values from the choices list; if input is
1641 not possible (i.e. not running with a tty, we return the last
1646 choices = [("y", True, "Perform the operation"),
1647 ("n", False, "Do not perform the operation")]
1648 if not choices or not isinstance(choices, list):
1649 raise errors.ProgrammerError("Invalid choices argument to AskUser")
# Each choice must be a (char, return_value, description) tuple; '?' is help
1650 for entry in choices:
1651 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1652 raise errors.ProgrammerError("Invalid choices element to AskUser")
# Default answer is the last choice's return value (used when no tty)
1654 answer = choices[-1][1]
1656 for line in text.splitlines():
1657 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1658 text = "\n".join(new_text)
# Talk to the controlling terminal directly, bypassing stdin/stdout
1660 f = file("/dev/tty", "a+")
1664 chars = [entry[0] for entry in choices]
1665 chars[-1] = "[%s]" % chars[-1]
1667 maps = dict([(entry[0], entry[1]) for entry in choices])
1671 f.write("/".join(chars))
1673 line = f.readline(2).strip().lower()
# '?' prints the description of every choice, then re-prompts
1678 for entry in choices:
1679 f.write(" %s - %s\n" % (entry[0], entry[2]))
1687 class JobSubmittedException(Exception):
1688 """Job was submitted, client should exit.
1690 This exception has one argument, the ID of the job that was
1691 submitted. The handler should print this ID.
1693 This is not an error, just a structured way to exit from clients.
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  @return: the job ID as returned by the master

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
# NOTE(review): this dump is line-elided; code kept byte-identical, comments only added.
1716 def GenericPollJob(job_id, cbs, report_cbs):
1717 """Generic job-polling function.
1719 @type job_id: number
1720 @param job_id: Job ID
1721 @type cbs: Instance of L{JobPollCbBase}
1722 @param cbs: Data callbacks
1723 @type report_cbs: Instance of L{JobPollReportCbBase}
1724 @param report_cbs: Reporting callbacks
# Track last-seen job info and log serial so WaitForJobChangeOnce only
# reports actual changes
1727 prev_job_info = None
1728 prev_logmsg_serial = None
1733 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1736 # job not found, go away!
1737 raise errors.JobLost("Job with id %s lost" % job_id)
1739 if result == constants.JOB_NOTCHANGED:
1740 report_cbs.ReportNotChanged(job_id, status)
1745 # Split result, a tuple of (field values, log entries)
1746 (job_info, log_entries) = result
1747 (status, ) = job_info
1750 for log_entry in log_entries:
1751 (serial, timestamp, log_type, message) = log_entry
1752 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1754 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1756 # TODO: Handle canceled and archived jobs
1757 elif status in (constants.JOB_STATUS_SUCCESS,
1758 constants.JOB_STATUS_ERROR,
1759 constants.JOB_STATUS_CANCELING,
1760 constants.JOB_STATUS_CANCELED):
1763 prev_job_info = job_info
# Job reached a final state: fetch per-opcode status/results
1765 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1767 raise errors.JobLost("Job with id %s lost" % job_id)
1769 status, opstatus, result = jobs[0]
1771 if status == constants.JOB_STATUS_SUCCESS:
1774 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1775 raise errors.OpExecError("Job was canceled")
# Find the first failed opcode and raise a matching error
1778 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1779 if status == constants.OP_STATUS_SUCCESS:
1781 elif status == constants.OP_STATUS_ERROR:
1782 errors.MaybeRaise(msg)
1785 raise errors.OpExecError("partial failure (opcode %d): %s" %
1788 raise errors.OpExecError(str(msg))
1790 # default failure mode
1791 raise errors.OpExecError(result)
# Abstract data-callback interface consumed by GenericPollJob
1794 class JobPollCbBase:
1795 """Base class for L{GenericPollJob} callbacks.
1799 """Initializes this class.
1803 def WaitForJobChangeOnce(self, job_id, fields,
1804 prev_job_info, prev_log_serial):
1805 """Waits for changes on a job.
# Abstract: subclasses must implement
1808 raise NotImplementedError()
1810 def QueryJobs(self, job_ids, fields):
1811 """Returns the selected fields for the selected job IDs.
1813 @type job_ids: list of numbers
1814 @param job_ids: Job IDs
1815 @type fields: list of strings
1816 @param fields: Fields
# Abstract: subclasses must implement
1819 raise NotImplementedError()
# Abstract reporting-callback interface consumed by GenericPollJob
1822 class JobPollReportCbBase:
1823 """Base class for L{GenericPollJob} reporting callbacks.
1827 """Initializes this class.
1831 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1832 """Handles a log message.
# Abstract: subclasses must implement
1835 raise NotImplementedError()
1837 def ReportNotChanged(self, job_id, status):
1838 """Called for if a job hasn't changed in a while.
1840 @type job_id: number
1841 @param job_id: Job ID
1842 @type status: string or None
1843 @param status: Job status if available
# Abstract: subclasses must implement
1846 raise NotImplementedError()
# Data callbacks backed by a luxi client; forwards straight to the client
1849 class _LuxiJobPollCb(JobPollCbBase):
1850 def __init__(self, cl):
1851 """Initializes this class.
1854 JobPollCbBase.__init__(self)
1857 def WaitForJobChangeOnce(self, job_id, fields,
1858 prev_job_info, prev_log_serial):
1859 """Waits for changes on a job.
1862 return self.cl.WaitForJobChangeOnce(job_id, fields,
1863 prev_job_info, prev_log_serial)
1865 def QueryJobs(self, job_ids, fields):
1866 """Returns the selected fields for the selected job IDs.
1869 return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks which forward log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with (timestamp, log_type, log_msg)
        tuples for every job log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore: the feedback function only cares about actual log messages
# Reporting callbacks which print job progress to stdout/stderr
1896 class StdioJobPollReportCb(JobPollReportCbBase):
1898 """Initializes this class.
1901 JobPollReportCbBase.__init__(self)
# Remember what was already reported, so each state is announced only once
1903 self.notified_queued = False
1904 self.notified_waitlock = False
1906 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1907 """Handles a log message.
1910 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1911 FormatLogMessage(log_type, log_msg))
1913 def ReportNotChanged(self, job_id, status):
1914 """Called if a job hasn't changed in a while.
1920 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1921 ToStderr("Job %s is waiting in queue", job_id)
1922 self.notified_queued = True
1924 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1925 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1926 self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: the type of the log message (one of the
      C{constants.ELOG_*} values)
  @param log_msg: the message payload
  @rtype: string
  @return: the message, stringified if necessary and safely encoded

  """
  if log_type != constants.ELOG_MESSAGE:
    # Non-plain-message payloads (structured data) are stringified first
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: callback for log messages; mutually exclusive with
      C{reporter}
  @param reporter: L{JobPollReportCbBase} instance for reporting progress;
      defaults to stdio-based reporting

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit as a single-opcode job
  @return: the result of the first (and only) opcode of the job

  """
  if cl is None:
    cl = GetClient()

  # Apply generic options (debug, dry-run, priority) before submission
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # Structured, non-error exit: the caller catches this and prints the ID
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    # dry_run/priority are optional attributes; only set them when given
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
# NOTE(review): the enclosing "def GetClient()" line is elided in this dump;
# the fragment below is its body, kept byte-identical.
2027 # TODO: Cache object?
2029 client = luxi.Client()
2030 except luxi.NoMasterError:
2031 ss = ssconf.SimpleStore()
2033 # Try to read ssconf file
2036 except errors.ConfigurationError:
2037 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2038 " not part of a cluster")
# Creating the client failed: give a useful hint whether this node is
# simply not the master, or not in a cluster at all
2040 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2041 if master != myself:
2042 raise errors.OpPrereqError("This is not the master node, please connect"
2043 " to node '%s' and rerun the command" %
# NOTE(review): this dump is line-elided; code kept byte-identical, comments only added.
2049 def FormatError(err):
2050 """Return a formatted error message for a given error.
2052 This function takes an exception instance and returns a tuple
2053 consisting of two values: first, the recommended exit code, and
2054 second, a string describing the error message (not
2055 newline-terminated).
# One branch per known exception type, most specific first
2061 if isinstance(err, errors.ConfigurationError):
2062 txt = "Corrupt configuration file: %s" % msg
2064 obuf.write(txt + "\n")
2065 obuf.write("Aborting.")
2067 elif isinstance(err, errors.HooksAbort):
2068 obuf.write("Failure: hooks execution failed:\n")
2069 for node, script, out in err.args[0]:
2071 obuf.write(" node: %s, script: %s, output: %s\n" %
2072 (node, script, out))
2074 obuf.write(" node: %s, script: %s (no output)\n" %
2076 elif isinstance(err, errors.HooksFailure):
2077 obuf.write("Failure: hooks general failure: %s" % msg)
2078 elif isinstance(err, errors.ResolverError):
2079 this_host = netutils.Hostname.GetSysName()
2080 if err.args[0] == this_host:
2081 msg = "Failure: can't resolve my own hostname ('%s')"
2083 msg = "Failure: can't resolve hostname '%s'"
2084 obuf.write(msg % err.args[0])
2085 elif isinstance(err, errors.OpPrereqError):
2086 if len(err.args) == 2:
2087 obuf.write("Failure: prerequisites not met for this"
2088 " operation:\nerror type: %s, error details:\n%s" %
2089 (err.args[1], err.args[0]))
2091 obuf.write("Failure: prerequisites not met for this"
2092 " operation:\n%s" % msg)
2093 elif isinstance(err, errors.OpExecError):
2094 obuf.write("Failure: command execution error:\n%s" % msg)
2095 elif isinstance(err, errors.TagError):
2096 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2097 elif isinstance(err, errors.JobQueueDrainError):
2098 obuf.write("Failure: the job queue is marked for drain and doesn't"
2099 " accept new requests\n")
2100 elif isinstance(err, errors.JobQueueFull):
2101 obuf.write("Failure: the job queue is full and doesn't accept new"
2102 " job submissions until old jobs are archived\n")
2103 elif isinstance(err, errors.TypeEnforcementError):
2104 obuf.write("Parameter Error: %s" % msg)
2105 elif isinstance(err, errors.ParameterError):
2106 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2107 elif isinstance(err, luxi.NoMasterError):
2108 obuf.write("Cannot communicate with the master daemon.\nIs it running"
2109 " and listening for connections?")
2110 elif isinstance(err, luxi.TimeoutError):
2111 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2112 " been submitted and will continue to run even if the call"
2113 " timed out. Useful commands in this situation are \"gnt-job"
2114 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2116 elif isinstance(err, luxi.PermissionError):
2117 obuf.write("It seems you don't have permissions to connect to the"
2118 " master daemon.\nPlease retry as a different user.")
2119 elif isinstance(err, luxi.ProtocolError):
2120 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2122 elif isinstance(err, errors.JobLost):
2123 obuf.write("Error checking job status: %s" % msg)
2124 elif isinstance(err, errors.QueryFilterParseError):
2125 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2126 obuf.write("\n".join(err.GetDetails()))
2127 elif isinstance(err, errors.GenericError):
2128 obuf.write("Unhandled Ganeti error: %s" % msg)
2129 elif isinstance(err, JobSubmittedException):
# Not an error: --submit was used, just report the job ID
2130 obuf.write("JobID: %s\n" % err.args[0])
2133 obuf.write("Unhandled exception: %s" % msg)
2134 return retcode, obuf.getvalue().rstrip("\n")
# NOTE(review): this dump is line-elided; code kept byte-identical, comments only added.
2137 def GenericMain(commands, override=None, aliases=None,
2138 env_override=frozenset()):
2139 """Generic main function for all the gnt-* commands.
2141 @param commands: a dictionary with a special structure, see the design doc
2142 for command line handling.
2143 @param override: if not None, we expect a dictionary with keys that will
2144 override command line options; this can be used to pass
2145 options from the scripts to generic functions
2146 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2147 @param env_override: list of environment names which are allowed to submit
2148 default args for commands
2151 # save the program name and the entire command line for later logging
2153 binary = os.path.basename(sys.argv[0])
2155 binary = sys.argv[0]
2157 if len(sys.argv) >= 2:
2158 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2162 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2164 binary = "<unknown program>"
2165 cmdline = "<unknown>"
2171 func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
2172 except errors.ParameterError, err:
2173 result, err_msg = FormatError(err)
2177 if func is None: # parse error
# Script-supplied overrides win over command line options
2180 if override is not None:
2181 for key, val in override.iteritems():
2182 setattr(options, key, val)
2184 utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
2185 stderr_logging=True)
2187 logging.info("Command line: %s", cmdline)
2190 result = func(options, args)
2191 except (errors.GenericError, luxi.ProtocolError,
2192 JobSubmittedException), err:
2193 result, err_msg = FormatError(err)
2194 logging.exception("Error during command processing")
2196 except KeyboardInterrupt:
2197 result = constants.EXIT_FAILURE
2198 ToStderr("Aborted. Note that if the operation created any jobs, they"
2199 " might have been submitted and"
2200 " will continue to run in the background.")
2201 except IOError, err:
2202 if err.errno == errno.EPIPE:
2203 # our terminal went away, we'll exit
2204 sys.exit(constants.EXIT_FAILURE)
# NOTE(review): this dump is line-elided; code kept byte-identical, comments only added.
2211 def ParseNicOption(optvalue):
2212 """Parses the value of the --net option(s).
# Highest NIC index + 1 determines how many NIC slots to allocate
2216 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2217 except (TypeError, ValueError), err:
2218 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2220 nics = [{}] * nic_max
2221 for nidx, ndict in optvalue:
2224 if not isinstance(ndict, dict):
2225 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2226 " got %s" % (nidx, ndict))
2228 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
# NOTE(review): this dump is line-elided; code kept byte-identical, comments only added.
2235 def GenericInstanceCreate(mode, opts, args):
2236 """Add an instance to the cluster via either creation or import.
2238 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2239 @param opts: the command line options selected by the user
2241 @param args: should contain only one element, the new instance name
2243 @return: the desired exit code
2248 (pnode, snode) = SplitNodeOption(opts.node)
2253 hypervisor, hvparams = opts.hypervisor
2256 nics = ParseNicOption(opts.nics)
2260 elif mode == constants.INSTANCE_CREATE:
2261 # default of one nic, all auto
# Validate disk specification: diskless excludes any disk information
2267 if opts.disk_template == constants.DT_DISKLESS:
2268 if opts.disks or opts.sd_size is not None:
2269 raise errors.OpPrereqError("Diskless instance but disk"
2270 " information passed")
2273 if (not opts.disks and not opts.sd_size
2274 and mode == constants.INSTANCE_CREATE):
2275 raise errors.OpPrereqError("No disk information specified")
2276 if opts.disks and opts.sd_size is not None:
2277 raise errors.OpPrereqError("Please use either the '--disk' or"
2279 if opts.sd_size is not None:
# Legacy single-disk --os-size option, converted to the --disk format
2280 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2284 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2285 except ValueError, err:
2286 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2287 disks = [{}] * disk_max
2290 for didx, ddict in opts.disks:
2292 if not isinstance(ddict, dict):
2293 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2294 raise errors.OpPrereqError(msg)
2295 elif constants.IDISK_SIZE in ddict:
2296 if constants.IDISK_ADOPT in ddict:
2297 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2298 " (disk %d)" % didx)
2300 ddict[constants.IDISK_SIZE] = \
2301 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2302 except ValueError, err:
2303 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2305 elif constants.IDISK_ADOPT in ddict:
2306 if mode == constants.INSTANCE_IMPORT:
2307 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2309 ddict[constants.IDISK_SIZE] = 0
2311 raise errors.OpPrereqError("Missing size or adoption source for"
2315 if opts.tags is not None:
2316 tags = opts.tags.split(",")
2320 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2321 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
# Mode-specific parameters: create vs. import
2323 if mode == constants.INSTANCE_CREATE:
2326 force_variant = opts.force_variant
2329 no_install = opts.no_install
2330 identify_defaults = False
2331 elif mode == constants.INSTANCE_IMPORT:
2334 force_variant = False
2335 src_node = opts.src_node
2336 src_path = opts.src_dir
2338 identify_defaults = opts.identify_defaults
2340 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2342 op = opcodes.OpInstanceCreate(instance_name=instance,
2344 disk_template=opts.disk_template,
2346 pnode=pnode, snode=snode,
2347 ip_check=opts.ip_check,
2348 name_check=opts.name_check,
2349 wait_for_sync=opts.wait_for_sync,
2350 file_storage_dir=opts.file_storage_dir,
2351 file_driver=opts.file_driver,
2352 iallocator=opts.iallocator,
2353 hypervisor=hypervisor,
2355 beparams=opts.beparams,
2356 osparams=opts.osparams,
2360 force_variant=force_variant,
2364 no_install=no_install,
2365 identify_defaults=identify_defaults,
2366 ignore_ipolicy=opts.ignore_ipolicy)
2368 SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception as err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Release watcher lock
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2603 def _FormatBool(value):
2604 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  # Per-field overrides take precedence over everything else
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report field status for statistics collection
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)

  if verbose:
    return verbose_text
  return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @return: those field definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if not names:
    names = None

  qfilter = qlang.MakeFilter(names, force_filter)

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
2916 def _GetColFormatString(width, align_right):
2917 """Returns the format string for a field.
2925 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # Anything that is not a two-element sequence cannot be formatted
    return "?"

  (sec, usec) = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")

  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] not in suffix_map:
    # No suffix: the whole string must be a number of seconds
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3113 def _ToStream(stream, txt, *args):
3114 """Write a message to a stream, bypassing the logging system
3116 @type stream: file object
3117 @param stream: the file to which we should write
3119 @param txt: the message
3125 stream.write(txt % args)
3130 except IOError, err:
3131 if err.errno == errno.EPIPE:
3132 # our terminal went away, we'll exit
3133 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  # Delegate the actual formatting/writing to the shared helper
  out = sys.stdout
  _ToStream(out, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  # Delegate the actual formatting/writing to the shared helper
  err_stream = sys.stderr
  _ToStream(err_stream, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # queue of (idx, name, ops) tuples not yet submitted
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # list of (idx, submit status, job id/error, name) tuples
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # monotonically increasing index used to restore submission order
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  prefix = level * " "
  # Iterate over the full (defaulted) set so every parameter is shown
  for name in sorted(actual.keys()):
    if name in param_dict:
      shown = param_dict[name]
    else:
      shown = "default (%s)" % actual[name]
    buf.write("%s- %s: %s\n" % (prefix, name, shown))
3332 def ConfirmOperation(names, list_type, text, extra=""):
3333 """Ask the user to confirm an operation on a list of list_type.
3335 This function is used to request confirmation for doing an operation
3336 on a given list of list_type.
3339 @param names: the list of names that we display when
3340 we ask for confirmation
3341 @type list_type: str
3342 @param list_type: Human readable name for elements in the list (e.g. nodes)
3344 @param text: the operation that the user should confirm
3346 @return: True or False depending on user's confirmation.
3350 msg = ("The %s will operate on %d %s.\n%s"
3351 "Do you want to continue?" % (text, count, list_type, extra))
3352 affected = (("\nAffected %s:\n" % list_type) +
3353 "\n".join([" %s" % name for name in names]))
3355 choices = [("y", True, "Yes, execute the %s" % text),
3356 ("n", False, "No, abort the %s" % text)]
3359 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3362 question = msg + affected
3364 choice = AskUser(question, choices)
3367 choice = AskUser(msg + affected, choices)