4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 "ENABLED_DISK_TEMPLATES_OPT",
89 "FILESTORE_DRIVER_OPT",
97 "GLOBAL_SHARED_FILEDIR_OPT",
103 "DEFAULT_IALLOCATOR_OPT",
104 "IDENTIFY_DEFAULTS_OPT",
105 "IGNORE_CONSIST_OPT",
107 "IGNORE_FAILURES_OPT",
108 "IGNORE_OFFLINE_OPT",
109 "IGNORE_REMOVE_FAILURES_OPT",
110 "IGNORE_SECONDARIES_OPT",
112 "INCLUDEDEFAULTS_OPT",
115 "MAINTAIN_NODE_HEALTH_OPT",
117 "MASTER_NETMASK_OPT",
119 "MIGRATION_MODE_OPT",
120 "MODIFY_ETCHOSTS_OPT",
124 "NEW_CLUSTER_CERT_OPT",
125 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
126 "NEW_CONFD_HMAC_KEY_OPT",
130 "NEW_SPICE_CERT_OPT",
132 "NOCONFLICTSCHECK_OPT",
133 "NODE_FORCE_JOIN_OPT",
135 "NODE_PLACEMENT_OPT",
139 "NODRBD_STORAGE_OPT",
145 "NOMODIFY_ETCHOSTS_OPT",
146 "NOMODIFY_SSH_SETUP_OPT",
150 "NORUNTIME_CHGS_OPT",
153 "NOSSH_KEYCHECK_OPT",
167 "PREALLOC_WIPE_DISKS_OPT",
168 "PRIMARY_IP_VERSION_OPT",
175 "REMOVE_INSTANCE_OPT",
176 "REMOVE_RESERVED_IPS_OPT",
182 "SECONDARY_ONLY_OPT",
187 "SHUTDOWN_TIMEOUT_OPT",
189 "SPECS_CPU_COUNT_OPT",
190 "SPECS_DISK_COUNT_OPT",
191 "SPECS_DISK_SIZE_OPT",
192 "SPECS_MEM_SIZE_OPT",
193 "SPECS_NIC_COUNT_OPT",
195 "IPOLICY_STD_SPECS_OPT",
196 "IPOLICY_DISK_TEMPLATES",
197 "IPOLICY_VCPU_RATIO",
203 "STARTUP_PAUSED_OPT",
212 "USE_EXTERNAL_MIP_SCRIPT",
220 "IGNORE_IPOLICY_OPT",
221 "INSTANCE_POLICY_OPTS",
222 # Generic functions for CLI programs
224 "CreateIPolicyFromOpts",
226 "GenericInstanceCreate",
232 "JobSubmittedException",
234 "RunWhileClusterStopped",
238 # Formatting functions
239 "ToStderr", "ToStdout",
242 "FormatParamsDictInfo",
244 "PrintIPolicyCommand",
254 # command line options support infrastructure
255 "ARGS_MANY_INSTANCES",
258 "ARGS_MANY_NETWORKS",
278 "OPT_COMPL_INST_ADD_NODES",
279 "OPT_COMPL_MANY_NODES",
280 "OPT_COMPL_ONE_IALLOCATOR",
281 "OPT_COMPL_ONE_INSTANCE",
282 "OPT_COMPL_ONE_NODE",
283 "OPT_COMPL_ONE_NODEGROUP",
284 "OPT_COMPL_ONE_NETWORK",
286 "OPT_COMPL_ONE_EXTSTORAGE",
292 "COMMON_CREATE_OPTS",
298 #: Priorities (sorted)
300 ("low", constants.OP_PRIO_LOW),
301 ("normal", constants.OP_PRIO_NORMAL),
302 ("high", constants.OP_PRIO_HIGH),
305 #: Priority dictionary for easier lookup
306 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
307 # we migrate to Python 2.6
308 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
310 # Query result status for clients
313 QR_INCOMPLETE) = range(3)
315 #: Maximum batch size for ChooseJob
319 # constants used to create InstancePolicy dictionary
320 TISPECS_GROUP_TYPES = {
321 constants.ISPECS_MIN: constants.VTYPE_INT,
322 constants.ISPECS_MAX: constants.VTYPE_INT,
325 TISPECS_CLUSTER_TYPES = {
326 constants.ISPECS_MIN: constants.VTYPE_INT,
327 constants.ISPECS_MAX: constants.VTYPE_INT,
328 constants.ISPECS_STD: constants.VTYPE_INT,
331 #: User-friendly names for query2 field types
333 constants.QFT_UNKNOWN: "Unknown",
334 constants.QFT_TEXT: "Text",
335 constants.QFT_BOOL: "Boolean",
336 constants.QFT_NUMBER: "Number",
337 constants.QFT_UNIT: "Storage size",
338 constants.QFT_TIMESTAMP: "Timestamp",
339 constants.QFT_OTHER: "Custom",
344 def __init__(self, min=0, max=None): # pylint: disable=W0622
349 return ("<%s min=%s max=%s>" %
350 (self.__class__.__name__, self.min, self.max))
353 class ArgSuggest(_Argument):
354 """Suggesting argument.
356 Value can be any of the ones passed to the constructor.
359 # pylint: disable=W0622
360 def __init__(self, min=0, max=None, choices=None):
361 _Argument.__init__(self, min=min, max=max)
362 self.choices = choices
365 return ("<%s min=%s max=%s choices=%r>" %
366 (self.__class__.__name__, self.min, self.max, self.choices))
369 class ArgChoice(ArgSuggest):
372 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
373 but value must be one of the choices.
378 class ArgUnknown(_Argument):
379 """Unknown argument to program (e.g. determined at runtime).
384 class ArgInstance(_Argument):
385 """Instances argument.
390 class ArgNode(_Argument):
396 class ArgNetwork(_Argument):
402 class ArgGroup(_Argument):
403 """Node group argument.
408 class ArgJobId(_Argument):
414 class ArgFile(_Argument):
415 """File path argument.
420 class ArgCommand(_Argument):
426 class ArgHost(_Argument):
432 class ArgOs(_Argument):
438 class ArgExtStorage(_Argument):
439 """ExtStorage argument.
445 ARGS_MANY_INSTANCES = [ArgInstance()]
446 ARGS_MANY_NETWORKS = [ArgNetwork()]
447 ARGS_MANY_NODES = [ArgNode()]
448 ARGS_MANY_GROUPS = [ArgGroup()]
449 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
450 ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
451 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
453 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
454 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter (the object
  name, when present, is popped from the front of the list).

  @param opts: parsed command line options; must carry a C{tag_type}
      attribute naming the kind of object we operate on
  @param args: remaining positional arguments; for non-cluster tag
      kinds the first element is consumed as the object name
  @return: a (kind, name) tuple; name is C{None} for cluster tags
  @raise errors.ProgrammerError: if C{tag_type} is missing or unknown
  @raise errors.OpPrereqError: if a name argument is required but absent

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name needed
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command line options; C{tags_source} may be
      C{None} (no-op), "-" (read stdin) or a file name
  @param args: list of tags to extend in place

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # NOTE(review): this also closes sys.stdin when fname == "-";
    # matches upstream behavior, so kept as-is
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  # sorted output for stable, user-friendly listing
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @raise errors.OpPrereqError: if no tags remain after name extraction
      and file extension

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @raise errors.OpPrereqError: if no tags remain after name extraction
      and file extension

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
563 def check_unit(option, opt, value): # pylint: disable=W0613
564 """OptParsers custom converter for units.
568 return utils.ParseUnit(value)
569 except errors.UnitParseError, err:
570 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        # value-less element: prefix decides the implied value
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    # "no_ident" removes the whole parameter group; trailing options
    # would be meaningless, so reject them
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    # "-ident" (not followed by a digit, which would be a negative index)
    # resets the group to defaults; again no options may follow
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)
def _SplitListKeyVal(opt, value):
  """Parse a "/"-separated list of ident:key=val groups into a dict.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: string of the form "ident:key=val,.../ident:key=val,..."
  @rtype: dict
  @return: {ident: {key: val, ...}, ...}
  @raises errors.ParameterError: on empty sections or duplicate idents

  """
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval
def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  # "//" separates the outer list entries, "/" the groups inside each
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raise errors.ParameterError: if the value is neither a recognized
      true nor false spelling

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  Returns the literal "default" marker unchanged; everything else is
  converted to float (optparse reports conversion failures).

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
738 # completion_suggestion is normally a list. Using numeric values not evaluating
739 # to False for dynamic completion.
740 (OPT_COMPL_MANY_NODES,
742 OPT_COMPL_ONE_INSTANCE,
744 OPT_COMPL_ONE_EXTSTORAGE,
745 OPT_COMPL_ONE_IALLOCATOR,
746 OPT_COMPL_ONE_NETWORK,
747 OPT_COMPL_INST_ADD_NODES,
748 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
750 OPT_COMPL_ALL = compat.UniqueFrozenset([
751 OPT_COMPL_MANY_NODES,
753 OPT_COMPL_ONE_INSTANCE,
755 OPT_COMPL_ONE_EXTSTORAGE,
756 OPT_COMPL_ONE_IALLOCATOR,
757 OPT_COMPL_ONE_NETWORK,
758 OPT_COMPL_INST_ADD_NODES,
759 OPT_COMPL_ONE_NODEGROUP,
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with Ganeti-specific value types (registered
  in TYPES/TYPE_CHECKER below) and a C{completion_suggest} attribute used
  by the bash-completion machinery.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
795 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
796 help="Increase debugging level")
798 NOHDR_OPT = cli_option("--no-headers", default=False,
799 action="store_true", dest="no_headers",
800 help="Don't display column headers")
802 SEP_OPT = cli_option("--separator", default=None,
803 action="store", dest="separator",
804 help=("Separator between output fields"
805 " (defaults to one space)"))
807 USEUNITS_OPT = cli_option("--units", default=None,
808 dest="units", choices=("h", "m", "g", "t"),
809 help="Specify units for output (one of h/m/g/t)")
811 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
812 type="string", metavar="FIELDS",
813 help="Comma separated list of output fields")
815 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
816 default=False, help="Force the operation")
818 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
819 default=False, help="Do not require confirmation")
821 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
822 action="store_true", default=False,
823 help=("Ignore offline nodes and do as much"
826 TAG_ADD_OPT = cli_option("--tags", dest="tags",
827 default=None, help="Comma-separated list of instance"
830 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
831 default=None, help="File with tag names")
833 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
834 default=False, action="store_true",
835 help=("Submit the job and return the job ID, but"
836 " don't wait for the job to finish"))
838 SYNC_OPT = cli_option("--sync", dest="do_locking",
839 default=False, action="store_true",
840 help=("Grab locks while doing the queries"
841 " in order to ensure more consistent results"))
843 DRY_RUN_OPT = cli_option("--dry-run", default=False,
845 help=("Do not execute the operation, just run the"
846 " check steps and verify if it could be"
849 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
851 help="Increase the verbosity of the operation")
853 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
854 action="store_true", dest="simulate_errors",
855 help="Debugging option that makes the operation"
856 " treat most runtime checks as failed")
858 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
859 default=True, action="store_false",
860 help="Don't wait for sync (DANGEROUS!)")
862 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
863 default=False, action="store_true",
864 help="Wait for disks to sync")
866 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
867 action="store_true", default=False,
868 help="Enable offline instance")
870 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
871 action="store_true", default=False,
872 help="Disable down instance")
874 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
875 help=("Custom disk setup (%s)" %
876 utils.CommaJoin(constants.DISK_TEMPLATES)),
877 default=None, metavar="TEMPL",
878 choices=list(constants.DISK_TEMPLATES))
880 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
881 help="Do not create any network cards for"
884 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
885 help="Relative path under default cluster-wide"
886 " file storage dir to store file-based disks",
887 default=None, metavar="<DIR>")
889 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
890 help="Driver to use for image files",
891 default=None, metavar="<DRIVER>",
892 choices=list(constants.FILE_DRIVER))
894 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
895 help="Select nodes for the instance automatically"
896 " using the <NAME> iallocator plugin",
897 default=None, type="string",
898 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
900 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
902 help="Set the default instance"
904 default=None, type="string",
905 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
907 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
909 completion_suggest=OPT_COMPL_ONE_OS)
911 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
912 type="keyval", default={},
913 help="OS parameters")
915 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
916 action="store_true", default=False,
917 help="Force an unknown variant")
919 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
920 action="store_true", default=False,
921 help="Do not install the OS (will"
924 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
925 dest="allow_runtime_chgs",
926 default=True, action="store_false",
927 help="Don't allow runtime changes")
929 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
930 type="keyval", default={},
931 help="Backend parameters")
933 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
934 default={}, dest="hvparams",
935 help="Hypervisor parameters")
937 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
938 help="Disk template parameters, in the format"
939 " template:option=value,option=value,...",
940 type="identkeyval", action="append", default=[])
942 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
943 type="keyval", default={},
944 help="Memory size specs: list of key=value,"
945 " where key is one of min, max, std"
946 " (in MB or using a unit)")
948 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
949 type="keyval", default={},
950 help="CPU count specs: list of key=value,"
951 " where key is one of min, max, std")
953 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
954 dest="ispecs_disk_count",
955 type="keyval", default={},
956 help="Disk count specs: list of key=value,"
957 " where key is one of min, max, std")
959 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
960 type="keyval", default={},
961 help="Disk size specs: list of key=value,"
962 " where key is one of min, max, std"
963 " (in MB or using a unit)")
965 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
966 type="keyval", default={},
967 help="NIC count specs: list of key=value,"
968 " where key is one of min, max, std")
970 IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
971 IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
972 dest="ipolicy_bounds_specs",
973 type="multilistidentkeyval", default=None,
974 help="Complete instance specs limits")
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
# fix help-text typo: "Complte" -> "Complete"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")
982 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
983 dest="ipolicy_disk_templates",
984 type="list", default=None,
985 help="Comma-separated list of"
986 " enabled disk templates")
988 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
989 dest="ipolicy_vcpu_ratio",
990 type="maybefloat", default=None,
991 help="The maximum allowed vcpu-to-cpu ratio")
993 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
994 dest="ipolicy_spindle_ratio",
995 type="maybefloat", default=None,
996 help=("The maximum allowed instances to"
999 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
1000 help="Hypervisor and hypervisor options, in the"
1001 " format hypervisor:option=value,option=value,...",
1002 default=None, type="identkeyval")
1004 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1005 help="Hypervisor and hypervisor options, in the"
1006 " format hypervisor:option=value,option=value,...",
1007 default=[], action="append", type="identkeyval")
1009 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1010 action="store_false",
1011 help="Don't check that the instance's IP"
1014 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1015 default=True, action="store_false",
1016 help="Don't check that the instance's name"
1019 NET_OPT = cli_option("--net",
1020 help="NIC parameters", default=[],
1021 dest="nics", action="append", type="identkeyval")
1023 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1024 dest="disks", action="append", type="identkeyval")
1026 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1027 help="Comma-separated list of disks"
1028 " indices to act on (e.g. 0,2) (optional,"
1029 " defaults to all disks)")
1031 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1032 help="Enforces a single-disk configuration using the"
1033 " given disk size, in MiB unless a suffix is used",
1034 default=None, type="unit", metavar="<size>")
1036 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1037 dest="ignore_consistency",
1038 action="store_true", default=False,
1039 help="Ignore the consistency of the disks on"
1042 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1043 dest="allow_failover",
1044 action="store_true", default=False,
1045 help="If migration is not possible fallback to"
1048 NONLIVE_OPT = cli_option("--non-live", dest="live",
1049 default=True, action="store_false",
1050 help="Do a non-live migration (this usually means"
1051 " freeze the instance, save the state, transfer and"
1052 " only then resume running on the secondary node)")
1054 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1056 choices=list(constants.HT_MIGRATION_MODES),
1057 help="Override default migration mode (choose"
1058 " either live or non-live")
1060 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1061 help="Target node and optional secondary node",
1062 metavar="<pnode>[:<snode>]",
1063 completion_suggest=OPT_COMPL_INST_ADD_NODES)
1065 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1066 action="append", metavar="<node>",
1067 help="Use only this node (can be used multiple"
1068 " times, if not given defaults to all nodes)",
1069 completion_suggest=OPT_COMPL_ONE_NODE)
1071 NODEGROUP_OPT_NAME = "--node-group"
1072 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1074 help="Node group (name or uuid)",
1075 metavar="<nodegroup>",
1076 default=None, type="string",
1077 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1079 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1081 completion_suggest=OPT_COMPL_ONE_NODE)
1083 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1084 action="store_false",
1085 help="Don't start the instance after creation")
1087 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1088 action="store_true", default=False,
1089 help="Show command instead of executing it")
1091 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1092 default=False, action="store_true",
1093 help="Instead of performing the migration/failover,"
1094 " try to recover from a failed cleanup. This is safe"
1095 " to run even if the instance is healthy, but it"
1096 " will create extra replication traffic and "
1097 " disrupt briefly the replication (like during the"
1098 " migration/failover")
1100 STATIC_OPT = cli_option("-s", "--static", dest="static",
1101 action="store_true", default=False,
1102 help="Only show configuration data, not runtime data")
1104 ALL_OPT = cli_option("--all", dest="show_all",
1105 default=False, action="store_true",
1106 help="Show info on all instances on the cluster."
1107 " This can take a long time to run, use wisely")
1109 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1110 action="store_true", default=False,
1111 help="Interactive OS reinstall, lists available"
1112 " OS templates for selection")
1114 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1115 action="store_true", default=False,
1116 help="Remove the instance from the cluster"
1117 " configuration even if there are failures"
1118 " during the removal process")
1120 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1121 dest="ignore_remove_failures",
1122 action="store_true", default=False,
1123 help="Remove the instance from the"
1124 " cluster configuration even if there"
1125 " are failures during the removal"
1128 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1129 action="store_true", default=False,
1130 help="Remove the instance from the cluster")
1132 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1133 help="Specifies the new node for the instance",
1134 metavar="NODE", default=None,
1135 completion_suggest=OPT_COMPL_ONE_NODE)
1137 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1138 help="Specifies the new secondary node",
1139 metavar="NODE", default=None,
1140 completion_suggest=OPT_COMPL_ONE_NODE)
1142 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1143 help="Specifies the new primary node",
1144 metavar="<node>", default=None,
1145 completion_suggest=OPT_COMPL_ONE_NODE)
1147 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1148 default=False, action="store_true",
1149 help="Replace the disk(s) on the primary"
1150 " node (applies only to internally mirrored"
1151 " disk templates, e.g. %s)" %
1152 utils.CommaJoin(constants.DTS_INT_MIRROR))
1154 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1155 default=False, action="store_true",
1156 help="Replace the disk(s) on the secondary"
1157 " node (applies only to internally mirrored"
1158 " disk templates, e.g. %s)" %
1159 utils.CommaJoin(constants.DTS_INT_MIRROR))
1161 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1162 default=False, action="store_true",
1163 help="Lock all nodes and auto-promote as needed"
1166 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1167 default=False, action="store_true",
1168 help="Automatically replace faulty disks"
1169 " (applies only to internally mirrored"
1170 " disk templates, e.g. %s)" %
1171 utils.CommaJoin(constants.DTS_INT_MIRROR))
1173 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1174 default=False, action="store_true",
1175 help="Ignore current recorded size"
1176 " (useful for forcing activation when"
1177 " the recorded size is wrong)")
1179 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1181 completion_suggest=OPT_COMPL_ONE_NODE)
1183 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1186 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1187 help="Specify the secondary ip for the node",
1188 metavar="ADDRESS", default=None)
1190 READD_OPT = cli_option("--readd", dest="readd",
1191 default=False, action="store_true",
1192 help="Readd old node after replacing it")
1194 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1195 default=True, action="store_false",
1196 help="Disable SSH key fingerprint checking")
1198 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1199 default=False, action="store_true",
1200 help="Force the joining of a node")
1202 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1203 type="bool", default=None, metavar=_YORNO,
1204 help="Set the master_candidate flag on the node")
1206 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1207 type="bool", default=None,
1208 help=("Set the offline flag on the node"
1209 " (cluster does not communicate with offline"
# Node-flag, storage and cluster-wide networking options.
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="MASTER_NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
# Cluster initialization / modification and certificate-renewal options.
# Fixes two user-visible help-string typos present in the original:
# "Load new new cluster domain secret" and "repetions".
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
  cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
             default=None, type="bool",
             help="Defines whether the cluster should autonomously modify"
             " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=2,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          pathutils.CONFD_HMAC_KEY))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
  cli_option("--maintain-node-health", dest="maintain_node_health",
             metavar=_YORNO, default=None, type="bool",
             help="Configure the cluster to automatically maintain node"
             " health, by shutting down unknown instances, shutting down"
             " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
  cli_option("--identify-defaults", dest="identify_defaults",
             default=False, action="store_true",
             help="Identify which saved instance parameters are equal to"
             " the current cluster defaults and set them as such, instead"
             " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", default=constants.IP4_VERSION,
             action="store", dest="primary_ip_version",
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name given on the command line into
  its numeric value and stores it on the parser's values object.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
# Priority, OS-flag, node-group, out-of-band, evacuation and network options.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# common options for creating instances. add and import then add their own
# options
# NOTE(review): interior entries of this list were reconstructed — confirm
# against the canonical option list for instance creation commands.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]
1693 class _ShowUsage(Exception):
1694 """Exception class for L{_ParseArgs}.
1697 def __init__(self, exit_error):
1698 """Initializes instances of this class.
1700 @type exit_error: bool
1701 @param exit_error: Whether to report failure on exit
1704 Exception.__init__(self)
1705 self.exit_error = exit_error
1708 class _ShowVersion(Exception):
1709 """Exception class for L{_ParseArgs}.
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))
    cmd = aliases[cmd]

  if cmd in env_override:
    # default arguments may be injected via e.g. GNT_INSTANCE_ADD
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1773 def _FormatUsage(binary, commands):
1774 """Generates a nice description of all commands.
1776 @param binary: Script name
1777 @param commands: Dictionary containing command definitions
1780 # compute the max line length for cmd + usage
1781 mlen = min(60, max(map(len, commands)))
1783 yield "Usage: %s {command} [options...] [argument...]" % binary
1784 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1788 # and format a nice command list
1789 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1790 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1791 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1792 for line in help_lines:
1793 yield " %-*s %s" % (mlen, "", line)
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = False

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
def SplitNodeOption(value):
  """Splits the value of a --node option.

  Returns a list of two elements when the value contains a colon
  (primary, secondary), otherwise a (value, None) tuple.

  """
  # guard clause: no separator (or empty/None value) means no secondary node
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  # no user selection: fall back to the defaults unchanged
  if selected is None:
    return default

  # a leading "+" means "append to the defaults" rather than "replace"
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias: runs the wrapped function with an initialized RPC runtime
# (see rpc.RunWithRPC).
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer when no interaction is possible: last choice's value
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    # no controlling terminal, return the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      # reading a single char plus newline from the tty
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # print the per-choice help and ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  return cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)
      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      # only leave the loop once no new log messages arrived and the job
      # reached a final state
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # report the first failed opcode; remember whether any opcode succeeded
  # so partial failures can be distinguished
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase(object):
  """Base class for L{GenericPollJob} callbacks.

  Subclasses provide the data-access side of job polling.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase(object):
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses provide the user-feedback side of job polling.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """Job-polling data callbacks backed by a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print job progress to stdout/stderr.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # print the "waiting in queue"/"acquiring locks" notices only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  # non-plain-message payloads are stringified before encoding
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    # pick a reporter matching the kind of feedback requested
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  # a single opcode was submitted, so only its result is of interest
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # signal the caller that the job was submitted without waiting
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2

  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)
def GetClient(query=False):
  """Connects to the a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
  if override_socket:
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
      address = pathutils.MASTER_SOCKET
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
      address = pathutils.QUERY_SOCKET
    else:
      address = override_socket
  elif query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    address = None

  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file to diagnose why no master is reachable
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)

    raise
  return client
# NOTE(review): lines elided in this numbered extract. FormatError maps each
# known exception type to (exit code, human-readable message); obuf is a
# StringIO accumulator and msg/retcode are set in elided lines.
2384 def FormatError(err):
2385 """Return a formatted error message for a given error.
2387 This function takes an exception instance and returns a tuple
2388 consisting of two values: first, the recommended exit code, and
2389 second, a string describing the error message (not
2390 newline-terminated).
2396 if isinstance(err, errors.ConfigurationError):
2397 txt = "Corrupt configuration file: %s" % msg
2399 obuf.write(txt + "\n")
2400 obuf.write("Aborting.")
2402 elif isinstance(err, errors.HooksAbort):
2403 obuf.write("Failure: hooks execution failed:\n")
2404 for node, script, out in err.args[0]:
2406 obuf.write(" node: %s, script: %s, output: %s\n" %
2407 (node, script, out))
2409 obuf.write(" node: %s, script: %s (no output)\n" %
2411 elif isinstance(err, errors.HooksFailure):
2412 obuf.write("Failure: hooks general failure: %s" % msg)
2413 elif isinstance(err, errors.ResolverError):
# distinguishes "can't resolve myself" from "can't resolve a peer"
2414 this_host = netutils.Hostname.GetSysName()
2415 if err.args[0] == this_host:
2416 msg = "Failure: can't resolve my own hostname ('%s')"
2418 msg = "Failure: can't resolve hostname '%s'"
2419 obuf.write(msg % err.args[0])
2420 elif isinstance(err, errors.OpPrereqError):
2421 if len(err.args) == 2:
2422 obuf.write("Failure: prerequisites not met for this"
2423 " operation:\nerror type: %s, error details:\n%s" %
2424 (err.args[1], err.args[0]))
2426 obuf.write("Failure: prerequisites not met for this"
2427 " operation:\n%s" % msg)
2428 elif isinstance(err, errors.OpExecError):
2429 obuf.write("Failure: command execution error:\n%s" % msg)
2430 elif isinstance(err, errors.TagError):
2431 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2432 elif isinstance(err, errors.JobQueueDrainError):
2433 obuf.write("Failure: the job queue is marked for drain and doesn't"
2434 " accept new requests\n")
2435 elif isinstance(err, errors.JobQueueFull):
2436 obuf.write("Failure: the job queue is full and doesn't accept new"
2437 " job submissions until old jobs are archived\n")
2438 elif isinstance(err, errors.TypeEnforcementError):
2439 obuf.write("Parameter Error: %s" % msg)
2440 elif isinstance(err, errors.ParameterError):
2441 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2442 elif isinstance(err, luxi.NoMasterError):
2443 if err.args[0] == pathutils.MASTER_SOCKET:
2444 daemon = "the master daemon"
2445 elif err.args[0] == pathutils.QUERY_SOCKET:
2446 daemon = "the config daemon"
2448 daemon = "socket '%s'" % str(err.args[0])
2449 obuf.write("Cannot communicate with %s.\nIs the process running"
2450 " and listening for connections?" % daemon)
2451 elif isinstance(err, luxi.TimeoutError):
2452 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2453 " been submitted and will continue to run even if the call"
2454 " timed out. Useful commands in this situation are \"gnt-job"
2455 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2457 elif isinstance(err, luxi.PermissionError):
2458 obuf.write("It seems you don't have permissions to connect to the"
2459 " master daemon.\nPlease retry as a different user.")
2460 elif isinstance(err, luxi.ProtocolError):
2461 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2463 elif isinstance(err, errors.JobLost):
2464 obuf.write("Error checking job status: %s" % msg)
2465 elif isinstance(err, errors.QueryFilterParseError):
2466 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2467 obuf.write("\n".join(err.GetDetails()))
# errors.GenericError is the base class, so it must stay LAST among the
# ganeti-errors branches — more specific subclasses are matched above
2468 elif isinstance(err, errors.GenericError):
2469 obuf.write("Unhandled Ganeti error: %s" % msg)
2470 elif isinstance(err, JobSubmittedException):
2471 obuf.write("JobID: %s\n" % err.args[0])
2474 obuf.write("Unhandled exception: %s" % msg)
2475 return retcode, obuf.getvalue().rstrip("\n")
# NOTE(review): elided lines in this extract (original numbering jumps).
# GenericMain: shared entry point for all gnt-* scripts — parses argv,
# applies overrides, sets up logging, dispatches to the selected command
# function, and translates exceptions into exit codes via FormatError.
# Python 2 syntax ('except X, err', iteritems) throughout.
2478 def GenericMain(commands, override=None, aliases=None,
2479 env_override=frozenset()):
2480 """Generic main function for all the gnt-* commands.
2482 @param commands: a dictionary with a special structure, see the design doc
2483 for command line handling.
2484 @param override: if not None, we expect a dictionary with keys that will
2485 override command line options; this can be used to pass
2486 options from the scripts to generic functions
2487 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2488 @param env_override: list of environment names which are allowed to submit
2489 default args for commands
2492 # save the program name and the entire command line for later logging
2494 binary = os.path.basename(sys.argv[0])
2496 binary = sys.argv[0]
2498 if len(sys.argv) >= 2:
2499 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2503 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
# fallback values when argv is unusable (guard condition elided)
2505 binary = "<unknown program>"
2506 cmdline = "<unknown>"
2512 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2514 except _ShowVersion:
2515 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2516 constants.RELEASE_VERSION)
2517 return constants.EXIT_SUCCESS
2518 except _ShowUsage, err:
2519 for line in _FormatUsage(binary, commands):
2523 return constants.EXIT_FAILURE
2525 return constants.EXIT_SUCCESS
2526 except errors.ParameterError, err:
2527 result, err_msg = FormatError(err)
2531 if func is None: # parse error
2534 if override is not None:
2535 for key, val in override.iteritems():
2536 setattr(options, key, val)
2538 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2539 stderr_logging=True)
2541 logging.info("Command line: %s", cmdline)
2544 result = func(options, args)
2545 except (errors.GenericError, luxi.ProtocolError,
2546 JobSubmittedException), err:
2547 result, err_msg = FormatError(err)
2548 logging.exception("Error during command processing")
2550 except KeyboardInterrupt:
2551 result = constants.EXIT_FAILURE
2552 ToStderr("Aborted. Note that if the operation created any jobs, they"
2553 " might have been submitted and"
2554 " will continue to run in the background.")
2555 except IOError, err:
2556 if err.errno == errno.EPIPE:
2557 # our terminal went away, we'll exit
2558 sys.exit(constants.EXIT_FAILURE)
# NOTE(review): elided lines in this extract. ParseNicOption validates the
# (index, dict) pairs given via --net and produces a dense list of NIC dicts.
2565 def ParseNicOption(optvalue):
2566 """Parses the value of the --net option(s).
2570 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2571 except (TypeError, ValueError), err:
2572 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
# [{}] * nic_max replicates the SAME dict object; presumably every slot is
# overwritten in the (elided) loop body, otherwise this aliasing would be a
# bug — TODO confirm against full source
2575 nics = [{}] * nic_max
2576 for nidx, ndict in optvalue:
2579 if not isinstance(ndict, dict):
2580 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2581 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2583 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
# NOTE(review): 'try:'/'except' scaffolding around the dict access appears
# elided in this extract; the trailing comment suggests a KeyError is
# swallowed when usb_devices is absent — TODO confirm against full source.
2590 def FixHvParams(hvparams):
2591 # In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
2592 # comma to space because commas cannot be accepted on the command line
2593 # (they already act as the separator between different hvparams). Still,
2594 # RAPI should be able to accept commas for backwards compatibility.
2595 # Therefore, we convert spaces into commas here, and we keep the old
2596 # parsing logic everywhere else.
2598 new_usb_devices = hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
2599 hvparams[constants.HV_USB_DEVICES] = new_usb_devices
2601 #No usb_devices, no modification required
# NOTE(review): elided lines in this extract. GenericInstanceCreate validates
# the CLI options for 'gnt-instance add' / import (nodes, nics, disks, tags,
# be/hv params), builds an OpInstanceCreate opcode and submits it.
2605 def GenericInstanceCreate(mode, opts, args):
2606 """Add an instance to the cluster via either creation or import.
2608 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2609 @param opts: the command line options selected by the user
2611 @param args: should contain only one element, the new instance name
2613 @return: the desired exit code
2618 (pnode, snode) = SplitNodeOption(opts.node)
2623 hypervisor, hvparams = opts.hypervisor
2626 nics = ParseNicOption(opts.nics)
2630 elif mode == constants.INSTANCE_CREATE:
2631 # default of one nic, all auto
# --- disk option validation ---
2637 if opts.disk_template == constants.DT_DISKLESS:
2638 if opts.disks or opts.sd_size is not None:
2639 raise errors.OpPrereqError("Diskless instance but disk"
2640 " information passed", errors.ECODE_INVAL)
2643 if (not opts.disks and not opts.sd_size
2644 and mode == constants.INSTANCE_CREATE):
2645 raise errors.OpPrereqError("No disk information specified",
2647 if opts.disks and opts.sd_size is not None:
2648 raise errors.OpPrereqError("Please use either the '--disk' or"
2649 " '-s' option", errors.ECODE_INVAL)
2650 if opts.sd_size is not None:
# legacy '-s SIZE' shorthand becomes a single disk-0 specification
2651 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2655 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2656 except ValueError, err:
2657 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2659 disks = [{}] * disk_max
2662 for didx, ddict in opts.disks:
2664 if not isinstance(ddict, dict):
2665 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2666 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2667 elif constants.IDISK_SIZE in ddict:
2668 if constants.IDISK_ADOPT in ddict:
2669 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2670 " (disk %d)" % didx, errors.ECODE_INVAL)
2672 ddict[constants.IDISK_SIZE] = \
2673 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2674 except ValueError, err:
2675 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2676 (didx, err), errors.ECODE_INVAL)
2677 elif constants.IDISK_ADOPT in ddict:
2678 if mode == constants.INSTANCE_IMPORT:
2679 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2680 " import", errors.ECODE_INVAL)
# adopted volumes get size 0 here; presumably the real size is read from
# the adopted LV later — TODO confirm
2681 ddict[constants.IDISK_SIZE] = 0
2683 raise errors.OpPrereqError("Missing size or adoption source for"
2684 " disk %d" % didx, errors.ECODE_INVAL)
2687 if opts.tags is not None:
2688 tags = opts.tags.split(",")
2692 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2693 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2694 FixHvParams(hvparams)
# --- mode-specific parameters ---
2696 if mode == constants.INSTANCE_CREATE:
2699 force_variant = opts.force_variant
2702 no_install = opts.no_install
2703 identify_defaults = False
2704 elif mode == constants.INSTANCE_IMPORT:
2707 force_variant = False
2708 src_node = opts.src_node
2709 src_path = opts.src_dir
2711 identify_defaults = opts.identify_defaults
2713 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2715 op = opcodes.OpInstanceCreate(instance_name=instance,
2717 disk_template=opts.disk_template,
2719 conflicts_check=opts.conflicts_check,
2720 pnode=pnode, snode=snode,
2721 ip_check=opts.ip_check,
2722 name_check=opts.name_check,
2723 wait_for_sync=opts.wait_for_sync,
2724 file_storage_dir=opts.file_storage_dir,
2725 file_driver=opts.file_driver,
2726 iallocator=opts.iallocator,
2727 hypervisor=hypervisor,
2729 beparams=opts.beparams,
2730 osparams=opts.osparams,
2734 force_variant=force_variant,
2738 no_install=no_install,
2739 identify_defaults=identify_defaults,
2740 ignore_ipolicy=opts.ignore_ipolicy)
2742 SubmitOrSend(op, opts)
# NOTE(review): elided lines in this extract. Helper that stops the watcher
# and all cluster daemons (master last to stop, master last to start), runs a
# callback, then restarts everything; commands run locally on the master and
# over SSH elsewhere.
2746 class _RunWhileClusterStoppedHelper:
2747 """Helper class for L{RunWhileClusterStopped} to simplify state management
2750 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2751 """Initializes this class.
2753 @type feedback_fn: callable
2754 @param feedback_fn: Feedback function
2755 @type cluster_name: string
2756 @param cluster_name: Cluster name
2757 @type master_node: string
2758 @param master_node Master node name
2759 @type online_nodes: list
2760 @param online_nodes: List of names of online nodes
2763 self.feedback_fn = feedback_fn
2764 self.cluster_name = cluster_name
2765 self.master_node = master_node
2766 self.online_nodes = online_nodes
2768 self.ssh = ssh.SshRunner(self.cluster_name)
2770 self.nonmaster_nodes = [name for name in online_nodes
2771 if name != master_node]
2773 assert self.master_node not in self.nonmaster_nodes
2775 def _RunCmd(self, node_name, cmd):
2776 """Runs a command on the local or a remote machine.
2778 @type node_name: string
2779 @param node_name: Machine name
2784 if node_name is None or node_name == self.master_node:
2785 # No need to use SSH
2786 result = utils.RunCmd(cmd)
2788 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2789 utils.ShellQuoteArgs(cmd))
# failure check (condition elided) raises with node, exit code and output
2792 errmsg = ["Failed to run command %s" % result.cmd]
2794 errmsg.append("on node %s" % node_name)
2795 errmsg.append(": exitcode %s and error %s" %
2796 (result.exit_code, result.output))
2797 raise errors.OpExecError(" ".join(errmsg))
2799 def Call(self, fn, *args):
2800 """Call function while all daemons are stopped.
2803 @param fn: Function to be called
2806 # Pause watcher by acquiring an exclusive lock on watcher state file
2807 self.feedback_fn("Blocking watcher")
2808 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2810 # TODO: Currently, this just blocks. There's no timeout.
2811 # TODO: Should it be a shared lock?
2812 watcher_block.Exclusive(blocking=True)
2814 # Stop master daemons, so that no new jobs can come in and all running
2816 self.feedback_fn("Stopping master daemons")
2817 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2819 # Stop daemons on all nodes
2820 for node_name in self.online_nodes:
2821 self.feedback_fn("Stopping daemons on %s" % node_name)
2822 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2824 # All daemons are shut down now
2826 return fn(self, *args)
2827 except Exception, err:
2828 _, errmsg = FormatError(err)
2829 logging.exception("Caught exception")
2830 self.feedback_fn(errmsg)
# restart path — presumably inside a 'finally:' (elided) so daemons come
# back even on failure — TODO confirm
2833 # Start cluster again, master node last
2834 for node_name in self.nonmaster_nodes + [self.master_node]:
2835 self.feedback_fn("Starting daemons on %s" % node_name)
2836 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2839 watcher_block.Close()
# NOTE(review): elided lines in this extract. Gathers cluster name, master
# node and online nodes via a luxi client, then delegates to
# _RunWhileClusterStoppedHelper.Call.
2842 def RunWhileClusterStopped(feedback_fn, fn, *args):
2843 """Calls a function while all cluster daemons are stopped.
2845 @type feedback_fn: callable
2846 @param feedback_fn: Feedback function
2848 @param fn: Function to be called when daemons are stopped
2851 feedback_fn("Gathering cluster information")
2853 # This ensures we're running on the master daemon
2856 (cluster_name, master_node) = \
2857 cl.QueryConfigValues(["cluster_name", "master_node"])
2859 online_nodes = GetOnlineNodes([], cl=cl)
2861 # Don't keep a reference to the client. The master daemon will go away.
2864 assert master_node in online_nodes
2866 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2867 online_nodes).Call(fn, *args)
# NOTE(review): elided lines in this extract. GenerateTable renders rows as
# text, either separator-joined or column-aligned (widths computed in a first
# pass over data plus headers).
2870 def GenerateTable(headers, fields, separator, data,
2871 numfields=None, unitfields=None,
2873 """Prints a table with headers and different fields.
2876 @param headers: dictionary mapping field names to headers for
2879 @param fields: the field names corresponding to each row in
2881 @param separator: the separator to be used; if this is None,
2882 the default 'smart' algorithm is used which computes optimal
2883 field width, otherwise just the separator is used between
2886 @param data: a list of lists, each sublist being one row to be output
2887 @type numfields: list
2888 @param numfields: a list with the fields that hold numeric
2889 values and thus should be right-aligned
2890 @type unitfields: list
2891 @param unitfields: a list with the fields that hold numeric
2892 values that should be formatted with the units field
2893 @type units: string or None
2894 @param units: the units we should use for formatting, or None for
2895 automatic choice (human-readable for non-separator usage, otherwise
2896 megabytes); this is a one-letter string
2905 if numfields is None:
2907 if unitfields is None:
2910 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2911 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
# build one printf-style conversion per field: plain %s when a separator is
# used, width-parameterized %*s / %-*s for aligned output
2914 for field in fields:
2915 if headers and field not in headers:
2916 # TODO: handle better unknown fields (either revert to old
2917 # style of raising exception, or deal more intelligently with
2919 headers[field] = field
2920 if separator is not None:
2921 format_fields.append("%s")
2922 elif numfields.Matches(field):
2923 format_fields.append("%*s")
2925 format_fields.append("%-*s")
2927 if separator is None:
2928 mlens = [0 for name in fields]
2929 format_str = " ".join(format_fields)
# literal '%' in a custom separator must be doubled so the final
# format_str % args does not misinterpret it
2931 format_str = separator.replace("%", "%%").join(format_fields)
2936 for idx, val in enumerate(row):
2937 if unitfields.Matches(fields[idx]):
2940 except (TypeError, ValueError):
2943 val = row[idx] = utils.FormatUnit(val, units)
2944 val = row[idx] = str(val)
2945 if separator is None:
2946 mlens[idx] = max(mlens[idx], len(val))
2951 for idx, name in enumerate(fields):
2953 if separator is None:
2954 mlens[idx] = max(mlens[idx], len(hdr))
2955 args.append(mlens[idx])
2957 result.append(format_str % tuple(args))
2959 if separator is None:
2960 assert len(mlens) == len(fields)
# last column left-aligned gets width 0 to avoid trailing spaces
2962 if fields and not numfields.Matches(fields[-1]):
2968 line = ["-" for _ in fields]
2969 for idx in range(len(fields)):
2970 if separator is None:
2971 args.append(mlens[idx])
2972 args.append(line[idx])
2973 result.append(format_str % tuple(args))
# NOTE(review): function body elided in this extract; only the signature and
# docstring opener are visible.
2978 def _FormatBool(value):
2979 """Formats a boolean value as a string.
# NOTE(review): closing brace of this dict is elided in the extract. Maps
# each query field type (QFT_*) to a (formatter callable, align-right) pair;
# QFT_UNIT is deliberately absent — see _GetColumnFormatter.
2987 #: Default formatting for query results; (callback, align right)
2988 _DEFAULT_FORMAT_QUERY = {
2989 constants.QFT_TEXT: (str, False),
2990 constants.QFT_BOOL: (_FormatBool, False),
2991 constants.QFT_NUMBER: (str, True),
2992 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2993 constants.QFT_OTHER: (str, False),
2994 constants.QFT_UNKNOWN: (str, False),
# NOTE(review): elided lines in this extract (the early-return for a matching
# override and the return of the looked-up default are not visible).
2998 def _GetColumnFormatter(fdef, override, unit):
2999 """Returns formatting function for a field.
3001 @type fdef: L{objects.QueryFieldDefinition}
3002 @type override: dict
3003 @param override: Dictionary for overriding field formatting functions,
3004 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3006 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3007 @rtype: tuple; (callable, bool)
3008 @return: Returns the function to format a value (takes one parameter) and a
3009 boolean for aligning the value on the right-hand side
3012 fmt = override.get(fdef.name, None)
3016 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3018 if fdef.kind == constants.QFT_UNIT:
3019 # Can't keep this information in the static dictionary
# closure captures the caller-chosen unit; unit columns are right-aligned
3020 return (lambda value: utils.FormatUnit(value, unit), True)
3022 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3026 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# NOTE(review): elided lines in this extract. Callable wrapper that reports
# each cell's RS_* status via status_fn, formats normal values with fn, and
# renders abnormal statuses via FormatResultError.
3029 class _QueryColumnFormatter:
3030 """Callable class for formatting fields of a query.
3033 def __init__(self, fn, status_fn, verbose):
3034 """Initializes this class.
3037 @param fn: Formatting function
3038 @type status_fn: callable
3039 @param status_fn: Function to report fields' status
3040 @type verbose: boolean
3041 @param verbose: whether to use verbose field descriptions or not
3045 self._status_fn = status_fn
3046 self._verbose = verbose
3048 def __call__(self, data):
3049 """Returns a field's string representation.
3052 (status, value) = data
# record the status even when the value is abnormal, so callers can
# compute overall result statistics
3055 self._status_fn(status)
3057 if status == constants.RS_NORMAL:
3058 return self._fn(value)
3060 assert value is None, \
3061 "Found value %r for abnormal status %s" % (value, status)
3063 return FormatResultError(status, self._verbose)
# NOTE(review): the return of verbose_text/normal_text is elided in this
# extract; only the lookup and the unknown-status fallback are visible.
3066 def FormatResultError(status, verbose):
3067 """Formats result status other than L{constants.RS_NORMAL}.
3069 @param status: The result status
3070 @type verbose: boolean
3071 @param verbose: Whether to return the verbose text
3072 @return: Text of result status
3075 assert status != constants.RS_NORMAL, \
3076 "FormatResultError called with status equal to constants.RS_NORMAL"
3078 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3080 raise NotImplementedError("Unknown status %s" % status)
# NOTE(review): elided lines in this extract. Formats a QueryResponse into a
# table and computes an overall status (QR_NORMAL/QR_INCOMPLETE/unknown)
# from per-cell RS_* statistics collected during formatting.
3087 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3088 header=False, verbose=False):
3089 """Formats data in L{objects.QueryResponse}.
3091 @type result: L{objects.QueryResponse}
3092 @param result: result of query operation
3094 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3095 see L{utils.text.FormatUnit}
3096 @type format_override: dict
3097 @param format_override: Dictionary for overriding field formatting functions,
3098 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3099 @type separator: string or None
3100 @param separator: String used to separate fields
3102 @param header: Whether to output header row
3103 @type verbose: boolean
3104 @param verbose: whether to use verbose field descriptions or not
3113 if format_override is None:
3114 format_override = {}
# one counter per possible result status; bumped by _RecordStatus below
3116 stats = dict.fromkeys(constants.RS_ALL, 0)
3118 def _RecordStatus(status):
3123 for fdef in result.fields:
3124 assert fdef.title and fdef.name
3125 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3126 columns.append(TableColumn(fdef.title,
3127 _QueryColumnFormatter(fn, _RecordStatus,
3131 table = FormatTable(result.data, columns, header, separator)
3133 # Collect statistics
3134 assert len(stats) == len(constants.RS_ALL)
3135 assert compat.all(count >= 0 for count in stats.values())
3137 # Determine overall status. If there was no data, unknown fields must be
3138 # detected via the field definitions.
3139 if (stats[constants.RS_UNKNOWN] or
3140 (not result.data and _GetUnknownFields(result.fields))):
3142 elif compat.any(count > 0 for key, count in stats.items()
3143 if key != constants.RS_NORMAL):
3144 status = QR_INCOMPLETE
3148 return (status, table)
# Filters the field definitions down to those the server reported as unknown
# (kind == QFT_UNKNOWN).
3151 def _GetUnknownFields(fdefs):
3152 """Returns list of unknown fields included in C{fdefs}.
3154 @type fdefs: list of L{objects.QueryFieldDefinition}
3157 return [fdef for fdef in fdefs
3158 if fdef.kind == constants.QFT_UNKNOWN]
# NOTE(review): the guard around the warning and the return value are elided
# in this extract; callers appear to use the result as a boolean flag.
3161 def _WarnUnknownFields(fdefs):
3162 """Prints a warning to stderr if a query included unknown fields.
3164 @type fdefs: list of L{objects.QueryFieldDefinition}
3167 unknown = _GetUnknownFields(fdefs)
3169 ToStderr("Warning: Queried for unknown fields %s",
3170 utils.CommaJoin(fdef.name for fdef in unknown))
# NOTE(review): elided lines in this extract. Shared implementation of the
# gnt-* 'list' commands: builds a name filter, runs the query, formats the
# result and maps unknown-field outcomes to EXIT_UNKNOWN_FIELD.
3176 def GenericList(resource, fields, names, unit, separator, header, cl=None,
3177 format_override=None, verbose=False, force_filter=False,
3178 namefield=None, qfilter=None, isnumeric=False):
3179 """Generic implementation for listing all items of a resource.
3181 @param resource: One of L{constants.QR_VIA_LUXI}
3182 @type fields: list of strings
3183 @param fields: List of fields to query for
3184 @type names: list of strings
3185 @param names: Names of items to query for
3186 @type unit: string or None
3187 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3188 None for automatic choice (human-readable for non-separator usage,
3189 otherwise megabytes); this is a one-letter string
3190 @type separator: string or None
3191 @param separator: String used to separate fields
3193 @param header: Whether to show header row
3194 @type force_filter: bool
3195 @param force_filter: Whether to always treat names as filter
3196 @type format_override: dict
3197 @param format_override: Dictionary for overriding field formatting functions,
3198 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3199 @type verbose: boolean
3200 @param verbose: whether to use verbose field descriptions or not
3201 @type namefield: string
3202 @param namefield: Name of field to use for simple filters (see
3203 L{qlang.MakeFilter} for details)
3204 @type qfilter: list or None
3205 @param qfilter: Query filter (in addition to names)
3206 @param isnumeric: bool
3207 @param isnumeric: Whether the namefield's type is numeric, and therefore
3208 any simple filters built by namefield should use integer values to
3215 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3216 isnumeric=isnumeric)
# combine the caller's filter with the name filter via AND when both exist
3219 qfilter = namefilter
3220 elif namefilter is not None:
3221 qfilter = [qlang.OP_AND, namefilter, qfilter]
3226 response = cl.Query(resource, fields, qfilter)
3228 found_unknown = _WarnUnknownFields(response.fields)
3230 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3232 format_override=format_override,
3238 assert ((found_unknown and status == QR_UNKNOWN) or
3239 (not found_unknown and status != QR_UNKNOWN))
3241 if status == QR_UNKNOWN:
3242 return constants.EXIT_UNKNOWN_FIELD
3244 # TODO: Should the list command fail if not all data could be collected?
3245 return constants.EXIT_SUCCESS
# NOTE(review): the surrounding return-list lines are elided in this extract;
# only the human-readable kind lookup is visible.
3248 def _FieldDescValues(fdef):
3249 """Helper function for L{GenericListFields} to get query field description.
3251 @type fdef: L{objects.QueryFieldDefinition}
3257 _QFT_NAMES.get(fdef.kind, fdef.kind),
# NOTE(review): elided lines in this extract. Shared implementation of the
# gnt-* 'list-fields' commands: queries field definitions and prints a
# Name/Type/Title/Description table.
3263 def GenericListFields(resource, fields, separator, header, cl=None):
3264 """Generic implementation for listing fields for a resource.
3266 @param resource: One of L{constants.QR_VIA_LUXI}
3267 @type fields: list of strings
3268 @param fields: List of fields to query for
3269 @type separator: string or None
3270 @param separator: String used to separate fields
3272 @param header: Whether to show header row
3281 response = cl.QueryFields(resource, fields)
3283 found_unknown = _WarnUnknownFields(response.fields)
3286 TableColumn("Name", str, False),
3287 TableColumn("Type", str, False),
3288 TableColumn("Title", str, False),
3289 TableColumn("Description", str, False),
3292 rows = map(_FieldDescValues, response.fields)
3294 for line in FormatTable(rows, columns, header, separator):
3298 return constants.EXIT_UNKNOWN_FIELD
3300 return constants.EXIT_SUCCESS
3304 """Describes a column for L{FormatTable}.
3307 def __init__(self, title, fn, align_right):
3308 """Initializes this class.
3311 @param title: Column title
3313 @param fn: Formatting function
3314 @type align_right: bool
3315 @param align_right: Whether to align values on the right-hand side
3320 self.align_right = align_right
# NOTE(review): the computation of 'sign' (presumably "" for right-aligned,
# "-" for left-aligned — TODO confirm) is elided in this extract.
3323 def _GetColFormatString(width, align_right):
3324 """Returns the format string for a field.
3332 return "%%%s%ss" % (sign, width)
# NOTE(review): elided lines in this extract. FormatTable renders rows using
# per-column formatters; with a separator it simply joins, otherwise it
# computes column widths and builds an aligned printf-style format string.
3335 def FormatTable(rows, columns, header, separator):
3336 """Formats data as a table.
3338 @type rows: list of lists
3339 @param rows: Row data, one list per row
3340 @type columns: list of L{TableColumn}
3341 @param columns: Column descriptions
3343 @param header: Whether to show header row
3344 @type separator: string or None
3345 @param separator: String used to separate columns
3349 data = [[col.title for col in columns]]
3350 colwidth = [len(col.title) for col in columns]
3353 colwidth = [0 for _ in columns]
3357 assert len(row) == len(columns)
3359 formatted = [col.format(value) for value, col in zip(row, columns)]
3361 if separator is None:
3362 # Update column widths
3363 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3364 # Modifying a list's items while iterating is fine
3365 colwidth[idx] = max(oldwidth, len(value))
3367 data.append(formatted)
3369 if separator is not None:
3370 # Return early if a separator is used
3371 return [separator.join(row) for row in data]
3373 if columns and not columns[-1].align_right:
3374 # Avoid unnecessary spaces at end of line
3377 # Build format string
3378 fmt = " ".join([_GetColFormatString(width, col.align_right)
3379 for col, width in zip(columns, colwidth)])
3381 return [fmt % tuple(row) for row in data]
# NOTE(review): the invalid-input fallback and the (sec, usecs) unpacking are
# elided in this extract.
3384 def FormatTimestamp(ts):
3385 """Formats a given timestamp.
3388 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3391 @return: a string with the formatted timestamp
3394 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3398 return utils.FormatTime(sec, usecs=usecs)
# NOTE(review): elided lines in this extract (suffix_map definition, the
# no-suffix branch and the final return are not visible). Parses strings
# like "30", "2h" into a number of seconds via a suffix multiplier.
3401 def ParseTimespec(value):
3402 """Parse a time specification.
3404 The following suffixes will be recognized:
3412 Without any suffix, the value will be taken to be in seconds.
3417 raise errors.OpPrereqError("Empty time specification passed",
3426 if value[-1] not in suffix_map:
3429 except (TypeError, ValueError):
3430 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3433 multiplier = suffix_map[value[-1]]
# the suffix character is stripped before the numeric conversion (elided)
3435 if not value: # no data left after stripping the suffix
3436 raise errors.OpPrereqError("Invalid time specification (only"
3437 " suffix passed)", errors.ECODE_INVAL)
3439 value = int(value) * multiplier
3440 except (TypeError, ValueError):
3441 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
# NOTE(review): elided lines in this extract. GetOnlineNodes queries node
# name/offline/sip, optionally restricted by name list, node group, and
# master exclusion; returns names (or secondary IPs) of online nodes and
# warns on stderr about skipped offline ones.
3446 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3447 filter_master=False, nodegroup=None):
3448 """Returns the names of online nodes.
3450 This function will also log a warning on stderr with the names of
3453 @param nodes: if not empty, use only this subset of nodes (minus the
3455 @param cl: if not None, luxi client to use
3456 @type nowarn: boolean
3457 @param nowarn: by default, this function will output a note with the
3458 offline nodes that are skipped; if this parameter is True the
3459 note is not displayed
3460 @type secondary_ips: boolean
3461 @param secondary_ips: if True, return the secondary IPs instead of the
3462 names, useful for doing network traffic over the replication interface
3464 @type filter_master: boolean
3465 @param filter_master: if True, do not return the master node in the list
3466 (useful in coordination with secondary_ips where we cannot check our
3467 node name against the list)
3468 @type nodegroup: string
3469 @param nodegroup: If set, only return nodes in this node group
3478 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3480 if nodegroup is not None:
# match either the group's name or its UUID
3481 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3482 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3485 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3488 if len(qfilter) > 1:
3489 final_filter = [qlang.OP_AND] + qfilter
3491 assert len(qfilter) == 1
3492 final_filter = qfilter[0]
3496 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# each row is [(status, name), (status, offline), (status, sip)]; the
# helpers below destructure by position
3498 def _IsOffline(row):
3499 (_, (_, offline), _) = row
3503 ((_, name), _, _) = row
3507 (_, _, (_, sip)) = row
3510 (offline, online) = compat.partition(result.data, _IsOffline)
3512 if offline and not nowarn:
3513 ToStderr("Note: skipping offline node(s): %s" %
3514 utils.CommaJoin(map(_GetName, offline)))
3521 return map(fn, online)
# NOTE(review): elided lines in this extract (the no-args branch and flush
# are not visible). Writes formatted text to a stream, exiting quietly on
# EPIPE (terminal gone away).
3524 def _ToStream(stream, txt, *args):
3525 """Write a message to a stream, bypassing the logging system
3527 @type stream: file object
3528 @param stream: the file to which we should write
3530 @param txt: the message
3536 stream.write(txt % args)
3541 except IOError, err:
3542 if err.errno == errno.EPIPE:
3543 # our terminal went away, we'll exit
3544 sys.exit(constants.EXIT_FAILURE)
# Thin stdout wrapper over _ToStream (which handles %-formatting and EPIPE).
3549 def ToStdout(txt, *args):
3550 """Write a message to stdout only, bypassing the logging system
3552 This is just a wrapper over _ToStream.
3555 @param txt: the message
3558 _ToStream(sys.stdout, txt, *args)
# Thin stderr wrapper over _ToStream (which handles %-formatting and EPIPE).
3561 def ToStderr(txt, *args):
3562 """Write a message to stderr only, bypassing the logging system
3564 This is just a wrapper over _ToStream.
3567 @param txt: the message
3570 _ToStream(sys.stderr, txt, *args)
3573 class JobExecutor(object):
3574 """Class which manages the submission and execution of multiple jobs.
3576 Note that instances of this class should not be reused between
3580 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3585 self.verbose = verbose
3588 self.feedback_fn = feedback_fn
3589 self._counter = itertools.count()
3592 def _IfName(name, fmt):
3593 """Helper function for formatting name.
3601 def QueueJob(self, name, *ops):
3602 """Record a job for later submit.
3605 @param name: a description of the job, will be used in WaitJobSet
3608 SetGenericOpcodeOpts(ops, self.opts)
3609 self.queue.append((self._counter.next(), name, ops))
3611 def AddJobId(self, name, status, job_id):
3612 """Adds a job ID to the internal queue.
3615 self.jobs.append((self._counter.next(), status, job_id, name))
3617 def SubmitPending(self, each=False):
3618 """Submit all pending jobs.
3623 for (_, _, ops) in self.queue:
3624 # SubmitJob will remove the success status, but raise an exception if
3625 # the submission fails, so we'll notice that anyway.
3626 results.append([True, self.cl.SubmitJob(ops)[0]])
3628 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3629 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3630 self.jobs.append((idx, status, data, name))
3632 def _ChooseJob(self):
3633 """Choose a non-waiting/queued job to poll next.
3636 assert self.jobs, "_ChooseJob called with empty job list"
3638 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3642 for job_data, status in zip(self.jobs, result):
3643 if (isinstance(status, list) and status and
3644 status[0] in (constants.JOB_STATUS_QUEUED,
3645 constants.JOB_STATUS_WAITING,
3646 constants.JOB_STATUS_CANCELING)):
3647 # job is still present and waiting
3649 # good candidate found (either running job or lost job)
3650 self.jobs.remove(job_data)
3654 return self.jobs.pop(0)
3656 def GetResults(self):
3657 """Wait for and return the results of all jobs.
3660 @return: list of tuples (success, job results), in the same order
3661 as the submitted jobs; if a job has failed, instead of the result
3662 there will be the error message
3666 self.SubmitPending()
3669 ok_jobs = [row[2] for row in self.jobs if row[1]]
3671 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3673 # first, remove any non-submitted jobs
3674 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3675 for idx, _, jid, name in failures:
3676 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3677 results.append((idx, False, jid))
3680 (idx, _, jid, name) = self._ChooseJob()
3681 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3683 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3685 except errors.JobLost, err:
3686 _, job_result = FormatError(err)
3687 ToStderr("Job %s%s has been archived, cannot check its result",
3688 jid, self._IfName(name, " for %s"))
3690 except (errors.GenericError, luxi.ProtocolError), err:
3691 _, job_result = FormatError(err)
3693 # the error message will always be shown, verbose or not
3694 ToStderr("Job %s%s has failed: %s",
3695 jid, self._IfName(name, " for %s"), job_result)
3697 results.append((idx, success, job_result))
3699 # sort based on the index, then drop it
3701 results = [i[1:] for i in results]
3705 def WaitOrShow(self, wait):
3706 """Wait for job results or only print the job IDs.
3709 @param wait: whether to wait or not
3713 return self.GetResults()
3716 self.SubmitPending()
3717 for _, status, result, name in self.jobs:
3719 ToStdout("%s: %s", result, name)
3721 ToStderr("Failure for %s: %s", name, result)
3722 return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      # non-empty dict values are formatted recursively
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      # parameters not explicitly set are marked as defaults, showing the
      # effective value taken from "actual"
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
def _FormatListInfoDefault(data, def_data):
  """Format a list of values, falling back to the defaults when unset.

  @param data: the own list of values, or None when not customized
  @param def_data: the effective (default) values, used when data is None
  @rtype: string

  """
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    # the cluster policy is complete by definition, there are no
    # inherited defaults to merge in
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    # no own bounds: show the effective ones, all marked as defaults
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    # std specs are meaningful only at cluster level
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs)))

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS])))
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret
3808 def _PrintSpecsParameters(buf, specs):
3809 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3810 buf.write(",".join(values))
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    # std specs exist only at cluster level
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        # multiple min/max pairs are separated by "//" on the command line
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # long lists are not dumped by default; offer a "view" choice instead
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    # the user asked to see the list: show it and ask again, without the
    # "view" choice this time
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Each value that is not the "default" placeholder is passed through
  L{utils.ParseUnit}; the placeholder is kept verbatim.

  @type elements: dict
  @param elements: mapping of names to values, possibly with unit suffixes
  @rtype: dict

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      # keep the "default" marker untouched; it's resolved later
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
3900 def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3901 ispecs_disk_count, ispecs_disk_size,
3902 ispecs_nic_count, group_ipolicy, fill_all):
3905 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3906 if ispecs_disk_size:
3907 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3908 except (TypeError, ValueError, errors.UnitParseError), err:
3909 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3911 (ispecs_disk_size, ispecs_mem_size, err),
3914 # prepare ipolicy dict
3915 ispecs_transposed = {
3916 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3917 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3918 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3919 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3920 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3923 # first, check that the values given are correct
3925 forced_type = TISPECS_GROUP_TYPES
3927 forced_type = TISPECS_CLUSTER_TYPES
3928 for specs in ispecs_transposed.values():
3929 assert type(specs) is dict
3930 utils.ForceDictType(specs, forced_type)
3934 constants.ISPECS_MIN: {},
3935 constants.ISPECS_MAX: {},
3936 constants.ISPECS_STD: {},
3938 for (name, specs) in ispecs_transposed.iteritems():
3939 assert name in constants.ISPECS_PARAMETERS
3940 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3941 assert key in ispecs
3942 ispecs[key][name] = val
3944 for key in constants.ISPECS_MINMAX_KEYS:
3947 objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3949 minmax_out[key] = ispecs[key]
3950 ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3952 ipolicy[constants.ISPECS_STD] = \
3953 objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3954 ispecs[constants.ISPECS_STD])
3956 ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3959 def _ParseSpecUnit(spec, keyname):
3961 for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3964 ret[k] = utils.ParseUnit(ret[k])
3965 except (TypeError, ValueError, errors.UnitParseError), err:
3966 raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3967 " specs: %s" % (k, ret[k], keyname, err)),
def _ParseISpec(spec, keyname, required):
  """Parse an instance spec, checking parameter types and completeness.

  @type spec: dict
  @param spec: the spec as given on the command line
  @type keyname: string
  @param keyname: name of the spec, used only for error messages
  @type required: bool
  @param required: whether all spec parameters must be present
  @rtype: dict
  @raise errors.OpPrereqError: on missing parameters or wrong value types

  """
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret
3983 def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
3985 if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
3986 len(minmax_ispecs[0]) == 1):
3987 for (key, spec) in minmax_ispecs[0].items():
3988 # This loop is executed exactly once
3989 if key in allowed_values and not spec:
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fill an instance policy from the full --ipolicy-...-specs options.

  @type ipolicy_out: dict
  @param ipolicy_out: policy dict, filled in place
  @param minmax_ispecs: list of min/max spec dicts, or None
  @param std_ispecs: dict of standard specs, or None
  @param group_ipolicy: whether the policy is at group level
  @param allowed_values: list of special non-spec values (e.g. "default")
  @raise errors.OpPrereqError: on invalid keys or spec values

  """
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # a special placeholder value was given instead of real specs
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  The split ispecs_... options and the full minmax_ispecs/std_ispecs
  options are mutually exclusive.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  @rtype: dict
  @return: the created instance policy
  @raise errors.OpPrereqError: on conflicting or invalid options

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()

  if split_specs:
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      # keep the placeholder value verbatim
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
4071 def _SerializeGenericInfo(buf, data, level, afterkey=False):
4072 """Formatting core of L{PrintGenericInfo}.
4074 @param buf: (string) stream to accumulate the result into
4075 @param data: data to format
4077 @param level: depth in the data hierarchy, used for indenting
4078 @type afterkey: bool
4079 @param afterkey: True when we are in the middle of a line after a key (used
4080 to properly add newlines or indentation)
4084 if isinstance(data, dict):
4093 for key in sorted(data):
4095 buf.write(baseind * level)
4100 _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4101 elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4102 # list of tuples (an ordered dictionary)
4108 for (key, val) in data:
4110 buf.write(baseind * level)
4115 _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4116 elif isinstance(data, list):
4127 buf.write(baseind * level)
4131 buf.write(baseind[1:])
4132 _SerializeGenericInfo(buf, item, level + 1)
4134 # This branch should be only taken for strings, but it's practically
4135 # impossible to guarantee that no other types are produced somewhere
4136 buf.write(str(data))
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  # the serializer always terminates with a newline; ToStdout adds its own
  ToStdout(buf.getvalue().rstrip("\n"))