4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 "ENABLED_DISK_TEMPLATES_OPT",
89 "FILESTORE_DRIVER_OPT",
97 "GLOBAL_SHARED_FILEDIR_OPT",
102 "DEFAULT_IALLOCATOR_OPT",
103 "IDENTIFY_DEFAULTS_OPT",
104 "IGNORE_CONSIST_OPT",
106 "IGNORE_FAILURES_OPT",
107 "IGNORE_OFFLINE_OPT",
108 "IGNORE_REMOVE_FAILURES_OPT",
109 "IGNORE_SECONDARIES_OPT",
111 "INCLUDEDEFAULTS_OPT",
114 "MAINTAIN_NODE_HEALTH_OPT",
116 "MASTER_NETMASK_OPT",
118 "MIGRATION_MODE_OPT",
119 "MODIFY_ETCHOSTS_OPT",
123 "NEW_CLUSTER_CERT_OPT",
124 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
125 "NEW_CONFD_HMAC_KEY_OPT",
129 "NEW_SPICE_CERT_OPT",
131 "NOCONFLICTSCHECK_OPT",
132 "NODE_FORCE_JOIN_OPT",
134 "NODE_PLACEMENT_OPT",
138 "NODRBD_STORAGE_OPT",
144 "NOMODIFY_ETCHOSTS_OPT",
145 "NOMODIFY_SSH_SETUP_OPT",
149 "NORUNTIME_CHGS_OPT",
152 "NOSSH_KEYCHECK_OPT",
166 "PREALLOC_WIPE_DISKS_OPT",
167 "PRIMARY_IP_VERSION_OPT",
175 "REMOVE_INSTANCE_OPT",
176 "REMOVE_RESERVED_IPS_OPT",
182 "SECONDARY_ONLY_OPT",
187 "SHUTDOWN_TIMEOUT_OPT",
189 "SPECS_CPU_COUNT_OPT",
190 "SPECS_DISK_COUNT_OPT",
191 "SPECS_DISK_SIZE_OPT",
192 "SPECS_MEM_SIZE_OPT",
193 "SPECS_NIC_COUNT_OPT",
195 "IPOLICY_STD_SPECS_OPT",
196 "IPOLICY_DISK_TEMPLATES",
197 "IPOLICY_VCPU_RATIO",
204 "STARTUP_PAUSED_OPT",
213 "USE_EXTERNAL_MIP_SCRIPT",
221 "IGNORE_IPOLICY_OPT",
222 "INSTANCE_POLICY_OPTS",
223 # Generic functions for CLI programs
225 "CreateIPolicyFromOpts",
227 "GenericInstanceCreate",
233 "JobSubmittedException",
235 "RunWhileClusterStopped",
239 # Formatting functions
240 "ToStderr", "ToStdout",
243 "FormatParamsDictInfo",
245 "PrintIPolicyCommand",
255 # command line options support infrastructure
256 "ARGS_MANY_INSTANCES",
259 "ARGS_MANY_NETWORKS",
279 "OPT_COMPL_INST_ADD_NODES",
280 "OPT_COMPL_MANY_NODES",
281 "OPT_COMPL_ONE_IALLOCATOR",
282 "OPT_COMPL_ONE_INSTANCE",
283 "OPT_COMPL_ONE_NODE",
284 "OPT_COMPL_ONE_NODEGROUP",
285 "OPT_COMPL_ONE_NETWORK",
287 "OPT_COMPL_ONE_EXTSTORAGE",
292 "COMMON_CREATE_OPTS",
298 #: Priorities (sorted)
300 ("low", constants.OP_PRIO_LOW),
301 ("normal", constants.OP_PRIO_NORMAL),
302 ("high", constants.OP_PRIO_HIGH),
305 #: Priority dictionary for easier lookup
306 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
307 # we migrate to Python 2.6
308 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
310 # Query result status for clients
313 QR_INCOMPLETE) = range(3)
315 #: Maximum batch size for ChooseJob
319 # constants used to create InstancePolicy dictionary
320 TISPECS_GROUP_TYPES = {
321 constants.ISPECS_MIN: constants.VTYPE_INT,
322 constants.ISPECS_MAX: constants.VTYPE_INT,
325 TISPECS_CLUSTER_TYPES = {
326 constants.ISPECS_MIN: constants.VTYPE_INT,
327 constants.ISPECS_MAX: constants.VTYPE_INT,
328 constants.ISPECS_STD: constants.VTYPE_INT,
331 #: User-friendly names for query2 field types
333 constants.QFT_UNKNOWN: "Unknown",
334 constants.QFT_TEXT: "Text",
335 constants.QFT_BOOL: "Boolean",
336 constants.QFT_NUMBER: "Number",
337 constants.QFT_UNIT: "Storage size",
338 constants.QFT_TIMESTAMP: "Timestamp",
339 constants.QFT_OTHER: "Custom",
344 def __init__(self, min=0, max=None): # pylint: disable=W0622
349 return ("<%s min=%s max=%s>" %
350 (self.__class__.__name__, self.min, self.max))
353 class ArgSuggest(_Argument):
354 """Suggesting argument.
356 Value can be any of the ones passed to the constructor.
359 # pylint: disable=W0622
360 def __init__(self, min=0, max=None, choices=None):
361 _Argument.__init__(self, min=min, max=max)
362 self.choices = choices
365 return ("<%s min=%s max=%s choices=%r>" %
366 (self.__class__.__name__, self.min, self.max, self.choices))
369 class ArgChoice(ArgSuggest):
372 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
373 but value must be one of the choices.
378 class ArgUnknown(_Argument):
379 """Unknown argument to program (e.g. determined at runtime).
384 class ArgInstance(_Argument):
385 """Instances argument.
390 class ArgNode(_Argument):
396 class ArgNetwork(_Argument):
402 class ArgGroup(_Argument):
403 """Node group argument.
408 class ArgJobId(_Argument):
414 class ArgFile(_Argument):
415 """File path argument.
420 class ArgCommand(_Argument):
426 class ArgHost(_Argument):
432 class ArgOs(_Argument):
438 class ArgExtStorage(_Argument):
439 """ExtStorage argument.
# Argument-specification shorthands used by the per-command argument tables.
# "MANY" variants accept any number of names (min=0, no max); "ONE" variants
# require exactly one name (min=1, max=1).
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
457 def _ExtractTagsObject(opts, args):
458 """Extract the tag type object.
460 Note that this function will modify its args parameter.
463 if not hasattr(opts, "tag_type"):
464 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
466 if kind == constants.TAG_CLUSTER:
468 elif kind in (constants.TAG_NODEGROUP,
470 constants.TAG_NETWORK,
471 constants.TAG_INSTANCE):
473 raise errors.OpPrereqError("no arguments passed to the command",
478 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
482 def _ExtendTags(opts, args):
483 """Extend the args if a source file has been given.
485 This function will extend the tags with the contents of the file
486 passed in the 'tags_source' attribute of the opts parameter. A file
487 named '-' will be replaced by stdin.
490 fname = opts.tags_source
496 new_fh = open(fname, "r")
499 # we don't use the nice 'new_data = [line.strip() for line in fh]'
500 # because of python bug 1633941
502 line = new_fh.readline()
505 new_data.append(line.strip())
508 args.extend(new_data)
511 def ListTags(opts, args):
512 """List the tags on a given object.
514 This is a generic implementation that knows how to deal with all
515 three cases of tag objects (cluster, node, instance). The opts
516 argument is expected to contain a tag_type field denoting what
517 object type we work on.
520 kind, name = _ExtractTagsObject(opts, args)
521 cl = GetClient(query=True)
522 result = cl.QueryTags(kind, name)
523 result = list(result)
529 def AddTags(opts, args):
530 """Add tags on a given object.
532 This is a generic implementation that knows how to deal with all
533 three cases of tag objects (cluster, node, instance). The opts
534 argument is expected to contain a tag_type field denoting what
535 object type we work on.
538 kind, name = _ExtractTagsObject(opts, args)
539 _ExtendTags(opts, args)
541 raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
542 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
543 SubmitOrSend(op, opts)
546 def RemoveTags(opts, args):
547 """Remove tags from a given object.
549 This is a generic implementation that knows how to deal with all
550 three cases of tag objects (cluster, node, instance). The opts
551 argument is expected to contain a tag_type field denoting what
552 object type we work on.
555 kind, name = _ExtractTagsObject(opts, args)
556 _ExtendTags(opts, args)
558 raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
559 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
560 SubmitOrSend(op, opts)
563 def check_unit(option, opt, value): # pylint: disable=W0613
564 """OptParsers custom converter for units.
568 return utils.ParseUnit(value)
569 except errors.UnitParseError, err:
570 raise OptionValueError("option %s: %s" % (opt, err))
573 def _SplitKeyVal(opt, data, parse_prefixes):
574 """Convert a KeyVal string into a dict.
576 This function will convert a key=val[,...] string into a dict. Empty
577 values will be converted specially: keys which have the prefix 'no_'
578 will have the value=False and the prefix stripped, keys with the prefix
579 "-" will have value=None and the prefix stripped, and the others will
583 @param opt: a string holding the option name for which we process the
584 data, used in building error messages
586 @param data: a string of the format key=val,key=val,...
587 @type parse_prefixes: bool
588 @param parse_prefixes: whether to handle prefixes specially
590 @return: {key=val, key=val}
591 @raises errors.ParameterError: if there are duplicate keys
596 for elem in utils.UnescapeAndSplit(data, sep=","):
598 key, val = elem.split("=", 1)
600 if elem.startswith(NO_PREFIX):
601 key, val = elem[len(NO_PREFIX):], False
602 elif elem.startswith(UN_PREFIX):
603 key, val = elem[len(UN_PREFIX):], None
605 key, val = elem, True
607 raise errors.ParameterError("Missing value for key '%s' in option %s" %
610 raise errors.ParameterError("Duplicate key '%s' in option %s" %
616 def _SplitIdentKeyVal(opt, value, parse_prefixes):
617 """Helper function to parse "ident:key=val,key=val" options.
620 @param opt: option name, used in error messages
622 @param value: expected to be in the format "ident:key=val,key=val,..."
623 @type parse_prefixes: bool
624 @param parse_prefixes: whether to handle prefixes specially (see
627 @return: (ident, {key=val, key=val})
628 @raises errors.ParameterError: in case of duplicates or other parsing errors
632 ident, rest = value, ""
634 ident, rest = value.split(":", 1)
636 if parse_prefixes and ident.startswith(NO_PREFIX):
638 msg = "Cannot pass options when removing parameter groups: %s" % value
639 raise errors.ParameterError(msg)
640 retval = (ident[len(NO_PREFIX):], False)
641 elif (parse_prefixes and ident.startswith(UN_PREFIX) and
642 (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
644 msg = "Cannot pass options when removing parameter groups: %s" % value
645 raise errors.ParameterError(msg)
646 retval = (ident[len(UN_PREFIX):], None)
648 kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
649 retval = (ident, kv_dict)
653 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
654 """Custom parser for ident:key=val,key=val options.
656 This will store the parsed values as a tuple (ident, {key: val}). As such,
657 multiple uses of this option via action=append is possible.
660 return _SplitIdentKeyVal(opt, value, True)
663 def check_key_val(option, opt, value): # pylint: disable=W0613
664 """Custom parser class for key=val,key=val options.
666 This will store the parsed values as a dict {key: val}.
669 return _SplitKeyVal(opt, value, True)
672 def _SplitListKeyVal(opt, value):
674 for elem in value.split("/"):
676 raise errors.ParameterError("Empty section in option '%s'" % opt)
677 (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
679 msg = ("Duplicated parameter '%s' in parsing %s: %s" %
681 raise errors.ParameterError(msg)
682 retval[ident] = valdict
686 def check_multilist_ident_key_val(_, opt, value):
687 """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
689 @rtype: list of dictionary
690 @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
694 for line in value.split("//"):
695 retval.append(_SplitListKeyVal(opt, line))
699 def check_bool(option, opt, value): # pylint: disable=W0613
700 """Custom parser for yes/no options.
702 This will store the parsed value as either True or False.
705 value = value.lower()
706 if value == constants.VALUE_FALSE or value == "no":
708 elif value == constants.VALUE_TRUE or value == "yes":
711 raise errors.ParameterError("Invalid boolean value '%s'" % value)
714 def check_list(option, opt, value): # pylint: disable=W0613
715 """Custom parser for comma-separated lists.
718 # we have to make this explicit check since "".split(",") is [""],
719 # not an empty list :(
723 return utils.UnescapeAndSplit(value)
726 def check_maybefloat(option, opt, value): # pylint: disable=W0613
727 """Custom parser for float numbers which might be also defaults.
730 value = value.lower()
732 if value == constants.VALUE_DEFAULT:
738 # completion_suggestion is normally a list. Using numeric values not evaluating
739 # to False for dynamic completion.
740 (OPT_COMPL_MANY_NODES,
742 OPT_COMPL_ONE_INSTANCE,
744 OPT_COMPL_ONE_EXTSTORAGE,
745 OPT_COMPL_ONE_IALLOCATOR,
746 OPT_COMPL_ONE_NETWORK,
747 OPT_COMPL_INST_ADD_NODES,
748 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
750 OPT_COMPL_ALL = compat.UniqueFrozenset([
751 OPT_COMPL_MANY_NODES,
753 OPT_COMPL_ONE_INSTANCE,
755 OPT_COMPL_ONE_EXTSTORAGE,
756 OPT_COMPL_ONE_IALLOCATOR,
757 OPT_COMPL_ONE_NETWORK,
758 OPT_COMPL_INST_ADD_NODES,
759 OPT_COMPL_ONE_NODEGROUP,
763 class CliOption(Option):
764 """Custom option class for optparse.
767 ATTRS = Option.ATTRS + [
768 "completion_suggest",
770 TYPES = Option.TYPES + (
771 "multilistidentkeyval",
779 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
780 TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
781 TYPE_CHECKER["identkeyval"] = check_ident_key_val
782 TYPE_CHECKER["keyval"] = check_key_val
783 TYPE_CHECKER["unit"] = check_unit
784 TYPE_CHECKER["bool"] = check_bool
785 TYPE_CHECKER["list"] = check_list
786 TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
# (every *_OPT constant below is created through this factory alias, so the
# custom types/completion attributes of CliOption are always available)
cli_option = CliOption
# Generic output/behaviour options shared by many client commands.
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
821 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
822 action="store_true", default=False,
823 help=("Ignore offline nodes and do as much"
826 TAG_ADD_OPT = cli_option("--tags", dest="tags",
827 default=None, help="Comma-separated list of instance"
# Tag-source and job-submission options.
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
848 DRY_RUN_OPT = cli_option("--dry-run", default=False,
850 help=("Do not execute the operation, just run the"
851 " check steps and verify if it could be"
854 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
856 help="Increase the verbosity of the operation")
# Debugging, disk-sync and instance-state toggle options.
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

# NOTE: NWSYNC_OPT and WFSYNC_OPT both write dest="wait_for_sync" with
# opposite actions and defaults; a command uses whichever polarity fits it.
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))
885 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
886 help="Do not create any network cards for"
# File-storage and allocator options used at instance creation.
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
905 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
907 help="Set the default instance"
909 default=None, type="string",
910 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
912 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
914 completion_suggest=OPT_COMPL_ONE_OS)
# OS-related options; "keyval" is the custom CliOption type parsing
# key=val,key=val strings into a dict.
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")
924 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
925 action="store_true", default=False,
926 help="Do not install the OS (will"
# Parameter-group options (backend/hypervisor/disk) and the per-resource
# instance-spec options ("specs"), all parsed with the custom key=val types.
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")
# Instance-policy options; the option strings are kept in separate *_STR
# constants so other code can refer to the literal flag name.
IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
# Standard (desired) instance specs of the instance policy; follows the same
# *_STR/*_OPT pattern as IPOLICY_BOUNDS_SPECS above.
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   # FIX: user-visible help typo "Complte"
                                   help="Complete standard instance specs")
# Further instance-policy knobs; "maybefloat" additionally accepts the
# special "default" value (see check_maybefloat).
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")
998 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
999 dest="ipolicy_spindle_ratio",
1000 type="maybefloat", default=None,
1001 help=("The maximum allowed instances to"
# NOTE: these two options share the same flags ("-H"/"--hypervisor-parameters")
# but differ in dest and in single-value vs. append semantics; presumably each
# command registers only one of them — verify before mixing in one parser.
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
1014 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1015 action="store_false",
1016 help="Don't check that the instance's IP"
1019 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1020 default=True, action="store_false",
1021 help="Don't check that the instance's name"
# NIC/disk specification options for instance creation and modification.
NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

# NOTE: "--disks" (plural) reuses dest="disks" but as a plain string of
# indices rather than the appended identkeyval list of DISK_OPT.
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")
1041 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1042 dest="ignore_consistency",
1043 action="store_true", default=False,
1044 help="Ignore the consistency of the disks on"
1047 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1048 dest="allow_failover",
1049 action="store_true", default=False,
1050 help="If migration is not possible fallback to"
# Migration mode toggle: dest is "live" with default True, so passing
# --non-live stores False.
NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")
1059 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1061 choices=list(constants.HT_MIGRATION_MODES),
1062 help="Override default migration mode (choose"
1063 " either live or non-live")
# Node-targeting options; both use "-n"/"--node" with different semantics
# (single pnode[:snode] vs. an appendable node list).
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

# Exposed as a constant so other code can refer to the literal flag name.
NODEGROUP_OPT_NAME = "--node-group"
1077 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1079 help="Node group (name or uuid)",
1080 metavar="<nodegroup>",
1081 default=None, type="string",
1082 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1084 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1086 completion_suggest=OPT_COMPL_ONE_NODE)
# Instance lifecycle / display options.
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"
                         " migration/failover")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")
1125 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1126 dest="ignore_remove_failures",
1127 action="store_true", default=False,
1128 help="Remove the instance from the"
1129 " cluster configuration even if there"
1130 " are failures during the removal"
# Instance move/export and disk-replacement target options.
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

# NOTE: DST_NODE_OPT and NEW_SECONDARY_OPT both write dest="dst_node";
# they are alternative "-n" spellings used in different contexts.
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))
1166 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1167 default=False, action="store_true",
1168 help="Lock all nodes and auto-promote as needed"
# Disk replacement/activation convenience options.
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")
1184 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1186 completion_suggest=OPT_COMPL_ONE_NODE)
1188 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
# Node addition / membership options.
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

# Inverted flag: "--no-ssh-key-check" stores False into ssh_key_check
# (checking is on by default).
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")
1211 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1212 type="bool", default=None,
1213 help=("Set the offline flag on the node"
1214 " (cluster does not communicate with offline"
# Tri-state node/volume flags: type="bool" parses yes/no style values and
# default=None means "leave unchanged".
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")
1234 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1235 help="Disable support for lvm based instances"
1237 action="store_false", default=True)
# Cluster-wide hypervisor enablement (comma-separated string, parsed later).
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)
1244 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1245 dest="enabled_disk_templates",
1246 help="Comma-separated list of "
1248 type="string", default=None)
1250 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1251 type="keyval", default={},
1252 help="NIC parameters")
1254 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1255 dest="candidate_pool_size", type="int",
1256 help="Set the candidate pool size")
# Cluster-wide LVM volume group used for disk allocation; passing it also
# turns LVM support on.  The default VG name is interpolated into the help.
VG_NAME_OPT = \
  cli_option("--vg-name", dest="vg_name", metavar="VG", default=None,
             help=("Enables LVM and specifies the volume group"
                   " name (cluster-wide) for disk allocation"
                   " [%s]" % constants.DEFAULT_VG))
1264 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1265 help="Destroy cluster", action="store_true")
# Dangerous override: skip the voting step that normally confirms node
# agreement before an operation proceeds.
NOVOTING_OPT = cli_option(
  "--no-voting",
  dest="no_voting",
  action="store_true",
  default=False,
  help="Skip node agreement check (dangerous)")
1271 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1272 help="Specify the mac prefix for the instance IP"
1273 " addresses, in the format XX:XX:XX",
1277 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1278 help="Specify the node interface (cluster-wide)"
1279 " on which the master IP address will be added"
1280 " (cluster init default: %s)" %
1281 constants.DEFAULT_BRIDGE,
1285 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1286 help="Specify the netmask of the master IP",
1290 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1291 dest="use_external_mip_script",
1292 help="Specify whether to run a"
1293 " user-provided script for the master"
1294 " IP address turnup and"
1295 " turndown operations",
1296 type="bool", metavar=_YORNO, default=None)
1298 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1299 help="Specify the default directory (cluster-"
1300 "wide) for storing the file-based disks [%s]" %
1301 pathutils.DEFAULT_FILE_STORAGE_DIR,
# Cluster-wide default directory for shared file-based disk storage; the
# compile-time default path is interpolated into the help text.
GLOBAL_SHARED_FILEDIR_OPT = \
  cli_option("--shared-file-storage-dir", dest="shared_file_storage_dir",
             metavar="SHAREDDIR", default=None,
             help=("Specify the default directory (cluster-wide) for storing the"
                   " shared file-based disks [%s]" %
                   pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR))
1313 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1314 help="Don't modify %s" % pathutils.ETC_HOSTS,
1315 action="store_false", default=True)
# Tri-state switch (yes/no/unset) for automatic /etc/hosts maintenance;
# unlike --no-etc-hosts this can also explicitly re-enable the feature.
MODIFY_ETCHOSTS_OPT = cli_option(
  "--modify-etc-hosts",
  dest="modify_etc_hosts",
  metavar=_YORNO,
  default=None,
  type="bool",
  help="Defines whether the cluster should autonomously modify"
       " and keep in sync the /etc/hosts file of the nodes")
1323 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1324 help="Don't initialize SSH keys",
1325 action="store_false", default=True)
# Emit machine-parseable error codes instead of plain messages
# (used by cluster-verify consumers).
ERROR_CODES_OPT = cli_option(
  "--error-codes",
  dest="error_codes",
  action="store_true",
  default=False,
  help="Enable parseable error messages")
1331 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1332 help="Skip N+1 memory redundancy tests",
1333 action="store_true", default=False)
1335 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1336 help="Type of reboot: soft/hard/full",
1337 default=constants.INSTANCE_REBOOT_HARD,
1339 choices=list(constants.REBOOT_TYPES))
1341 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1342 dest="ignore_secondaries",
1343 default=False, action="store_true",
1344 help="Ignore errors from secondaries")
# Inverted flag: "--noshutdown" stores False into dest="shutdown",
# so the default (True) means the instance IS shut down.
NOSHUTDOWN_OPT = cli_option(
  "--noshutdown",
  dest="shutdown",
  action="store_false",
  default=True,
  help="Don't shutdown the instance (unsafe)")
1350 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1351 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1352 help="Maximum time to wait")
1354 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1355 dest="shutdown_timeout", type="int",
1356 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1357 help="Maximum time to wait for instance"
1360 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1362 help=("Number of seconds between repetions of the"
1365 EARLY_RELEASE_OPT = cli_option("--early-release",
1366 dest="early_release", default=False,
1367 action="store_true",
1368 help="Release the locks on the secondary"
1371 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1372 dest="new_cluster_cert",
1373 default=False, action="store_true",
1374 help="Generate a new cluster certificate")
1376 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1378 help="File containing new RAPI certificate")
1380 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1381 default=None, action="store_true",
1382 help=("Generate a new self-signed RAPI"
1385 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1387 help="File containing new SPICE certificate")
1389 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1391 help="File containing the certificate of the CA"
1392 " which signed the SPICE certificate")
1394 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1395 dest="new_spice_cert", default=None,
1396 action="store_true",
1397 help=("Generate a new self-signed SPICE"
1400 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1401 dest="new_confd_hmac_key",
1402 default=False, action="store_true",
1403 help=("Create a new HMAC key for %s" %
1406 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1407 dest="cluster_domain_secret",
1409 help=("Load new new cluster domain"
1410 " secret from file"))
1412 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1413 dest="new_cluster_domain_secret",
1414 default=False, action="store_true",
1415 help=("Create a new cluster domain"
1418 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1419 dest="use_replication_network",
1420 help="Whether to use the replication network"
1421 " for talking to the nodes",
1422 action="store_true", default=False)
# Tri-state cluster setting for automatic node-health maintenance
# (cleanup of unknown instances, stray DRBD devices, etc.).
MAINTAIN_NODE_HEALTH_OPT = cli_option(
  "--maintain-node-health",
  dest="maintain_node_health",
  type="bool",
  metavar=_YORNO,
  default=None,
  help="Configure the cluster to automatically maintain node"
       " health, by shutting down unknown instances, shutting down"
       " unknown DRBD devices, etc.")
# On instance import: detect saved parameters that match the current
# cluster defaults and record them as defaults rather than overrides.
IDENTIFY_DEFAULTS_OPT = cli_option(
  "--identify-defaults",
  dest="identify_defaults",
  action="store_true",
  default=False,
  help="Identify which saved instance parameters are equal to"
       " the current cluster defaults and set them as such, instead"
       " of marking them as overridden")
1438 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1439 action="store", dest="uid_pool",
1440 help=("A list of user-ids or user-id"
1441 " ranges separated by commas"))
1443 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1444 action="store", dest="add_uids",
1445 help=("A list of user-ids or user-id"
1446 " ranges separated by commas, to be"
1447 " added to the user-id pool"))
1449 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1450 action="store", dest="remove_uids",
1451 help=("A list of user-ids or user-id"
1452 " ranges separated by commas, to be"
1453 " removed from the user-id pool"))
# Logical volumes that cluster-verify should not complain about; passed
# as a single comma-separated string.
RESERVED_LVS_OPT = cli_option(
  "--reserved-lvs",
  dest="reserved_lvs",
  action="store",
  default=None,
  help=("A comma-separated list of reserved"
        " logical volumes names, that will be"
        " ignored by cluster verify"))
1461 ROMAN_OPT = cli_option("--roman",
1462 dest="roman_integers", default=False,
1463 action="store_true",
1464 help="Use roman numbers for positive integers")
1466 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1467 action="store", default=None,
1468 help="Specifies usermode helper for DRBD")
1470 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1471 action="store_false", default=True,
1472 help="Disable support for DRBD")
# Cluster-wide IP version (4 or 6) used for the nodes' primary addresses;
# the metavar shows the two accepted numeric values.
PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version",
             dest="primary_ip_version",
             action="store",
             default=constants.IP4_VERSION,
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")
1481 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1482 action="store_true",
1483 help="Show machine name for every line in output")
1485 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1486 action="store_true",
1487 help=("Hide successful results and show failures"
1488 " only (determined by the exit code)"))
1490 REASON_OPT = cli_option("--reason", default=None,
1491 help="The reason for executing the command")
1494 def _PriorityOptionCb(option, _, value, parser):
1495 """Callback for processing C{--priority} option.
1498 value = _PRIONAME_TO_VALUE[value]
1500 setattr(parser.values, option.dest, value)
# Opcode submission priority; the symbolic name chosen by the user is
# converted to its numeric value by the _PriorityOptionCb callback.
PRIORITY_OPT = cli_option(
  "--priority",
  dest="priority",
  default=None,
  action="callback",
  type="choice",
  choices=_PRIONAME_TO_VALUE.keys(),
  metavar="|".join(name for name, _ in _PRIORITY_NAMES),
  callback=_PriorityOptionCb,
  help="Priority for opcode processing")
1510 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1511 type="bool", default=None, metavar=_YORNO,
1512 help="Sets the hidden flag on the OS")
1514 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1515 type="bool", default=None, metavar=_YORNO,
1516 help="Sets the blacklisted flag on the OS")
1518 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1519 type="bool", metavar=_YORNO,
1520 dest="prealloc_wipe_disks",
1521 help=("Wipe disks prior to instance"
1524 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1525 type="keyval", default=None,
1526 help="Node parameters")
1528 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1529 action="store", metavar="POLICY", default=None,
1530 help="Allocation policy for the node group")
1532 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1533 type="bool", metavar=_YORNO,
1534 dest="node_powered",
1535 help="Specify if the SoR for node is powered")
1537 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1538 default=constants.OOB_TIMEOUT,
1539 help="Maximum time to wait for out-of-band helper")
1541 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1542 default=constants.OOB_POWER_DELAY,
1543 help="Time in seconds to wait between power-ons")
1545 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1546 action="store_true", default=False,
1547 help=("Whether command argument should be treated"
1550 NO_REMEMBER_OPT = cli_option("--no-remember",
1552 action="store_true", default=False,
1553 help="Perform but do not record the change"
1554 " in the configuration")
1556 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1557 default=False, action="store_true",
1558 help="Evacuate primary instances only")
# Restrict node evacuation to secondary instances; only meaningful for
# internally mirrored disk templates (list interpolated into the help).
SECONDARY_ONLY_OPT = cli_option(
  "-s", "--secondary-only",
  action="store_true",
  default=False,
  help=("Evacuate secondary instances only"
        " (applies only to internally mirrored"
        " disk templates, e.g. %s)" %
        utils.CommaJoin(constants.DTS_INT_MIRROR)))
1567 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1568 action="store_true", default=False,
1569 help="Pause instance at startup")
1571 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1572 help="Destination node group (name or uuid)",
1573 default=None, action="append",
1574 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
# Repeatable option collecting cluster-verify error codes to ignore.
# NOTE(review): default=[] with action="append" shares one list across
# parses (optparse copies the defaults dict, not the list) — harmless for
# one-shot CLI runs, but worth confirming before reusing the parser.
IGNORE_ERRORS_OPT = cli_option(
  "-I", "--ignore-errors",
  dest="ignore_errors",
  default=[],
  action="append",
  choices=list(constants.CV_ALL_ECODES_STRINGS),
  help="Error code to be ignored")
1581 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1583 help=("Specify disk state information in the"
1585 " storage_type/identifier:option=value,...;"
1586 " note this is unused for now"),
1589 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1591 help=("Specify hypervisor state information in the"
1592 " format hypervisor:option=value,...;"
1593 " note this is unused for now"),
1596 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1597 action="store_true", default=False,
1598 help="Ignore instance policy violations")
1600 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1601 help="Sets the instance's runtime memory,"
1602 " ballooning it up or down to the new value",
1603 default=None, type="unit", metavar="<size>")
1605 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1606 action="store_true", default=False,
1607 help="Marks the grow as absolute instead of the"
1608 " (default) relative mode")
1610 NETWORK_OPT = cli_option("--network",
1611 action="store", default=None, dest="network",
1612 help="IP network in CIDR notation")
1614 GATEWAY_OPT = cli_option("--gateway",
1615 action="store", default=None, dest="gateway",
1616 help="IP address of the router (gateway)")
1618 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1619 action="store", default=None,
1620 dest="add_reserved_ips",
1621 help="Comma-separated list of"
1622 " reserved IPs to add")
1624 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1625 action="store", default=None,
1626 dest="remove_reserved_ips",
1627 help="Comma-delimited list of"
1628 " reserved IPs to remove")
1630 NETWORK6_OPT = cli_option("--network6",
1631 action="store", default=None, dest="network6",
1632 help="IP network in CIDR notation")
1634 GATEWAY6_OPT = cli_option("--gateway6",
1635 action="store", default=None, dest="gateway6",
1636 help="IP6 address of the router (gateway)")
1638 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1639 dest="conflicts_check",
1641 action="store_false",
1642 help="Don't check for conflicting IPs")
1644 INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1645 default=False, action="store_true",
1646 help="Include default values")
#: Options provided by all commands
COMMON_OPTS = [
  DEBUG_OPT,
  REASON_OPT,
  ]
1651 # options related to asynchronous job handling
1658 # common options for creating instances. add and import then add their own
1660 COMMON_CREATE_OPTS = [
1665 FILESTORE_DRIVER_OPT,
1671 NOCONFLICTSCHECK_OPT,
1684 # common instance policy options
1685 INSTANCE_POLICY_OPTS = [
1686 IPOLICY_BOUNDS_SPECS_OPT,
1687 IPOLICY_DISK_TEMPLATES,
1689 IPOLICY_SPINDLE_RATIO,
1692 # instance policy split specs options
1693 SPLIT_ISPECS_OPTS = [
1694 SPECS_CPU_COUNT_OPT,
1695 SPECS_DISK_COUNT_OPT,
1696 SPECS_DISK_SIZE_OPT,
1698 SPECS_NIC_COUNT_OPT,
1702 class _ShowUsage(Exception):
1703 """Exception class for L{_ParseArgs}.
1706 def __init__(self, exit_error):
1707 """Initializes instances of this class.
1709 @type exit_error: bool
1710 @param exit_error: Whether to report failure on exit
1713 Exception.__init__(self)
1714 self.exit_error = exit_error
1717 class _ShowVersion(Exception):
1718 """Exception class for L{_ParseArgs}.
1723 def _ParseArgs(binary, argv, commands, aliases, env_override):
1724 """Parser for the command line arguments.
1726 This function parses the arguments and returns the function which
1727 must be executed together with its (modified) arguments.
1729 @param binary: Script name
1730 @param argv: Command line arguments
1731 @param commands: Dictionary containing command definitions
1732 @param aliases: dictionary with command aliases {"alias": "target", ...}
1733 @param env_override: list of env variables allowed for default args
1734 @raise _ShowUsage: If usage description should be shown
1735 @raise _ShowVersion: If version should be shown
1738 assert not (env_override - set(commands))
1739 assert not (set(aliases.keys()) & set(commands.keys()))
1744 # No option or command given
1745 raise _ShowUsage(exit_error=True)
1747 if cmd == "--version":
1748 raise _ShowVersion()
1749 elif cmd == "--help":
1750 raise _ShowUsage(exit_error=False)
1751 elif not (cmd in commands or cmd in aliases):
1752 raise _ShowUsage(exit_error=True)
1754 # get command, unalias it, and look it up in commands
1756 if aliases[cmd] not in commands:
1757 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1758 " command '%s'" % (cmd, aliases[cmd]))
1762 if cmd in env_override:
1763 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1764 env_args = os.environ.get(args_env_name)
1766 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1768 func, args_def, parser_opts, usage, description = commands[cmd]
1769 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1770 description=description,
1771 formatter=TitledHelpFormatter(),
1772 usage="%%prog %s %s" % (cmd, usage))
1773 parser.disable_interspersed_args()
1774 options, args = parser.parse_args(args=argv[2:])
1776 if not _CheckArguments(cmd, args_def, args):
1777 return None, None, None
1779 return func, options, args
1782 def _FormatUsage(binary, commands):
1783 """Generates a nice description of all commands.
1785 @param binary: Script name
1786 @param commands: Dictionary containing command definitions
1789 # compute the max line length for cmd + usage
1790 mlen = min(60, max(map(len, commands)))
1792 yield "Usage: %s {command} [options...] [argument...]" % binary
1793 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1797 # and format a nice command list
1798 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1799 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1800 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1801 for line in help_lines:
1802 yield " %-*s %s" % (mlen, "", line)
1807 def _CheckArguments(cmd, args_def, args):
1808 """Verifies the arguments using the argument definition.
1812 1. Abort with error if values specified by user but none expected.
1814 1. For each argument in definition
1816 1. Keep running count of minimum number of values (min_count)
1817 1. Keep running count of maximum number of values (max_count)
1818 1. If it has an unlimited number of values
1820 1. Abort with error if it's not the last argument in the definition
1822 1. If last argument has limited number of values
1824 1. Abort with error if number of values doesn't match or is too large
1826 1. Abort with error if user didn't pass enough values (min_count)
1829 if args and not args_def:
1830 ToStderr("Error: Command %s expects no arguments", cmd)
1837 last_idx = len(args_def) - 1
1839 for idx, arg in enumerate(args_def):
1840 if min_count is None:
1842 elif arg.min is not None:
1843 min_count += arg.min
1845 if max_count is None:
1847 elif arg.max is not None:
1848 max_count += arg.max
1851 check_max = (arg.max is not None)
1853 elif arg.max is None:
1854 raise errors.ProgrammerError("Only the last argument can have max=None")
1857 # Command with exact number of arguments
1858 if (min_count is not None and max_count is not None and
1859 min_count == max_count and len(args) != min_count):
1860 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1863 # Command with limited number of arguments
1864 if max_count is not None and len(args) > max_count:
1865 ToStderr("Error: Command %s expects only %d argument(s)",
1869 # Command with some required arguments
1870 if min_count is not None and len(args) < min_count:
1871 ToStderr("Error: Command %s expects at least %d argument(s)",
1878 def SplitNodeOption(value):
1879 """Splits the value of a --node option.
1882 if value and ":" in value:
1883 return value.split(":", 1)
1885 return (value, None)
1888 def CalculateOSNames(os_name, os_variants):
1889 """Calculates all the names an OS can be called, according to its variants.
1891 @type os_name: string
1892 @param os_name: base name of the os
1893 @type os_variants: list or None
1894 @param os_variants: list of supported variants
1896 @return: list of valid names
1900 return ["%s+%s" % (os_name, v) for v in os_variants]
1905 def ParseFields(selected, default):
1906 """Parses the values of "--field"-like options.
1908 @type selected: string or None
1909 @param selected: User-selected options
1911 @param default: Default fields
1914 if selected is None:
1917 if selected.startswith("+"):
1918 return default + selected[1:].split(",")
1920 return selected.split(",")
1923 UsesRPC = rpc.RunWithRPC
1926 def AskUser(text, choices=None):
1927 """Ask the user a question.
1929 @param text: the question to ask
1931 @param choices: list with elements tuples (input_char, return_value,
1932 description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1934 note that the '?' char is reserved for help
1936 @return: one of the return values from the choices list; if input is
1937 not possible (i.e. not running with a tty, we return the last
1942 choices = [("y", True, "Perform the operation"),
1943 ("n", False, "Do not perform the operation")]
1944 if not choices or not isinstance(choices, list):
1945 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1946 for entry in choices:
1947 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1948 raise errors.ProgrammerError("Invalid choices element to AskUser")
1950 answer = choices[-1][1]
1952 for line in text.splitlines():
1953 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1954 text = "\n".join(new_text)
1956 f = file("/dev/tty", "a+")
1960 chars = [entry[0] for entry in choices]
1961 chars[-1] = "[%s]" % chars[-1]
1963 maps = dict([(entry[0], entry[1]) for entry in choices])
1967 f.write("/".join(chars))
1969 line = f.readline(2).strip().lower()
1974 for entry in choices:
1975 f.write(" %s - %s\n" % (entry[0], entry[2]))
1983 class JobSubmittedException(Exception):
1984 """Job was submitted, client should exit.
1986 This exception has one argument, the ID of the job that was
1987 submitted. The handler should print this ID.
1989 This is not an error, just a structured way to exit from clients.
1994 def SendJob(ops, cl=None):
1995 """Function to submit an opcode without waiting for the results.
1998 @param ops: list of opcodes
1999 @type cl: luxi.Client
2000 @param cl: the luxi client to use for communicating with the master;
2001 if None, a new client will be created
2007 job_id = cl.SubmitJob(ops)
2012 def GenericPollJob(job_id, cbs, report_cbs):
2013 """Generic job-polling function.
2015 @type job_id: number
2016 @param job_id: Job ID
2017 @type cbs: Instance of L{JobPollCbBase}
2018 @param cbs: Data callbacks
2019 @type report_cbs: Instance of L{JobPollReportCbBase}
2020 @param report_cbs: Reporting callbacks
2023 prev_job_info = None
2024 prev_logmsg_serial = None
2029 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2032 # job not found, go away!
2033 raise errors.JobLost("Job with id %s lost" % job_id)
2035 if result == constants.JOB_NOTCHANGED:
2036 report_cbs.ReportNotChanged(job_id, status)
2041 # Split result, a tuple of (field values, log entries)
2042 (job_info, log_entries) = result
2043 (status, ) = job_info
2046 for log_entry in log_entries:
2047 (serial, timestamp, log_type, message) = log_entry
2048 report_cbs.ReportLogMessage(job_id, serial, timestamp,
2050 prev_logmsg_serial = max(prev_logmsg_serial, serial)
2052 # TODO: Handle canceled and archived jobs
2053 elif status in (constants.JOB_STATUS_SUCCESS,
2054 constants.JOB_STATUS_ERROR,
2055 constants.JOB_STATUS_CANCELING,
2056 constants.JOB_STATUS_CANCELED):
2059 prev_job_info = job_info
2061 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2063 raise errors.JobLost("Job with id %s lost" % job_id)
2065 status, opstatus, result = jobs[0]
2067 if status == constants.JOB_STATUS_SUCCESS:
2070 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2071 raise errors.OpExecError("Job was canceled")
2074 for idx, (status, msg) in enumerate(zip(opstatus, result)):
2075 if status == constants.OP_STATUS_SUCCESS:
2077 elif status == constants.OP_STATUS_ERROR:
2078 errors.MaybeRaise(msg)
2081 raise errors.OpExecError("partial failure (opcode %d): %s" %
2084 raise errors.OpExecError(str(msg))
2086 # default failure mode
2087 raise errors.OpExecError(result)
2090 class JobPollCbBase:
2091 """Base class for L{GenericPollJob} callbacks.
2095 """Initializes this class.
2099 def WaitForJobChangeOnce(self, job_id, fields,
2100 prev_job_info, prev_log_serial):
2101 """Waits for changes on a job.
2104 raise NotImplementedError()
2106 def QueryJobs(self, job_ids, fields):
2107 """Returns the selected fields for the selected job IDs.
2109 @type job_ids: list of numbers
2110 @param job_ids: Job IDs
2111 @type fields: list of strings
2112 @param fields: Fields
2115 raise NotImplementedError()
2118 class JobPollReportCbBase:
2119 """Base class for L{GenericPollJob} reporting callbacks.
2123 """Initializes this class.
2127 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2128 """Handles a log message.
2131 raise NotImplementedError()
2133 def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.
2136 @type job_id: number
2137 @param job_id: Job ID
2138 @type status: string or None
2139 @param status: Job status if available
2142 raise NotImplementedError()
2145 class _LuxiJobPollCb(JobPollCbBase):
2146 def __init__(self, cl):
2147 """Initializes this class.
2150 JobPollCbBase.__init__(self)
2153 def WaitForJobChangeOnce(self, job_id, fields,
2154 prev_job_info, prev_log_serial):
2155 """Waits for changes on a job.
2158 return self.cl.WaitForJobChangeOnce(job_id, fields,
2159 prev_job_info, prev_log_serial)
2161 def QueryJobs(self, job_ids, fields):
2162 """Returns the selected fields for the selected job IDs.
2165 return self.cl.QueryJobs(job_ids, fields)
2168 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2169 def __init__(self, feedback_fn):
2170 """Initializes this class.
2173 JobPollReportCbBase.__init__(self)
2175 self.feedback_fn = feedback_fn
2177 assert callable(feedback_fn)
2179 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2180 """Handles a log message.
2183 self.feedback_fn((timestamp, log_type, log_msg))
2185 def ReportNotChanged(self, job_id, status):
2186 """Called if a job hasn't changed in a while.
2192 class StdioJobPollReportCb(JobPollReportCbBase):
2194 """Initializes this class.
2197 JobPollReportCbBase.__init__(self)
2199 self.notified_queued = False
2200 self.notified_waitlock = False
2202 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2203 """Handles a log message.
2206 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2207 FormatLogMessage(log_type, log_msg))
2209 def ReportNotChanged(self, job_id, status):
2210 """Called if a job hasn't changed in a while.
2216 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2217 ToStderr("Job %s is waiting in queue", job_id)
2218 self.notified_queued = True
2220 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2221 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2222 self.notified_waitlock = True
2225 def FormatLogMessage(log_type, log_msg):
2226 """Formats a job message according to its type.
2229 if log_type != constants.ELOG_MESSAGE:
2230 log_msg = str(log_msg)
2232 return utils.SafeEncode(log_msg)
2235 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2236 """Function to poll for the result of a job.
2238 @type job_id: job identified
2239 @param job_id: the job to poll for results
2240 @type cl: luxi.Client
2241 @param cl: the luxi client to use for communicating with the master;
2242 if None, a new client will be created
2248 if reporter is None:
2250 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2252 reporter = StdioJobPollReportCb()
2254 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2256 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2259 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2260 """Legacy function to submit an opcode.
2262 This is just a simple wrapper over the construction of the processor
2263 instance. It should be extended to better handle feedback and
2264 interaction functions.
2270 SetGenericOpcodeOpts([op], opts)
2272 job_id = SendJob([op], cl=cl)
2273 if hasattr(opts, "print_jobid") and opts.print_jobid:
2274 ToStdout("%d" % job_id)
2276 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2279 return op_results[0]
2282 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2283 """Wrapper around SubmitOpCode or SendJob.
2285 This function will decide, based on the 'opts' parameter, whether to
2286 submit and wait for the result of the opcode (and return it), or
2287 whether to just send the job and print its identifier. It is used in
2288 order to simplify the implementation of the '--submit' option.
  It will also process the opcodes if we're sending them via SendJob
2291 (otherwise SubmitOpCode does it).
2294 if opts and opts.submit_only:
2296 SetGenericOpcodeOpts(job, opts)
2297 job_id = SendJob(job, cl=cl)
2298 if opts.print_jobid:
2299 ToStdout("%d" % job_id)
2300 raise JobSubmittedException(job_id)
2302 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2305 def _InitReasonTrail(op, opts):
2306 """Builds the first part of the reason trail
2308 Builds the initial part of the reason trail, adding the user provided reason
2309 (if it exists) and the name of the command starting the operation.
2311 @param op: the opcode the reason trail will be added to
2312 @param opts: the command line options selected by the user
2315 assert len(sys.argv) >= 2
2319 trail.append((constants.OPCODE_REASON_SRC_USER,
2323 binary = os.path.basename(sys.argv[0])
2324 source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2325 command = sys.argv[1]
2326 trail.append((source, command, utils.EpochNano()))
2330 def SetGenericOpcodeOpts(opcode_list, options):
2331 """Processor for generic options.
2333 This function updates the given opcodes based on generic command
2334 line options (like debug, dry-run, etc.).
2336 @param opcode_list: list of opcodes
2337 @param options: command line options or None
2338 @return: None (in-place modification)
2343 for op in opcode_list:
2344 op.debug_level = options.debug
2345 if hasattr(options, "dry_run"):
2346 op.dry_run = options.dry_run
2347 if getattr(options, "priority", None) is not None:
2348 op.priority = options.priority
2349 _InitReasonTrail(op, options)
2352 def GetClient(query=False):
2353 """Connects to the a luxi socket and returns a client.
2355 @type query: boolean
2356 @param query: this signifies that the client will only be
2357 used for queries; if the build-time parameter
2358 enable-split-queries is enabled, then the client will be
2359 connected to the query socket instead of the masterd socket
2362 override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2364 if override_socket == constants.LUXI_OVERRIDE_MASTER:
2365 address = pathutils.MASTER_SOCKET
2366 elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2367 address = pathutils.QUERY_SOCKET
2369 address = override_socket
2370 elif query and constants.ENABLE_SPLIT_QUERY:
2371 address = pathutils.QUERY_SOCKET
2374 # TODO: Cache object?
2376 client = luxi.Client(address=address)
2377 except luxi.NoMasterError:
2378 ss = ssconf.SimpleStore()
2380 # Try to read ssconf file
2383 except errors.ConfigurationError:
2384 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2385 " not part of a cluster",
2388 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2389 if master != myself:
2390 raise errors.OpPrereqError("This is not the master node, please connect"
2391 " to node '%s' and rerun the command" %
2392 master, errors.ECODE_INVAL)
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): this excerpt elides the setup of 'retcode', the output
  # buffer 'obuf' (a StringIO) and 'msg' (the stringified exception), as
  # well as several 'else:' branch openers in the chain below.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      # One line per failed hook; the second write below presumably belongs
      # to an elided 'else:' for hooks that produced no output.
      obuf.write("  node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Distinguish resolving our own hostname from resolving someone else's.
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      # NOTE(review): 'else:' opener appears elided before this line.
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      # NOTE(review): 'else:' opener appears elided before this write.
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    # Name the daemon based on which socket we failed to reach.
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
      # NOTE(review): 'else:' opener appears elided before this line.
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    # NOTE(review): the continuation of this write call appears elided.
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    # NOTE(review): the final catch-all 'else:' opener appears elided before
    # this write.
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands

  """
  # save the program name and the entire command line for later logging
  # NOTE(review): this excerpt elides the guards/branches around the argv
  # handling; the duplicated assignments below belong to different elided
  # branches — confirm against the full source.
  binary = os.path.basename(sys.argv[0])
  binary = sys.argv[0]

  if len(sys.argv) >= 2:
    logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
    # Fallback names used when no usable argv is available (elided branch).
    binary = "<unknown program>"
    cmdline = "<unknown>"

  # NOTE(review): the 'try:' for the following 'except' clauses appears to
  # be elided, as is the tail of this _ParseArgs call.
  (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
  except _ShowVersion:
    # --version handling: print and exit successfully.
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
    # NOTE(review): the exit-code selection logic between these two returns
    # appears elided.
    return constants.EXIT_FAILURE
    return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error
  if override is not None:
    # Let the caller force specific option values (used by wrapper scripts).
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  # NOTE(review): the 'try:' around the command invocation appears elided.
  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  # Highest NIC index determines how many NIC slots we need.
  # NOTE(review): the 'try:' for the following 'except' appears elided.
  nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
  # One dict per NIC slot; unspecified slots stay empty.
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
  # NOTE(review): the assignment into 'nics' and the final 'return nics'
  # appear to be elided from this excerpt.
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  # NOTE(review): this excerpt elides several lines (e.g. the 'instance'
  # name extraction from args, defaults for hypervisor/nics/tags, the
  # try/except openers, and parts of the opcode construction below).
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # Disk handling: diskless templates must not get any disk information.
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
  if (not opts.disks and not opts.sd_size
      and mode == constants.INSTANCE_CREATE):
    raise errors.OpPrereqError("No disk information specified",
  # '--disk' and the legacy '-s' (single size) options are exclusive.
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
                               " '-s' option", errors.ECODE_INVAL)
  if opts.sd_size is not None:
    opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

  # Highest disk index determines how many disk slots we need.
  disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
  disks = [{}] * disk_max
  for didx, ddict in opts.disks:
    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
    elif constants.IDISK_SIZE in ddict:
      # 'size' and 'adopt' are mutually exclusive per disk.
      if constants.IDISK_ADOPT in ddict:
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx, errors.ECODE_INVAL)
      ddict[constants.IDISK_SIZE] = \
        utils.ParseUnit(ddict[constants.IDISK_SIZE])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                   (didx, err), errors.ECODE_INVAL)
    elif constants.IDISK_ADOPT in ddict:
      if constants.IDISK_SPINDLES in ddict:
        raise errors.OpPrereqError("spindles is not a valid option when"
                                   " adopting a disk", errors.ECODE_INVAL)
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                   " import", errors.ECODE_INVAL)
      # Adopted disks keep their existing size; 0 is a placeholder.
      ddict[constants.IDISK_SIZE] = 0
      # NOTE(review): an 'else:' opener appears elided before this raise.
      raise errors.OpPrereqError("Missing size or adoption source for"
                                 " disk %d" % didx, errors.ECODE_INVAL)

  if opts.tags is not None:
    tags = opts.tags.split(",")

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # Mode-specific parameters for the opcode.
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    # NOTE(review): an 'else:' opener appears elided before this raise.
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    # SSH runner used for commands on remote (non-master) nodes
    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; daemons are restarted here first
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      # NOTE(review): the 'else:' opener for the SSH code path appears to be
      # elided from this excerpt.
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    # NOTE(review): the failure check guarding this error path (presumably
    # 'if result.failed:') appears to be elided.
    errmsg = ["Failed to run command %s" % result.cmd]
    errmsg.append("on node %s" % node_name)
    errmsg.append(": exitcode %s and error %s" %
                  (result.exit_code, result.output))
    raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)

    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
    # NOTE(review): the 'try:' for the following 'except' appears elided.
    return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)
      # NOTE(review): a re-raise appears to be elided here.

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])

    # Resume watcher
    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  # NOTE(review): the client creation (assignment of 'cl') appears to be
  # elided from this excerpt.
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  # NOTE(review): the statement dropping the client reference appears to be
  # elided here.

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @param headers: dictionary mapping field names to headers for
  @param fields: the field names corresponding to each row in
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # NOTE(review): this excerpt is heavily truncated — the end of the
  # signature, the defaults for numfields/unitfields, the 'format_fields',
  # 'result' and loop openers, and the final return are all elided.
  if numfields is None:
  if unitfields is None:
  # Wrap into FieldSet for prefix/pattern matching of field names.
  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # Right-align numeric fields; '*' takes the width as an argument.
      format_fields.append("%*s")
      # NOTE(review): an 'else:' opener appears elided before this append.
      format_fields.append("%-*s")

  if separator is None:
    # Smart mode: track maximum length per column to compute widths.
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    # NOTE(review): an 'else:' opener appears elided before this line.
    format_str = separator.replace("%", "%%").join(format_fields)

  # NOTE(review): the 'for row in data:' opener appears elided here.
  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))

  # NOTE(review): an 'if headers:' guard appears elided around this block.
  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    if fields and not numfields.Matches(fields[-1]):
    # Separator line made of dashes, one per column.
    line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  # NOTE(review): the body of this function is missing from this excerpt; it
  # is registered below as the formatter for QFT_BOOL query values — confirm
  # the actual formatting against the full source.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  # NOTE(review): the closing brace of this dict is elided from the excerpt.
  # QFT_UNIT is deliberately absent: it depends on the requested unit and is
  # handled dynamically in _GetColumnFormatter (see the assert there).
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  # Per-field override takes precedence over the kind-based default.
  fmt = override.get(fdef.name, None)
  # NOTE(review): the early return for a non-None override appears to be
  # elided from this excerpt.

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  # NOTE(review): the return of a found default formatter appears to be
  # elided before this fallthrough error.
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    # NOTE(review): the assignment of the formatting function (self._fn, used
    # in __call__ below) appears to be elided from this excerpt.
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    # Query results carry a (status, value) pair per field.
    (status, value) = data

    # Report the status (for statistics) before formatting.
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
      "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"
  # NOTE(review): this excerpt elides the try/except around the lookup below
  # (the raise belongs to the unknown-status path) and the selection between
  # verbose and normal text.
  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  raise NotImplementedError("Unknown status %s" % status)
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
      see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if format_override is None:
    format_override = {}

  # Per-status counters, filled in by _RecordStatus while formatting
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    # NOTE(review): the body of this closure is elided from this excerpt; it
    # is passed into _QueryColumnFormatter, which invokes it once per field.

  # NOTE(review): the 'columns = []' initialization appears elided here.
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    # NOTE(review): the tail of this call (verbose flag, align_right and
    # closing parentheses) appears elided.
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
  # NOTE(review): the QR_UNKNOWN assignment and the final 'else' branch
  # appear to be elided around here.
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = []
  for fielddef in fdefs:
    if fielddef.kind == constants.QFT_UNKNOWN:
      unknown.append(fielddef)
  return unknown
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: Whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  # Only warn when there actually are unknown fields; callers (GenericList,
  # GenericListFields) rely on the boolean result to decide the exit code.
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
      None for automatic choice (human-readable for non-separator usage,
      otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
      L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
      any simple filters built by namefield should use integer values to

  """
  # NOTE(review): this excerpt elides the default client creation and some
  # guards around the filter combination below.
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  # Combine the name filter with any explicitly passed filter.
  qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  # NOTE(review): the tail of this call (header/verbose arguments) appears
  # elided, as does the loop printing 'data'.
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     format_override=format_override,

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  # NOTE(review): the surrounding return statement/list literal is elided
  # from this excerpt; GenericListFields formats four columns (Name, Type,
  # Title, Description) from this helper's result — confirm upstream.
  _QFT_NAMES.get(fdef.kind, fdef.kind),
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to show header row

  """
  # NOTE(review): this excerpt elides the default client creation and the
  # 'columns = [' opener / closing bracket around the TableColumn list.
  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  TableColumn("Name", str, False),
  TableColumn("Type", str, False),
  TableColumn("Title", str, False),
  TableColumn("Description", str, False),

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    # NOTE(review): the output call for each table line is elided here.

  # NOTE(review): a guard on 'found_unknown' appears elided before the first
  # return, which otherwise would shadow the success return.
  return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
3304 """Describes a column for L{FormatTable}.
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    # NOTE(review): assignments for 'title' and 'fn' appear to be elided from
    # this excerpt; FormatTable reads 'col.title' and calls 'col.format'.
    self.align_right = align_right
def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  # NOTE(review): the computation of 'sign' (empty for right-aligned fields,
  # "-" for left-aligned ones) appears to be elided from this excerpt.
  # The result is a %-format such as "%-10s" or "%10s".
  return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  # NOTE(review): the 'if header:' guard and its 'else:' appear elided around
  # the following initializations, as does the 'for row in rows:' opener.
  data = [[col.title for col in columns]]
  colwidth = [len(col.title) for col in columns]

  colwidth = [0 for _ in columns]

  # Format row data & update column widths
  assert len(row) == len(columns)

  formatted = [col.format(value) for value, col in zip(row, columns)]

  if separator is None:
    # Update column widths
    for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
      # Modifying a list's items while iterating is fine
      colwidth[idx] = max(oldwidth, len(value))

  data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    # NOTE(review): the width adjustment for the last column is elided here.

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # NOTE(review): the fallback return value for malformed input is elided
    # from this excerpt, as is the unpacking of ts into (sec, usecs).
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

  Without any suffix, the value will be taken to be in seconds.

  """
  # NOTE(review): this excerpt elides the 'suffix_map' definition, the empty
  # check, the try/else structure and the final return of the parsed value.
    raise errors.OpPrereqError("Empty time specification passed",
  if value[-1] not in suffix_map:
    # No recognized suffix: interpret the whole value as plain seconds.
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
    # Suffix present: strip it and apply the corresponding multiplier.
    multiplier = suffix_map[value[-1]]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of

  @param nodes: if not empty, use only this subset of nodes (minus the
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  # NOTE(review): this excerpt elides the client creation, the 'qfilter'
  # initialization and the guards around the individual filter additions.
  qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # Accept the group either by name or by UUID.
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  # Exclude the master node when filter_master is requested (elided guard).
  qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if len(qfilter) > 1:
    final_filter = [qlang.OP_AND] + qfilter
    # NOTE(review): 'else:' opener appears elided before this assert.
    assert len(qfilter) == 1
    final_filter = qfilter[0]

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    # Each row is ((status, name), (status, offline), (status, sip)).
    (_, (_, offline), _) = row

    # NOTE(review): the 'def' lines of the _GetName and _GetSip helpers
    # (whose bodies follow) appear to be elided from this excerpt.
    ((_, name), _, _) = row

    (_, _, (_, sip)) = row

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  # 'fn' selects name vs. secondary IP extraction (elided above).
  return map(fn, online)
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @param txt: the message

  """
  # NOTE(review): the 'try:' for the following 'except', the no-args write
  # branch and the trailing newline/flush handling appear to be elided from
  # this excerpt.
  stream.write(txt % args)
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Print a message on standard output, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message (possibly a %-format string)

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Print a message on standard error, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message (possibly a %-format string)

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # NOTE(review): this excerpt elides initialization of several attributes
    # used by the methods below (e.g. the luxi client, 'self.queue',
    # 'self.jobs' and 'self.opts').
    self.verbose = verbose
    self.feedback_fn = feedback_fn
    # Monotonic counter used to keep job results in submission order
    self._counter = itertools.count()

  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    # NOTE(review): the decorator (presumably @staticmethod, given the
    # missing 'self') and the body of this helper are elided.

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    # NOTE(review): the 'if each:'/'else:' structure and the 'results'
    # initialization appear elided around the following lines.
    for (_, _, ops) in self.queue:
      # SubmitJob will remove the success status, but raise an exception if
      # the submission fails, so we'll notice that anyway.
      results.append([True, self.cl.SubmitJob(ops)[0]])
    results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # Query the status of a batch of jobs; the tail of this call is elided.
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)

    # no job found yet, just pick the first one (elided 'else' path)
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    # NOTE(review): this excerpt elides several guards/loops here (pending
    # submission check, 'results' init, the polling 'while' loop and its
    # try/except, and the final sort/return).
    self.SubmitPending()

    ok_jobs = [row[2] for row in self.jobs if row[1]]
    ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    (idx, _, jid, name) = self._ChooseJob()
    ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
    job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
    except errors.JobLost, err:
      _, job_result = FormatError(err)
      ToStderr("Job %s%s has been archived, cannot check its result",
               jid, self._IfName(name, " for %s"))
    except (errors.GenericError, luxi.ProtocolError), err:
      _, job_result = FormatError(err)
      # the error message will always be shown, verbose or not
      ToStderr("Job %s%s has failed: %s",
               jid, self._IfName(name, " for %s"), job_result)
    results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results = [i[1:] for i in results]

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    # NOTE(review): the 'if wait:'/'else:' structure and per-job status
    # guards appear to be elided around the following lines.
    return self.GetResults()

    self.SubmitPending()
    for _, status, result, name in self.jobs:
      ToStdout("%s: %s", result, name)
      ToStderr("Failure for %s: %s", name, result)
    return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  # Defect fixed: the result dict was never initialized, the scalar branch
  # had no "else:" and nothing was returned.
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      # nested parameter group: recurse with the matching custom sub-dict
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      # scalar: show the custom value, or mark the effective one as default
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
def _FormatListInfoDefault(data, def_data):
  """Format a list value, falling back to the effective default list.

  @param data: the custom list, or None when not set
  @param def_data: the default (effective) list, used when data is None
  @rtype: str
  @return: comma-joined custom values, or the joined defaults marked
      as "default (...)"

  """
  # Defect fixed: the "else:" arm and the return statement were missing,
  # so the function always returned None.
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster policies
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  # At cluster level the policy is already fully specified, so it is its
  # own effective policy.
  eff_ipolicy = custom_ipolicy

  # Bounds (min/max) specs: when custom bounds exist, show each key's own
  # values; otherwise render the effective bounds marked as defaults.
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  for (k, minmax) in enumerate(custom_minmax):
    ("%s/%s" % (key, k),
     FormatParamsDictInfo(minmax[key], minmax[key]))
    for key in constants.ISPECS_MINMAX_KEYS
  for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
    ("%s/%s" % (key, k),
     FormatParamsDictInfo({}, minmax[key]))
    for key in constants.ISPECS_MINMAX_KEYS
  ret = [("bounds specs", minmax_out)]
  # Standard specs only exist at cluster level; they are their own
  # effective values there.
  stdspecs = custom_ipolicy[constants.ISPECS_STD]
  (constants.ISPECS_STD,
   FormatParamsDictInfo(stdspecs, stdspecs))
  # Allowed disk templates, with fallback to the effective list.
  ("allowed disk templates",
   _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                          eff_ipolicy[constants.IPOLICY_DTS]))
  # Remaining scalar ipolicy parameters (vcpu-ratio, spindle-ratio, ...).
  (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
  for key in constants.IPOLICY_PARAMETERS
3808 def _PrintSpecsParameters(buf, specs):
3809 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3810 buf.write(",".join(values))
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  # Standard specs exist only at cluster level (not for groups).
  stdspecs = ipolicy.get("std")
  buf.write(" %s " % IPOLICY_STD_SPECS_STR)
  _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    # A bounds option is only emitted when both ends are present.
    if minspecs and maxspecs:
      buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
      _PrintSpecsParameters(buf, minspecs)
      _PrintSpecsParameters(buf, maxspecs)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @type extra: str
  @param extra: extra text inserted into the prompt before the question
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  # NOTE(review): count is presumably len(names), set before use -- confirm.
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join([" %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  # For long lists the names are not shown up front; a "view" choice is
  # offered instead, which prints the list on demand.
  choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))

  question = msg + affected
  choice = AskUser(question, choices)
  # Re-ask with the affected list shown while the user keeps picking "v".
  choice = AskUser(msg + affected, choices)
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  @type elements: dict
  @param elements: mapping of spec name to raw value, where a value is
      either the literal default marker or a size possibly carrying a
      unit suffix (e.g. "4G")
  @rtype: dict
  @return: same mapping with sizes parsed into integers; the default
      marker is passed through unchanged
  @raise errors.UnitParseError: if a value with units cannot be parsed

  """
  # Defect fixed: "parsed" was never initialized, the default marker was
  # silently dropped, and nothing was returned.
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      # keep the "default" marker as-is; it is resolved later
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  """Fill an ipolicy dict from the legacy split --specs-* options.

  Each ispecs_* argument is a dict mapping "min"/"max"/"std" to a raw
  value; disk and memory sizes may carry unit suffixes.

  @param ipolicy: the policy dict to fill in place
  @type group_ipolicy: bool
  @param group_ipolicy: whether this is a group-level policy
  @type fill_all: bool
  @param fill_all: whether missing values should be filled from defaults

  """
  # Sizes may carry unit suffixes ("4G"); parse them into plain integers.
  ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
  if ispecs_disk_size:
    ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               (ispecs_disk_size, ispecs_mem_size, err),

  # prepare ipolicy dict
  # "transposed" layout: {param-name: {min/max/std: value}}
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,

  # first, check that the values given are correct
  # group and cluster policies accept slightly different value types
  forced_type = TISPECS_GROUP_TYPES
  forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose into {min/max/std: {param-name: value}}
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  # fill the min/max bounds (from defaults when requested) and store them
  # as the single element of the ISPECS_MINMAX list
  for key in constants.ISPECS_MINMAX_KEYS:
    objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  # std specs are filled from the cluster defaults only when filling all
  ipolicy[constants.ISPECS_STD] = \
    objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                     ispecs[constants.ISPECS_STD])
  ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3959 def _ParseSpecUnit(spec, keyname):
3961 for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3964 ret[k] = utils.ParseUnit(ret[k])
3965 except (TypeError, ValueError, errors.UnitParseError), err:
3966 raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3967 " specs: %s" % (k, ret[k], keyname, err)),
def _ParseISpec(spec, keyname, required):
  """Parse and validate one instance spec dict.

  @type spec: dict
  @param spec: the raw spec, mapping parameter names to values
  @type keyname: string
  @param keyname: name of the spec, used in error messages
  @type required: bool
  @param required: whether all spec parameters must be present
  @rtype: dict
  @return: the parsed spec, with sizes converted to integers
  @raise errors.OpPrereqError: on invalid values or (when required)
      missing parameters

  """
  # Defect fixed: the error-code argument and the final return were missing.
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret
3983 def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
3985 if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
3986 len(minmax_ispecs[0]) == 1):
3987 for (key, spec) in minmax_ispecs[0].items():
3988 # This loop is executed exactly once
3989 if key in allowed_values and not spec:
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fill an ipolicy dict from the full --ipolicy-*-specs options.

  @param ipolicy_out: the policy dict to fill in place
  @param minmax_ispecs: list of bounds spec dicts, or None
  @param std_ispecs: standard spec dict, or None
  @type group_ipolicy: bool
  @param group_ipolicy: whether this is a group-level policy
  @param allowed_values: allowed non-spec keywords, or None
  @raise errors.OpPrereqError: on invalid bounds keys or spec values

  """
  # Defect fixed: the minmax_out and mmpair_out accumulators were never
  # initialized, making the loop below fail with NameError.
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # a bare keyword (e.g. "default") replaces the whole bounds list
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        # bounds specs must be complete, hence required=True
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled
  @rtype: dict
  @return: the created instance policy

  """
  # Defect fixed: the body referenced minmax_ispecs, std_ispecs and
  # fill_all although they were absent from the parameter list, and the
  # resulting policy was never returned; the missing keyword parameters
  # are added with backward-compatible defaults.
  assert not (fill_all and allowed_values)

  # the old-style split options and the new full-spec options are
  # mutually exclusive
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()

  if split_specs:
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      # a bare keyword (e.g. "default") is stored verbatim
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  # NOTE(review): baseind (the per-level indent string) and the list items'
  # loop variable are presumably defined in elided lines -- confirm upstream.
  if isinstance(data, dict):
    # plain dictionary: one "key:" entry per line, sorted for stable output
    for key in sorted(data):
      buf.write(baseind * level)
      _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    for (key, val) in data:
      buf.write(baseind * level)
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    # plain list: YAML "- item" entries
    buf.write(baseind * level)
    buf.write(baseind[1:])
    _SerializeGenericInfo(buf, item, level + 1)
  # This branch should be only taken for strings, but it's practically
  # impossible to guarantee that no other types are produced somewhere
  buf.write(str(data))
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  # Defect fixed: buf was used without ever being created.
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  # strip the trailing newline: ToStdout adds its own
  ToStdout(buf.getvalue().rstrip("\n"))