# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 "ENABLED_DISK_TEMPLATES_OPT",
89 "FILESTORE_DRIVER_OPT",
97 "GLOBAL_SHARED_FILEDIR_OPT",
103 "DEFAULT_IALLOCATOR_OPT",
104 "IDENTIFY_DEFAULTS_OPT",
105 "IGNORE_CONSIST_OPT",
107 "IGNORE_FAILURES_OPT",
108 "IGNORE_OFFLINE_OPT",
109 "IGNORE_REMOVE_FAILURES_OPT",
110 "IGNORE_SECONDARIES_OPT",
112 "INCLUDEDEFAULTS_OPT",
115 "MAINTAIN_NODE_HEALTH_OPT",
117 "MASTER_NETMASK_OPT",
119 "MIGRATION_MODE_OPT",
120 "MODIFY_ETCHOSTS_OPT",
124 "NEW_CLUSTER_CERT_OPT",
125 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
126 "NEW_CONFD_HMAC_KEY_OPT",
130 "NEW_SPICE_CERT_OPT",
132 "NOCONFLICTSCHECK_OPT",
133 "NODE_FORCE_JOIN_OPT",
135 "NODE_PLACEMENT_OPT",
144 "NOMODIFY_ETCHOSTS_OPT",
145 "NOMODIFY_SSH_SETUP_OPT",
149 "NORUNTIME_CHGS_OPT",
152 "NOSSH_KEYCHECK_OPT",
166 "PREALLOC_WIPE_DISKS_OPT",
167 "PRIMARY_IP_VERSION_OPT",
175 "REMOVE_INSTANCE_OPT",
176 "REMOVE_RESERVED_IPS_OPT",
182 "SECONDARY_ONLY_OPT",
187 "SHUTDOWN_TIMEOUT_OPT",
189 "SPECS_CPU_COUNT_OPT",
190 "SPECS_DISK_COUNT_OPT",
191 "SPECS_DISK_SIZE_OPT",
192 "SPECS_MEM_SIZE_OPT",
193 "SPECS_NIC_COUNT_OPT",
195 "IPOLICY_STD_SPECS_OPT",
196 "IPOLICY_DISK_TEMPLATES",
197 "IPOLICY_VCPU_RATIO",
204 "STARTUP_PAUSED_OPT",
213 "USE_EXTERNAL_MIP_SCRIPT",
221 "IGNORE_IPOLICY_OPT",
222 "INSTANCE_POLICY_OPTS",
223 # Generic functions for CLI programs
225 "CreateIPolicyFromOpts",
227 "GenericInstanceCreate",
233 "JobSubmittedException",
235 "RunWhileClusterStopped",
237 "SubmitOpCodeToDrainedQueue",
240 # Formatting functions
241 "ToStderr", "ToStdout",
244 "FormatParamsDictInfo",
246 "PrintIPolicyCommand",
256 # command line options support infrastructure
257 "ARGS_MANY_INSTANCES",
260 "ARGS_MANY_NETWORKS",
280 "OPT_COMPL_INST_ADD_NODES",
281 "OPT_COMPL_MANY_NODES",
282 "OPT_COMPL_ONE_IALLOCATOR",
283 "OPT_COMPL_ONE_INSTANCE",
284 "OPT_COMPL_ONE_NODE",
285 "OPT_COMPL_ONE_NODEGROUP",
286 "OPT_COMPL_ONE_NETWORK",
288 "OPT_COMPL_ONE_EXTSTORAGE",
293 "COMMON_CREATE_OPTS",
#: Priorities (sorted), mapping user-visible names to opcode priority values
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
#: Maximum batch size for ChooseJob
# NOTE(review): value restored from upstream Ganeti cli.py — confirm
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }
#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
345 def __init__(self, min=0, max=None): # pylint: disable=W0622
350 return ("<%s min=%s max=%s>" %
351 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
class ArgNode(_Argument):
  """Node argument.

  """
class ArgNetwork(_Argument):
  """Network argument.

  """
class ArgGroup(_Argument):
  """Node group argument.

  """
class ArgJobId(_Argument):
  """Job ID argument.

  """
class ArgFile(_Argument):
  """File path argument.

  """
class ArgCommand(_Argument):
  """Command argument.

  """
class ArgHost(_Argument):
  """Host argument.

  """
class ArgOs(_Argument):
  """OS argument.

  """
class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """
# Pre-built argument descriptors shared by the per-command definitions
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type} field
  @param args: remaining positional arguments; for non-cluster tag types
      the first element is popped and used as the object name
  @rtype: tuple
  @return: (kind, name) identifying the tagged object
  @raises errors.ProgrammerError: if C{tag_type} is missing or unhandled
  @raises errors.OpPrereqError: if a name is required but no args were given

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, so no name is needed
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    # consume the object name from the argument list
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command line options (reads C{tags_source})
  @param args: list of tags, extended in place with one stripped tag per
      line of the source file

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  # sort for stable, user-friendly output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
564 def check_unit(option, opt, value): # pylint: disable=W0613
565 """OptParsers custom converter for units.
569 return utils.ParseUnit(value)
570 except errors.UnitParseError, err:
571 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        # value-less entries: interpret the no_/- prefixes
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    # "no_ident" removes the whole group, so options make no sense
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    # a "-" followed by a digit is a negative index, not the un-set prefix
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)
def _SplitListKeyVal(opt, value):
  """Parse a "/"-separated list of ident:key=val sections into a dict.

  @type opt: string
  @param opt: option name, used in error messages
  @rtype: dict
  @return: {ident: {key: val, ...}, ...}
  @raises errors.ParameterError: on empty sections or duplicated idents

  """
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval
def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  Returns the literal "default" keyword unchanged, otherwise converts
  the value to a float.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
# NOTE(review): OPT_COMPL_ONE_NODE and OPT_COMPL_ONE_OS were lost in the
# paste; restored from upstream (9 names are needed for range(100, 109)).
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with the Ganeti-specific value types registered
  below and with a "completion_suggest" attribute used for shell completion.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
# Generic options shared by most CLI scripts: output control, forcing,
# confirmation and job submission behaviour.
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true", dest="dry_run",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true", dest="verbose",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-installation mode)")
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")
IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
# Fixed typo in the help text ("Complte" -> "Complete")
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)
NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"
                         " migration/failover")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)
1245 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1246 dest="enabled_disk_templates",
1247 help="Comma-separated list of "
1249 type="string", default=None)
1251 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1252 type="keyval", default={},
1253 help="NIC parameters")
1255 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1256 dest="candidate_pool_size", type="int",
1257 help="Set the candidate pool size")
1259 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1260 help=("Enables LVM and specifies the volume group"
1261 " name (cluster-wide) for disk allocation"
1262 " [%s]" % constants.DEFAULT_VG),
1263 metavar="VG", default=None)
1265 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1266 help="Destroy cluster", action="store_true")
1268 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1269 help="Skip node agreement check (dangerous)",
1270 action="store_true", default=False)
1272 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1273 help="Specify the mac prefix for the instance IP"
1274 " addresses, in the format XX:XX:XX",
# Cluster network / file storage / etc-hosts options.
# NOTE(review): MASTER_NETDEV_OPT, MASTER_NETMASK_OPT and GLOBAL_FILEDIR_OPT
# below are truncated in this copy (trailing argument lines missing); restore
# them from version control.

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
  cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
             default=None, type="bool",
             help="Defines whether the cluster should autonomously modify"
             " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             choices=list(constants.REBOOT_TYPES))
# Instance shutdown/timeout options and certificate/key renewal options.
# NOTE(review): SHUTDOWN_TIMEOUT_OPT, INTERVAL_OPT, EARLY_RELEASE_OPT,
# NEW_RAPI_CERT_OPT, NEW_SPICE_CERT_OPT and NEW_CONFD_HMAC_KEY_OPT below are
# truncated in this copy (missing trailing argument lines); restore them from
# version control.

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          help=("Number of seconds between repetions of the"

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
# Option to load a replacement cluster domain secret from a file.
# Fixed duplicated word in the user-visible help text ("new new" -> "new").
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       help=("Load new cluster domain"
                                             " secret from file"))
# Misc cluster-level options (uid pools, reserved LVs, DRBD helper, output
# formatting).
# NOTE(review): NEW_CLUSTER_DOMAIN_SECRET_OPT below is truncated in this copy
# (missing the end of the help string and closing parentheses); restore it
# from version control.

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
  cli_option("--maintain-node-health", dest="maintain_node_health",
             metavar=_YORNO, default=None, type="bool",
             help="Configure the cluster to automatically maintain node"
             " health, by shutting down unknown instances, shutting down"
             " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
  cli_option("--identify-defaults", dest="identify_defaults",
             default=False, action="store_true",
             help="Identify which saved instance parameters are equal to"
             " the current cluster defaults and set them as such, instead"
             " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", default=constants.IP4_VERSION,
             action="store", dest="primary_ip_version",
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Maps the symbolic priority name given on the command line to its
  numeric value via the C{_PRIONAME_TO_VALUE} table, and stores it in
  the destination attribute of the parsed options.
  """
  value = _PRIONAME_TO_VALUE[value]
  setattr(parser.values, option.dest, value)
# Opcode priority, OS flags, node-group and evacuation options.
# NOTE(review): PREALLOC_WIPE_DISKS_OPT and FORCE_FILTER_OPT below are
# truncated in this copy (missing trailing argument lines); restore them from
# version control.

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")
# Disk/hypervisor state and network options.
# NOTE(review): DISK_STATE_OPT and HV_STATE_OPT below are truncated in this
# copy (the trailing "type=" argument lines and closing parentheses are
# missing); restore them from version control.

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            help=("Specify disk state information in the"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

# common options for creating instances. add and import then add their own
# NOTE(review): the three option lists below are truncated in this copy
# (missing list members and closing brackets); restore them from version
# control.
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
  NOCONFLICTSCHECK_OPT,

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_SPINDLE_RATIO,

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  Raised when the usage text should be shown instead of running a command.
  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit
    """
    Exception.__init__(self)
    self.exit_error = exit_error
class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  Raised when the program version should be printed instead of running a
  command.
  """
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown
  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))
  # NOTE(review): several lines are missing in this copy (the argv length
  # check, the "cmd = argv[1]" assignment and the alias resolution); the
  # surviving lines are kept verbatim.
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  Yields the usage header followed by one aligned line per command, with
  long help texts wrapped over continuation lines.

  @param binary: Script name
  @param commands: Dictionary containing command definitions
  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s %s" % (mlen, "", line)
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.
  1. For each argument in definition
    1. Keep running count of minimum number of values (min_count)
    1. Keep running count of maximum number of values (max_count)
    1. If it has an unlimited number of values
      1. Abort with error if it's not the last argument in the definition
  1. If last argument has limited number of values
    1. Abort with error if number of values doesn't match or is too large
  1. Abort with error if user didn't pass enough values (min_count)
  """
  # NOTE(review): several lines are missing in this copy (the counter
  # initialization, early returns and the trailing format arguments of the
  # last two ToStderr calls); the surviving lines are kept verbatim.
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: the option value, optionally of the form "node:secondary"
  @return: a pair (node, secondary); the second element is None when the
      value contains no ":" separator (a list is returned in the split
      case, matching historical behaviour for tuple-unpacking callers)

  """
  # Guard against None/empty values before looking for the separator; the
  # fallback is an early return instead of a dangling else branch.
  if value and ":" in value:
    return value.split(":", 1)
  return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # os_variants is documented as "list or None"; without this guard the
  # comprehension below would raise TypeError for None (and an empty list
  # would wrongly yield no names at all).
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, v) for v in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected, the default
      fields extended by the "+"-prefixed selection, or the explicit
      comma-separated selection

  """
  if selected is None:
    # No selection made: fall back to the defaults (this return was
    # missing, leaving the branch without a body).
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator that sets up/tears down RPC around the wrapped function.
UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list)
  """
  # NOTE(review): multiple lines are missing in this copy (the choices=None
  # default handling, the new_text initialization, the try/except around the
  # tty handle and the input loop); the surviving lines are kept verbatim.
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # file() is the Python 2 builtin; a Python 3 port would use open().
  f = file("/dev/tty", "a+")
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    maps = dict([(entry[0], entry[1]) for entry in choices])
      f.write("/".join(chars))
      line = f.readline(2).strip().lower()
      for entry in choices:
        f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.
  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  """
  # NOTE(review): the fallback creating a client when cl is None appears to
  # be missing in this copy; as written, cl=None would raise AttributeError.
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  """
  # NOTE(review): several lines are missing in this copy (the polling loop
  # header, some conditionals and trailing call arguments); the surviving
  # lines are kept verbatim.
  prev_job_info = None
  prev_logmsg_serial = None

    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)
      raise errors.OpExecError("partial failure (opcode %d): %s" %
      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  # NOTE(review): the "def __init__(self):" line is missing in this copy;
  # only its docstring survived below.
    """Initializes this class.

    """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # NOTE(review): the "def __init__(self):" line is missing in this copy;
  # only its docstring survived below.
    """Initializes this class.

    """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client.
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the "self.cl = cl" assignment appears to be missing in
    # this copy; the methods below read self.cl.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a feedback function.
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """


class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that print to stdout/stderr.
  # NOTE(review): the "def __init__(self):" line is missing in this copy;
  # only its docstring and body survived below.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message payloads are converted to their string representation
  first; the result is passed through L{utils.SafeEncode} before being
  returned.
  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  """
  # NOTE(review): several lines are missing in this copy (client creation
  # and the conditionals selecting the reporter); kept verbatim otherwise.
  if reporter is None:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      reporter = StdioJobPollReportCb()
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.
  """
  # NOTE(review): the client creation and the trailing arguments of the
  # PollJob call are missing in this copy.
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  if hasattr(opts, "print_jobid") and opts.print_jobid:
    ToStdout("%d" % job_id)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  return op_results[0]
def SubmitOpCodeToDrainedQueue(op):
  """Forcefully insert a job in the queue, even if it is drained.

  """
  # NOTE(review): the client creation ("cl = ...") is missing in this copy;
  # the code below reads cl.
  job_id = cl.SubmitJobToDrainedQueue([op])
  op_results = PollJob(job_id, cl=cl)
  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).
  """
  if opts and opts.submit_only:
    # NOTE(review): the "job = [op]" assignment appears to be missing in
    # this copy; SetGenericOpcodeOpts and SendJob below read "job".
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    if opts.print_jobid:
      ToStdout("%d" % job_id)
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user
  """
  assert len(sys.argv) >= 2
  # NOTE(review): the trail initialization and the conditional appending the
  # user-provided reason are missing in this copy.
    trail.append((constants.OPCODE_REASON_SRC_USER,

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # options is documented as "command line options or None"; without this
  # guard the attribute accesses below would raise for None.
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)
def GetClient(query=False):
  """Connects to the a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket
  """
  # NOTE(review): several lines are missing in this copy (the conditional
  # around the override handling, the default address branch, the try
  # statement, the ssconf read and the returns); kept verbatim otherwise.
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
      address = pathutils.MASTER_SOCKET
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
      address = pathutils.QUERY_SOCKET
      address = override_socket
  elif query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET

  # TODO: Cache object?
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
2408 def FormatError(err):
2409 """Return a formatted error message for a given error.
2411 This function takes an exception instance and returns a tuple
2412 consisting of two values: first, the recommended exit code, and
2413 second, a string describing the error message (not
2414 newline-terminated).
# NOTE(review): lines 2415-2419 are elided in this dump; presumably they
# initialize retcode, msg (str(err)) and obuf (a StringIO) which the body
# below uses -- confirm against the full file.
# Large isinstance dispatch: most-specific error classes first, generic
# fallbacks (GenericError, unhandled exception) last.
2420 if isinstance(err, errors.ConfigurationError):
2421 txt = "Corrupt configuration file: %s" % msg
2423 obuf.write(txt + "\n")
2424 obuf.write("Aborting.")
2426 elif isinstance(err, errors.HooksAbort):
2427 obuf.write("Failure: hooks execution failed:\n")
# err.args[0] holds (node, script, output) triples for failed hooks.
2428 for node, script, out in err.args[0]:
2430 obuf.write(" node: %s, script: %s, output: %s\n" %
2431 (node, script, out))
2433 obuf.write(" node: %s, script: %s (no output)\n" %
2435 elif isinstance(err, errors.HooksFailure):
2436 obuf.write("Failure: hooks general failure: %s" % msg)
2437 elif isinstance(err, errors.ResolverError):
# Special-case resolving our own hostname, a common misconfiguration.
2438 this_host = netutils.Hostname.GetSysName()
2439 if err.args[0] == this_host:
2440 msg = "Failure: can't resolve my own hostname ('%s')"
2442 msg = "Failure: can't resolve hostname '%s'"
2443 obuf.write(msg % err.args[0])
2444 elif isinstance(err, errors.OpPrereqError):
# Two-argument form carries (details, error-type/ecode).
2445 if len(err.args) == 2:
2446 obuf.write("Failure: prerequisites not met for this"
2447 " operation:\nerror type: %s, error details:\n%s" %
2448 (err.args[1], err.args[0]))
2450 obuf.write("Failure: prerequisites not met for this"
2451 " operation:\n%s" % msg)
2452 elif isinstance(err, errors.OpExecError):
2453 obuf.write("Failure: command execution error:\n%s" % msg)
2454 elif isinstance(err, errors.TagError):
2455 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2456 elif isinstance(err, errors.JobQueueDrainError):
2457 obuf.write("Failure: the job queue is marked for drain and doesn't"
2458 " accept new requests\n")
2459 elif isinstance(err, errors.JobQueueFull):
2460 obuf.write("Failure: the job queue is full and doesn't accept new"
2461 " job submissions until old jobs are archived\n")
2462 elif isinstance(err, errors.TypeEnforcementError):
2463 obuf.write("Parameter Error: %s" % msg)
2464 elif isinstance(err, errors.ParameterError):
2465 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2466 elif isinstance(err, luxi.NoMasterError):
# Name the daemon by the socket path the client tried to reach.
2467 if err.args[0] == pathutils.MASTER_SOCKET:
2468 daemon = "the master daemon"
2469 elif err.args[0] == pathutils.QUERY_SOCKET:
2470 daemon = "the config daemon"
2472 daemon = "socket '%s'" % str(err.args[0])
2473 obuf.write("Cannot communicate with %s.\nIs the process running"
2474 " and listening for connections?" % daemon)
2475 elif isinstance(err, luxi.TimeoutError):
2476 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2477 " been submitted and will continue to run even if the call"
2478 " timed out. Useful commands in this situation are \"gnt-job"
2479 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2481 elif isinstance(err, luxi.PermissionError):
2482 obuf.write("It seems you don't have permissions to connect to the"
2483 " master daemon.\nPlease retry as a different user.")
2484 elif isinstance(err, luxi.ProtocolError):
2485 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2487 elif isinstance(err, errors.JobLost):
2488 obuf.write("Error checking job status: %s" % msg)
2489 elif isinstance(err, errors.QueryFilterParseError):
2490 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2491 obuf.write("\n".join(err.GetDetails()))
2492 elif isinstance(err, errors.GenericError):
2493 obuf.write("Unhandled Ganeti error: %s" % msg)
2494 elif isinstance(err, JobSubmittedException):
2495 obuf.write("JobID: %s\n" % err.args[0])
2498 obuf.write("Unhandled exception: %s" % msg)
# Strip a trailing newline so callers control line termination.
2499 return retcode, obuf.getvalue().rstrip("\n")
2502 def GenericMain(commands, override=None, aliases=None,
2503 env_override=frozenset()):
2504 """Generic main function for all the gnt-* commands.
2506 @param commands: a dictionary with a special structure, see the design doc
2507 for command line handling.
2508 @param override: if not None, we expect a dictionary with keys that will
2509 override command line options; this can be used to pass
2510 options from the scripts to generic functions
2511 @param aliases: dictionary with command aliases {'alias': 'target', ...}
2512 @param env_override: list of environment names which are allowed to submit
2513 default args for commands
# NOTE(review): this dump elides several lines (try:/else:/finally: of the
# sys.argv handling and of the main dispatch, plus the final "return
# result") -- annotations only, code unchanged.
2516 # save the program name and the entire command line for later logging
2518 binary = os.path.basename(sys.argv[0])
2520 binary = sys.argv[0]
2522 if len(sys.argv) >= 2:
2523 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2527 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
# Fallbacks when sys.argv is unusable (e.g. embedded interpreters).
2529 binary = "<unknown program>"
2530 cmdline = "<unknown>"
# _ShowVersion/_ShowUsage are control-flow exceptions raised by the
# argument parser for --version/--help handling.
2536 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2538 except _ShowVersion:
2539 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2540 constants.RELEASE_VERSION)
2541 return constants.EXIT_SUCCESS
2542 except _ShowUsage, err:
2543 for line in _FormatUsage(binary, commands):
2547 return constants.EXIT_FAILURE
2549 return constants.EXIT_SUCCESS
2550 except errors.ParameterError, err:
2551 result, err_msg = FormatError(err)
2555 if func is None: # parse error
# Script-supplied overrides take precedence over parsed options.
2558 if override is not None:
2559 for key, val in override.iteritems():
2560 setattr(options, key, val)
2562 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2563 stderr_logging=True)
2565 logging.info("Command line: %s", cmdline)
2568 result = func(options, args)
2569 except (errors.GenericError, luxi.ProtocolError,
2570 JobSubmittedException), err:
2571 result, err_msg = FormatError(err)
2572 logging.exception("Error during command processing")
2574 except KeyboardInterrupt:
2575 result = constants.EXIT_FAILURE
2576 ToStderr("Aborted. Note that if the operation created any jobs, they"
2577 " might have been submitted and"
2578 " will continue to run in the background.")
2579 except IOError, err:
2580 if err.errno == errno.EPIPE:
2581 # our terminal went away, we'll exit
2582 sys.exit(constants.EXIT_FAILURE)
2589 def ParseNicOption(optvalue):
2590 """Parses the value of the --net option(s).
# optvalue is a list of (index, params-dict) pairs; compute the highest
# index + 1 to size the result list.
2594 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2595 except (TypeError, ValueError), err:
2596 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
# NOTE(review): [{}] * nic_max shares one dict object across all slots;
# presumably an elided line (around 2601) reassigns nics[nidx] = ndict so
# the sharing is harmless -- confirm against the full file.
2599 nics = [{}] * nic_max
2600 for nidx, ndict in optvalue:
2603 if not isinstance(ndict, dict):
2604 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2605 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
# Validate/coerce each NIC parameter to its declared type.
2607 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2614 def GenericInstanceCreate(mode, opts, args):
2615 """Add an instance to the cluster via either creation or import.
2617 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2618 @param opts: the command line options selected by the user
2620 @param args: should contain only one element, the new instance name
2622 @return: the desired exit code
# NOTE(review): many lines are elided in this dump (instance name
# extraction from args, the else branches of the nic/disk handling, the
# op fields between the visible keyword arguments, the final return) --
# annotations only, code unchanged.
2627 (pnode, snode) = SplitNodeOption(opts.node)
2632 hypervisor, hvparams = opts.hypervisor
2635 nics = ParseNicOption(opts.nics)
2639 elif mode == constants.INSTANCE_CREATE:
2640 # default of one nic, all auto
# Disk specification: diskless templates must not get disk info; -s
# (sd_size) and --disk are mutually exclusive shorthand/longhand forms.
2646 if opts.disk_template == constants.DT_DISKLESS:
2647 if opts.disks or opts.sd_size is not None:
2648 raise errors.OpPrereqError("Diskless instance but disk"
2649 " information passed", errors.ECODE_INVAL)
2652 if (not opts.disks and not opts.sd_size
2653 and mode == constants.INSTANCE_CREATE):
2654 raise errors.OpPrereqError("No disk information specified",
2656 if opts.disks and opts.sd_size is not None:
2657 raise errors.OpPrereqError("Please use either the '--disk' or"
2658 " '-s' option", errors.ECODE_INVAL)
# Translate the -s shorthand into a single disk 0 specification.
2659 if opts.sd_size is not None:
2660 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2664 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2665 except ValueError, err:
2666 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2668 disks = [{}] * disk_max
2671 for didx, ddict in opts.disks:
2673 if not isinstance(ddict, dict):
2674 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2675 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
# Each disk must specify exactly one of 'size' or 'adopt'.
2676 elif constants.IDISK_SIZE in ddict:
2677 if constants.IDISK_ADOPT in ddict:
2678 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2679 " (disk %d)" % didx, errors.ECODE_INVAL)
2681 ddict[constants.IDISK_SIZE] = \
2682 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2683 except ValueError, err:
2684 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2685 (didx, err), errors.ECODE_INVAL)
2686 elif constants.IDISK_ADOPT in ddict:
2687 if constants.IDISK_SPINDLES in ddict:
2688 raise errors.OpPrereqError("spindles is not a valid option when"
2689 " adopting a disk", errors.ECODE_INVAL)
2690 if mode == constants.INSTANCE_IMPORT:
2691 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2692 " import", errors.ECODE_INVAL)
# Adopted disks keep their existing size; 0 is a placeholder here.
2693 ddict[constants.IDISK_SIZE] = 0
2695 raise errors.OpPrereqError("Missing size or adoption source for"
2696 " disk %d" % didx, errors.ECODE_INVAL)
2699 if opts.tags is not None:
2700 tags = opts.tags.split(",")
2704 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2705 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
# Mode-specific parameters: create installs an OS, import copies from a
# source node/path instead.
2707 if mode == constants.INSTANCE_CREATE:
2710 force_variant = opts.force_variant
2713 no_install = opts.no_install
2714 identify_defaults = False
2715 elif mode == constants.INSTANCE_IMPORT:
2718 force_variant = False
2719 src_node = opts.src_node
2720 src_path = opts.src_dir
2722 identify_defaults = opts.identify_defaults
2724 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2726 op = opcodes.OpInstanceCreate(instance_name=instance,
2728 disk_template=opts.disk_template,
2730 conflicts_check=opts.conflicts_check,
2731 pnode=pnode, snode=snode,
2732 ip_check=opts.ip_check,
2733 name_check=opts.name_check,
2734 wait_for_sync=opts.wait_for_sync,
2735 file_storage_dir=opts.file_storage_dir,
2736 file_driver=opts.file_driver,
2737 iallocator=opts.iallocator,
2738 hypervisor=hypervisor,
2740 beparams=opts.beparams,
2741 osparams=opts.osparams,
2745 force_variant=force_variant,
2749 no_install=no_install,
2750 identify_defaults=identify_defaults,
2751 ignore_ipolicy=opts.ignore_ipolicy)
# Submits the job, or just prints it with --submit (see SubmitOrSend).
2753 SubmitOrSend(op, opts)
2757 class _RunWhileClusterStoppedHelper:
2758 """Helper class for L{RunWhileClusterStopped} to simplify state management
2761 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2762 """Initializes this class.
2764 @type feedback_fn: callable
2765 @param feedback_fn: Feedback function
2766 @type cluster_name: string
2767 @param cluster_name: Cluster name
2768 @type master_node: string
2769 @param master_node: Master node name
2770 @type online_nodes: list
2771 @param online_nodes: List of names of online nodes
2774 self.feedback_fn = feedback_fn
2775 self.cluster_name = cluster_name
2776 self.master_node = master_node
2777 self.online_nodes = online_nodes
2779 self.ssh = ssh.SshRunner(self.cluster_name)
# Commands run locally on the master, via SSH everywhere else.
2781 self.nonmaster_nodes = [name for name in online_nodes
2782 if name != master_node]
2784 assert self.master_node not in self.nonmaster_nodes
2786 def _RunCmd(self, node_name, cmd):
2787 """Runs a command on the local or a remote machine.
2789 @type node_name: string
2790 @param node_name: Machine name
# NOTE(review): the else branch and the result check preamble are elided
# in this dump; code unchanged.
2795 if node_name is None or node_name == self.master_node:
2796 # No need to use SSH
2797 result = utils.RunCmd(cmd)
2799 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2800 utils.ShellQuoteArgs(cmd))
# Build a single-line error message including node and exit details.
2803 errmsg = ["Failed to run command %s" % result.cmd]
2805 errmsg.append("on node %s" % node_name)
2806 errmsg.append(": exitcode %s and error %s" %
2807 (result.exit_code, result.output))
2808 raise errors.OpExecError(" ".join(errmsg))
2810 def Call(self, fn, *args):
2811 """Call function while all daemons are stopped.
2814 @param fn: Function to be called
# NOTE(review): the try/finally structure around the daemon stop/start
# sequence is partly elided in this dump; the start-all loop and
# watcher_block.Close() below are presumably the finally-block cleanup.
2817 # Pause watcher by acquiring an exclusive lock on watcher state file
2818 self.feedback_fn("Blocking watcher")
2819 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2821 # TODO: Currently, this just blocks. There's no timeout.
2822 # TODO: Should it be a shared lock?
2823 watcher_block.Exclusive(blocking=True)
2825 # Stop master daemons, so that no new jobs can come in and all running
2827 self.feedback_fn("Stopping master daemons")
2828 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2830 # Stop daemons on all nodes
2831 for node_name in self.online_nodes:
2832 self.feedback_fn("Stopping daemons on %s" % node_name)
2833 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2835 # All daemons are shut down now
2837 return fn(self, *args)
2838 except Exception, err:
2839 _, errmsg = FormatError(err)
2840 logging.exception("Caught exception")
2841 self.feedback_fn(errmsg)
2844 # Start cluster again, master node last
2845 for node_name in self.nonmaster_nodes + [self.master_node]:
2846 self.feedback_fn("Starting daemons on %s" % node_name)
2847 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2850 watcher_block.Close()
2853 def RunWhileClusterStopped(feedback_fn, fn, *args):
2854 """Calls a function while all cluster daemons are stopped.
2856 @type feedback_fn: callable
2857 @param feedback_fn: Feedback function
2859 @param fn: Function to be called when daemons are stopped
2862 feedback_fn("Gathering cluster information")
2864 # This ensures we're running on the master daemon
# NOTE(review): the line creating "cl" (presumably cl = GetClient()) is
# elided in this dump.
2867 (cluster_name, master_node) = \
2868 cl.QueryConfigValues(["cluster_name", "master_node"])
2870 online_nodes = GetOnlineNodes([], cl=cl)
2872 # Don't keep a reference to the client. The master daemon will go away.
2875 assert master_node in online_nodes
# Delegate the stop/call/restart cycle to the helper class above.
2877 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2878 online_nodes).Call(fn, *args)
2881 def GenerateTable(headers, fields, separator, data,
2882 numfields=None, unitfields=None,
2884 """Prints a table with headers and different fields.
2887 @param headers: dictionary mapping field names to headers for
2890 @param fields: the field names corresponding to each row in
2892 @param separator: the separator to be used; if this is None,
2893 the default 'smart' algorithm is used which computes optimal
2894 field width, otherwise just the separator is used between
2897 @param data: a list of lists, each sublist being one row to be output
2898 @type numfields: list
2899 @param numfields: a list with the fields that hold numeric
2900 values and thus should be right-aligned
2901 @type unitfields: list
2902 @param unitfields: a list with the fields that hold numeric
2903 values that should be formatted with the units field
2904 @type units: string or None
2905 @param units: the units we should use for formatting, or None for
2906 automatic choice (human-readable for non-separator usage, otherwise
2907 megabytes); this is a one-letter string
# NOTE(review): elided lines include the units default handling, the
# format_fields/result initialization, parts of the row loops and the
# final return; annotations only, code unchanged.
2916 if numfields is None:
2918 if unitfields is None:
# FieldSet supports wildcard-style matching of field names.
2921 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2922 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
# Build a per-column printf-style format: "%*s" right-aligns numerics,
# "%-*s" left-aligns the rest; plain "%s" when a separator is used.
2925 for field in fields:
2926 if headers and field not in headers:
2927 # TODO: handle better unknown fields (either revert to old
2928 # style of raising exception, or deal more intelligently with
2930 headers[field] = field
2931 if separator is not None:
2932 format_fields.append("%s")
2933 elif numfields.Matches(field):
2934 format_fields.append("%*s")
2936 format_fields.append("%-*s")
2938 if separator is None:
2939 mlens = [0 for name in fields]
2940 format_str = " ".join(format_fields)
# Escape % in a literal separator so it survives the later % formatting.
2942 format_str = separator.replace("%", "%%").join(format_fields)
# First pass: normalize cell values and track column widths (mlens).
2947 for idx, val in enumerate(row):
2948 if unitfields.Matches(fields[idx]):
2951 except (TypeError, ValueError):
2954 val = row[idx] = utils.FormatUnit(val, units)
2955 val = row[idx] = str(val)
2956 if separator is None:
2957 mlens[idx] = max(mlens[idx], len(val))
# Header row: widths must also accommodate the header text.
2962 for idx, name in enumerate(fields):
2964 if separator is None:
2965 mlens[idx] = max(mlens[idx], len(hdr))
2966 args.append(mlens[idx])
2968 result.append(format_str % tuple(args))
2970 if separator is None:
2971 assert len(mlens) == len(fields)
2973 if fields and not numfields.Matches(fields[-1]):
# Second pass: emit each data row through the computed format string.
2979 line = ["-" for _ in fields]
2980 for idx in range(len(fields)):
2981 if separator is None:
2982 args.append(mlens[idx])
2983 args.append(line[idx])
2984 result.append(format_str % tuple(args))
2989 def _FormatBool(value):
2990 """Formats a boolean value as a string.
# NOTE(review): the function body (lines 2991-2996) is elided in this
# dump; it is used below in _DEFAULT_FORMAT_QUERY as the QFT_BOOL
# formatter, so it must return a short string for a boolean input --
# confirm the exact literals against the full file.
2998 #: Default formatting for query results; (callback, align right)
# Maps query field types (QFT_*) to a (formatter, right-align) pair;
# QFT_UNIT is intentionally absent -- see _GetColumnFormatter, which
# needs the runtime unit to build that formatter.
2999 _DEFAULT_FORMAT_QUERY = {
3000 constants.QFT_TEXT: (str, False),
3001 constants.QFT_BOOL: (_FormatBool, False),
3002 constants.QFT_NUMBER: (str, True),
3003 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
3004 constants.QFT_OTHER: (str, False),
3005 constants.QFT_UNKNOWN: (str, False),
3009 def _GetColumnFormatter(fdef, override, unit):
3010 """Returns formatting function for a field.
3012 @type fdef: L{objects.QueryFieldDefinition}
3013 @type override: dict
3014 @param override: Dictionary for overriding field formatting functions,
3015 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3017 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3018 @rtype: tuple; (callable, bool)
3019 @return: Returns the function to format a value (takes one parameter) and a
3020 boolean for aligning the value on the right-hand side
# Per-field overrides win over the type-based defaults.
3023 fmt = override.get(fdef.name, None)
# NOTE(review): the "if fmt is not None: return fmt" lines around 3024-3025
# are elided in this dump.
3027 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3029 if fdef.kind == constants.QFT_UNIT:
3030 # Can't keep this information in the static dictionary
3031 return (lambda value: utils.FormatUnit(value, unit), True)
3033 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3037 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3040 class _QueryColumnFormatter:
3041 """Callable class for formatting fields of a query.
3044 def __init__(self, fn, status_fn, verbose):
3045 """Initializes this class.
3048 @param fn: Formatting function
3049 @type status_fn: callable
3050 @param status_fn: Function to report fields' status
3051 @type verbose: boolean
3052 @param verbose: whether to use verbose field descriptions or not
# NOTE(review): the "self._fn = fn" line (around 3055) is elided in this
# dump; self._fn is used in __call__ below.
3056 self._status_fn = status_fn
3057 self._verbose = verbose
3059 def __call__(self, data):
3060 """Returns a field's string representation.
# Each cell is a (status, value) pair as returned by the query layer.
3063 (status, value) = data
# Report the status so the caller can compute overall result statistics.
3066 self._status_fn(status)
3068 if status == constants.RS_NORMAL:
3069 return self._fn(value)
# Non-normal statuses must carry no value; render the status instead.
3071 assert value is None, \
3072 "Found value %r for abnormal status %s" % (value, status)
3074 return FormatResultError(status, self._verbose)
3077 def FormatResultError(status, verbose):
3078 """Formats result status other than L{constants.RS_NORMAL}.
3080 @param status: The result status
3081 @type verbose: boolean
3082 @param verbose: Whether to return the verbose text
3083 @return: Text of result status
3086 assert status != constants.RS_NORMAL, \
3087 "FormatResultError called with status equal to constants.RS_NORMAL"
# NOTE(review): the try/except around this lookup and the verbose/normal
# selection lines are elided in this dump; presumably a missing key in
# RSS_DESCRIPTION triggers the NotImplementedError below.
3089 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3091 raise NotImplementedError("Unknown status %s" % status)
3098 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3099 header=False, verbose=False):
3100 """Formats data in L{objects.QueryResponse}.
3102 @type result: L{objects.QueryResponse}
3103 @param result: result of query operation
3105 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3106 see L{utils.text.FormatUnit}
3107 @type format_override: dict
3108 @param format_override: Dictionary for overriding field formatting functions,
3109 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3110 @type separator: string or None
3111 @param separator: String used to separate fields
3113 @param header: Whether to output header row
3114 @type verbose: boolean
3115 @param verbose: whether to use verbose field descriptions or not
# NOTE(review): elided lines include the default-unit selection, the
# columns list initialization, the _RecordStatus body (incrementing
# stats[status]) and the QR_* status assignments; code unchanged.
3124 if format_override is None:
3125 format_override = {}
# One counter per possible result status; updated via _RecordStatus
# closure passed to each column formatter.
3127 stats = dict.fromkeys(constants.RS_ALL, 0)
3129 def _RecordStatus(status):
3134 for fdef in result.fields:
3135 assert fdef.title and fdef.name
3136 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3137 columns.append(TableColumn(fdef.title,
3138 _QueryColumnFormatter(fn, _RecordStatus,
3142 table = FormatTable(result.data, columns, header, separator)
3144 # Collect statistics
3145 assert len(stats) == len(constants.RS_ALL)
3146 assert compat.all(count >= 0 for count in stats.values())
3148 # Determine overall status. If there was no data, unknown fields must be
3149 # detected via the field definitions.
3150 if (stats[constants.RS_UNKNOWN] or
3151 (not result.data and _GetUnknownFields(result.fields))):
3153 elif compat.any(count > 0 for key, count in stats.items()
3154 if key != constants.RS_NORMAL):
3155 status = QR_INCOMPLETE
3159 return (status, table)
def _GetUnknownFields(fdefs):
  """Returns the subset of C{fdefs} with unknown field kind.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: field definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
3172 def _WarnUnknownFields(fdefs):
3173 """Prints a warning to stderr if a query included unknown fields.
3175 @type fdefs: list of L{objects.QueryFieldDefinition}
3178 unknown = _GetUnknownFields(fdefs)
# NOTE(review): the "if unknown:" guard and the trailing return (around
# 3179 and 3183-3185) are elided in this dump; callers below use the
# return value as a boolean ("found_unknown") -- confirm against the
# full file.
3180 ToStderr("Warning: Queried for unknown fields %s",
3181 utils.CommaJoin(fdef.name for fdef in unknown))
3187 def GenericList(resource, fields, names, unit, separator, header, cl=None,
3188 format_override=None, verbose=False, force_filter=False,
3189 namefield=None, qfilter=None, isnumeric=False):
3190 """Generic implementation for listing all items of a resource.
3192 @param resource: One of L{constants.QR_VIA_LUXI}
3193 @type fields: list of strings
3194 @param fields: List of fields to query for
3195 @type names: list of strings
3196 @param names: Names of items to query for
3197 @type unit: string or None
3198 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3199 None for automatic choice (human-readable for non-separator usage,
3200 otherwise megabytes); this is a one-letter string
3201 @type separator: string or None
3202 @param separator: String used to separate fields
3204 @param header: Whether to show header row
3205 @type force_filter: bool
3206 @param force_filter: Whether to always treat names as filter
3207 @type format_override: dict
3208 @param format_override: Dictionary for overriding field formatting functions,
3209 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3210 @type verbose: boolean
3211 @param verbose: whether to use verbose field descriptions or not
3212 @type namefield: string
3213 @param namefield: Name of field to use for simple filters (see
3214 L{qlang.MakeFilter} for details)
3215 @type qfilter: list or None
3216 @param qfilter: Query filter (in addition to names)
3217 @param isnumeric: bool
3218 @param isnumeric: Whether the namefield's type is numeric, and therefore
3219 any simple filters built by namefield should use integer values to
# NOTE(review): elided lines include the "if cl is None: cl = GetClient()"
# default, the "if qfilter is None:" line before 3230, and the loop
# printing the formatted output lines; annotations only, code unchanged.
3226 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3227 isnumeric=isnumeric)
# Combine the name-based filter with any caller-supplied filter.
3230 qfilter = namefilter
3231 elif namefilter is not None:
3232 qfilter = [qlang.OP_AND, namefilter, qfilter]
3237 response = cl.Query(resource, fields, qfilter)
3239 found_unknown = _WarnUnknownFields(response.fields)
3241 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3243 format_override=format_override,
# Sanity check: the warning and the computed status must agree on
# whether unknown fields were present.
3249 assert ((found_unknown and status == QR_UNKNOWN) or
3250 (not found_unknown and status != QR_UNKNOWN))
3252 if status == QR_UNKNOWN:
3253 return constants.EXIT_UNKNOWN_FIELD
3255 # TODO: Should the list command fail if not all data could be collected?
3256 return constants.EXIT_SUCCESS
3259 def _FieldDescValues(fdef):
3260 """Helper function for L{GenericListFields} to get query field description.
3262 @type fdef: L{objects.QueryFieldDefinition}
# NOTE(review): the surrounding return-list lines (presumably fdef.name,
# fdef.title and fdef.doc around this entry) are elided in this dump;
# _QFT_NAMES maps field kinds to human-readable type names, falling back
# to the raw kind.
3268 _QFT_NAMES.get(fdef.kind, fdef.kind),
3274 def GenericListFields(resource, fields, separator, header, cl=None):
3275 """Generic implementation for listing fields for a resource.
3277 @param resource: One of L{constants.QR_VIA_LUXI}
3278 @type fields: list of strings
3279 @param fields: List of fields to query for
3280 @type separator: string or None
3281 @param separator: String used to separate fields
3283 @param header: Whether to show header row
# NOTE(review): elided lines include the "if cl is None: cl = GetClient()"
# default, the sort of response.fields, the "columns = [" opener, the
# ToStdout call inside the loop and the "if found_unknown:" guard before
# the EXIT_UNKNOWN_FIELD return; code unchanged.
3292 response = cl.QueryFields(resource, fields)
3294 found_unknown = _WarnUnknownFields(response.fields)
# Fixed four-column layout describing each field.
3297 TableColumn("Name", str, False),
3298 TableColumn("Type", str, False),
3299 TableColumn("Title", str, False),
3300 TableColumn("Description", str, False),
3303 rows = map(_FieldDescValues, response.fields)
3305 for line in FormatTable(rows, columns, header, separator):
3309 return constants.EXIT_UNKNOWN_FIELD
3311 return constants.EXIT_SUCCESS
3315 """Describes a column for L{FormatTable}.
3318 def __init__(self, title, fn, align_right):
3319 """Initializes this class.
3322 @param title: Column title
3324 @param fn: Formatting function
3325 @type align_right: bool
3326 @param align_right: Whether to align values on the right-hand side
# NOTE(review): the assignments of self.title and the formatting callback
# (around lines 3328-3330) are elided in this dump; FormatTable below
# reads col.title and col.format.
3331 self.align_right = align_right
3334 def _GetColFormatString(width, align_right):
3335 """Returns the format string for a field.
# NOTE(review): the lines computing "sign" (around 3337-3342) are elided
# in this dump; per the printf semantics of the return below, sign is ""
# for right alignment and "-" for left alignment -- confirm against the
# full file.
3343 return "%%%s%ss" % (sign, width)
3346 def FormatTable(rows, columns, header, separator):
3347 """Formats data as a table.
3349 @type rows: list of lists
3350 @param rows: Row data, one list per row
3351 @type columns: list of L{TableColumn}
3352 @param columns: Column descriptions
3354 @param header: Whether to show header row
3355 @type separator: string or None
3356 @param separator: String used to separate columns
# NOTE(review): elided lines include the "if header:"/"else:" around the
# initial data/colwidth setup, the "for row in rows:" opener and the
# width-reset when the last column is left-aligned; code unchanged.
# With a header, seed the output with the title row and its widths.
3360 data = [[col.title for col in columns]]
3361 colwidth = [len(col.title) for col in columns]
3364 colwidth = [0 for _ in columns]
3368 assert len(row) == len(columns)
3370 formatted = [col.format(value) for value, col in zip(row, columns)]
3372 if separator is None:
3373 # Update column widths
3374 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3375 # Modifying a list's items while iterating is fine
3376 colwidth[idx] = max(oldwidth, len(value))
3378 data.append(formatted)
3380 if separator is not None:
3381 # Return early if a separator is used
3382 return [separator.join(row) for row in data]
3384 if columns and not columns[-1].align_right:
3385 # Avoid unnecessary spaces at end of line
3388 # Build format string
3389 fmt = " ".join([_GetColFormatString(width, col.align_right)
3390 for col, width in zip(columns, colwidth)])
3392 return [fmt % tuple(row) for row in data]
3395 def FormatTimestamp(ts):
3396 """Formats a given timestamp.
3399 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3402 @return: a string with the formatted timestamp
# NOTE(review): the invalid-input return (around 3406, presumably "?")
# and the "(sec, usecs) = ts" unpacking are elided in this dump.
3405 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3409 return utils.FormatTime(sec, usecs=usecs)
3412 def ParseTimespec(value):
3413 """Parse a time specification.
3415 The following suffixes will be recognized:
3423 Without any suffix, the value will be taken to be in seconds.
# NOTE(review): elided lines include the suffix list in the docstring,
# the suffix_map definition, the no-suffix int() branch and the final
# "return value"; code unchanged.
3428 raise errors.OpPrereqError("Empty time specification passed",
3437 if value[-1] not in suffix_map:
3440 except (TypeError, ValueError):
3441 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
# With a recognized suffix: strip it, then scale by the multiplier.
3444 multiplier = suffix_map[value[-1]]
3446 if not value: # no data left after stripping the suffix
3447 raise errors.OpPrereqError("Invalid time specification (only"
3448 " suffix passed)", errors.ECODE_INVAL)
3450 value = int(value) * multiplier
3451 except (TypeError, ValueError):
3452 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3457 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3458 filter_master=False, nodegroup=None):
3459 """Returns the names of online nodes.
3461 This function will also log a warning on stderr with the names of
3464 @param nodes: if not empty, use only this subset of nodes (minus the
3466 @param cl: if not None, luxi client to use
3467 @type nowarn: boolean
3468 @param nowarn: by default, this function will output a note with the
3469 offline nodes that are skipped; if this parameter is True the
3470 note is not displayed
3471 @type secondary_ips: boolean
3472 @param secondary_ips: if True, return the secondary IPs instead of the
3473 names, useful for doing network traffic over the replication interface
3475 @type filter_master: boolean
3476 @param filter_master: if True, do not return the master node in the list
3477 (useful in coordination with secondary_ips where we cannot check our
3478 node name against the list)
3479 @type nodegroup: string
3480 @param nodegroup: If set, only return nodes in this node group
# NOTE(review): elided lines include the "cl = GetClient()" default, the
# "qfilter = []" initialization, the "if filter_master:" guard before
# 3496, the empty-filter case and the fn selection (name vs sip) before
# the final map; code unchanged.
3489 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
# A nodegroup can be given either by name or by UUID; match both.
3491 if nodegroup is not None:
3492 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3493 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3496 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
# Combine all sub-filters with a top-level AND (or unwrap a single one).
3499 if len(qfilter) > 1:
3500 final_filter = [qlang.OP_AND] + qfilter
3502 assert len(qfilter) == 1
3503 final_filter = qfilter[0]
3507 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# Each result row is [(status, name), (status, offline), (status, sip)].
3509 def _IsOffline(row):
3510 (_, (_, offline), _) = row
3514 ((_, name), _, _) = row
3518 (_, _, (_, sip)) = row
3521 (offline, online) = compat.partition(result.data, _IsOffline)
3523 if offline and not nowarn:
3524 ToStderr("Note: skipping offline node(s): %s" %
3525 utils.CommaJoin(map(_GetName, offline)))
3532 return map(fn, online)
3535 def _ToStream(stream, txt, *args):
3536 """Write a message to a stream, bypassing the logging system
3538 @type stream: file object
3539 @param stream: the file to which we should write
3541 @param txt: the message
# NOTE(review): elided lines include the try:, the no-args branch
# (stream.write(txt) without % formatting), the trailing newline write
# and stream.flush(); code unchanged.
3547 stream.write(txt % args)
3552 except IOError, err:
3553 if err.errno == errno.EPIPE:
3554 # our terminal went away, we'll exit
3555 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
3584 class JobExecutor(object):
3585 """Class which manages the submission and execution of multiple jobs.
3587 Note that instances of this class should not be reused between
3591 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
# NOTE(review): elided lines include the docstring and the assignments of
# self.queue, self.cl (with GetClient default), self.jobs and self.opts
# (around 3592-3598), all of which are used by the methods below.
3596 self.verbose = verbose
3599 self.feedback_fn = feedback_fn
# Monotonic counter used to keep job results in submission order.
3600 self._counter = itertools.count()
3603 def _IfName(name, fmt):
3604 """Helper function for formatting name.
# NOTE(review): the @staticmethod decorator (line 3602) and the body
# (presumably returning fmt % name when name is set, "" otherwise) are
# elided in this dump -- confirm against the full file.
3612 def QueueJob(self, name, *ops):
3613 """Record a job for later submit.
3616 @param name: a description of the job, will be used in WaitJobSet
# Apply the generic options (debug, dry-run, priority, ...) before
# queuing; queue entries are (counter, name, opcodes) triples.
3619 SetGenericOpcodeOpts(ops, self.opts)
3620 self.queue.append((self._counter.next(), name, ops))
3622 def AddJobId(self, name, status, job_id):
3623 """Adds a job ID to the internal queue.
# jobs entries are (counter, submit-status, job-id, name) tuples, the
# same shape SubmitPending produces.
3626 self.jobs.append((self._counter.next(), status, job_id, name))
3628 def SubmitPending(self, each=False):
3629 """Submit all pending jobs.
# NOTE(review): the "if each:" / "results = []" / "else:" lines around
# the two submission paths are elided in this dump; code unchanged.
# Per-job submission path (each=True): one SubmitJob call per entry.
3634 for (_, _, ops) in self.queue:
3635 # SubmitJob will remove the success status, but raise an exception if
3636 # the submission fails, so we'll notice that anyway.
3637 results.append([True, self.cl.SubmitJob(ops)[0]])
# Bulk path: a single SubmitManyJobs call returning (status, data) pairs.
3639 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3640 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3641 self.jobs.append((idx, status, data, name))
3643 def _ChooseJob(self):
3644 """Choose a non-waiting/queued job to poll next.
# NOTE(review): elided lines include the _CHOOSE_BATCH slicing rationale,
# the "continue" inside the waiting branch, the "return job_data" after
# removal and the fallback comment; code unchanged.
3647 assert self.jobs, "_ChooseJob called with empty job list"
# Query only a bounded batch of job statuses to limit the request size.
3649 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3653 for job_data, status in zip(self.jobs, result):
3654 if (isinstance(status, list) and status and
3655 status[0] in (constants.JOB_STATUS_QUEUED,
3656 constants.JOB_STATUS_WAITING,
3657 constants.JOB_STATUS_CANCELING)):
3658 # job is still present and waiting
3660 # good candidate found (either running job or lost job)
3661 self.jobs.remove(job_data)
# Nothing runnable found: fall back to the oldest queued job.
3665 return self.jobs.pop(0)
3667 def GetResults(self):
3668 """Wait for and return the results of all jobs.
3671 @return: list of tuples (success, job results), in the same order
3672 as the submitted jobs; if a job has failed, instead of the result
3673 there will be the error message
3677 self.SubmitPending()
3680 ok_jobs = [row[2] for row in self.jobs if row[1]]
3682 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3684 # first, remove any non-submitted jobs
3685 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3686 for idx, _, jid, name in failures:
3687 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3688 results.append((idx, False, jid))
3691 (idx, _, jid, name) = self._ChooseJob()
3692 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3694 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3696 except errors.JobLost, err:
3697 _, job_result = FormatError(err)
3698 ToStderr("Job %s%s has been archived, cannot check its result",
3699 jid, self._IfName(name, " for %s"))
3701 except (errors.GenericError, luxi.ProtocolError), err:
3702 _, job_result = FormatError(err)
3704 # the error message will always be shown, verbose or not
3705 ToStderr("Job %s%s has failed: %s",
3706 jid, self._IfName(name, " for %s"), job_result)
3708 results.append((idx, success, job_result))
3710 # sort based on the index, then drop it
3712 results = [i[1:] for i in results]
3716 def WaitOrShow(self, wait):
3717 """Wait for job results or only print the job IDs.
3720 @param wait: whether to wait or not
3724 return self.GetResults()
3727 self.SubmitPending()
3728 for _, status, result, name in self.jobs:
3730 ToStdout("%s: %s", result, name)
3732 ToStderr("Failure for %s: %s", name, result)
3733 return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      # nested parameter group: recurse with the matching own sub-dict
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      # leaf value: show the own value, or mark the effective default
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
def _FormatListInfoDefault(data, def_data):
  """Format a list value, falling back to the marked default list.

  @param data: own list of values, or None if not customized
  @param def_data: effective (default) list of values
  @rtype: str

  """
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    # at cluster level the own policy is already fully specified
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    # no own bounds: everything shows up as a default from the effective policy
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    # std specs exist only at cluster level
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret
def _PrintSpecsParameters(buf, specs):
  """Write the spec parameters to a stream as comma-joined "key=value" pairs.

  Keys are emitted in sorted order.

  """
  formatted = ["%s=%s" % item for item in sorted(specs.items())]
  buf.write(",".join(formatted))
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    # std specs only exist at cluster level
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        # subsequent min/max pairs are separated by "//"
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @type extra: str
  @param extra: additional text inserted into the confirmation message
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # long lists are not shown by default; offer a "view" choice instead
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    # user asked to see the list; re-ask with the full list shown
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Values equal to the "default" placeholder are passed through unchanged;
  all others are parsed with L{utils.ParseUnit}.

  @type elements: dict
  @rtype: dict

  """
  parsed = dict()
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
3911 def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3912 ispecs_disk_count, ispecs_disk_size,
3913 ispecs_nic_count, group_ipolicy, fill_all):
3916 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3917 if ispecs_disk_size:
3918 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3919 except (TypeError, ValueError, errors.UnitParseError), err:
3920 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3922 (ispecs_disk_size, ispecs_mem_size, err),
3925 # prepare ipolicy dict
3926 ispecs_transposed = {
3927 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3928 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3929 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3930 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3931 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3934 # first, check that the values given are correct
3936 forced_type = TISPECS_GROUP_TYPES
3938 forced_type = TISPECS_CLUSTER_TYPES
3939 for specs in ispecs_transposed.values():
3940 assert type(specs) is dict
3941 utils.ForceDictType(specs, forced_type)
3945 constants.ISPECS_MIN: {},
3946 constants.ISPECS_MAX: {},
3947 constants.ISPECS_STD: {},
3949 for (name, specs) in ispecs_transposed.iteritems():
3950 assert name in constants.ISPECS_PARAMETERS
3951 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3952 assert key in ispecs
3953 ispecs[key][name] = val
3955 for key in constants.ISPECS_MINMAX_KEYS:
3958 objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3960 minmax_out[key] = ispecs[key]
3961 ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3963 ipolicy[constants.ISPECS_STD] = \
3964 objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3965 ispecs[constants.ISPECS_STD])
3967 ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3970 def _ParseSpecUnit(spec, keyname):
3972 for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3975 ret[k] = utils.ParseUnit(ret[k])
3976 except (TypeError, ValueError, errors.UnitParseError), err:
3977 raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3978 " specs: %s" % (k, ret[k], keyname, err)),
def _ParseISpec(spec, keyname, required):
  """Parse and type-check one instance spec dict.

  @type spec: dict
  @param spec: the instance spec
  @type keyname: str
  @param keyname: name of the spec, used only in error messages
  @type required: bool
  @param required: whether all spec parameters must be present
  @rtype: dict
  @raise errors.OpPrereqError: if C{required} and parameters are missing

  """
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  """Detect whether the minmax specs are one bare "allowed value" key.

  @return: the matching allowed-value key (e.g. the "default" placeholder)
      if C{minmax_ispecs} is exactly one pair with one empty-valued key from
      C{allowed_values}, else None

  """
  ret = None
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
      len(minmax_ispecs[0]) == 1):
    for (key, spec) in minmax_ispecs[0].items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fill an ipolicy dict from the --ipolicy-xxx-specs style options.

  @param ipolicy_out: dict filled in place
  @param minmax_ispecs: list of {min: .., max: ..} spec pairs, or None
  @param std_ispecs: std spec dict, or None (not valid for groups)
  @raise errors.OpPrereqError: on invalid bound keys

  """
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # a bare allowed-value placeholder replaces the whole minmax section
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  The legacy --specs-xxx options (ispecs_*) and the newer
  --ipolicy-xxx-specs options (minmax_ispecs/std_ispecs) are mutually
  exclusive.

  @param fill_all: whether for cluster policies we should ensure that
      all values are filled
  @rtype: dict
  @return: the created instance policy

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()

  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      # keep the placeholder value as-is
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  # strip the trailing newline: ToStdout adds one of its own
  ToStdout(buf.getvalue().rstrip("\n"))