4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 "ENABLED_DISK_TEMPLATES_OPT",
89 "FILESTORE_DRIVER_OPT",
97 "GLOBAL_SHARED_FILEDIR_OPT",
102 "DEFAULT_IALLOCATOR_OPT",
103 "IDENTIFY_DEFAULTS_OPT",
104 "IGNORE_CONSIST_OPT",
106 "IGNORE_FAILURES_OPT",
107 "IGNORE_OFFLINE_OPT",
108 "IGNORE_REMOVE_FAILURES_OPT",
109 "IGNORE_SECONDARIES_OPT",
111 "INCLUDEDEFAULTS_OPT",
114 "MAINTAIN_NODE_HEALTH_OPT",
116 "MASTER_NETMASK_OPT",
118 "MIGRATION_MODE_OPT",
119 "MODIFY_ETCHOSTS_OPT",
123 "NEW_CLUSTER_CERT_OPT",
124 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
125 "NEW_CONFD_HMAC_KEY_OPT",
129 "NEW_SPICE_CERT_OPT",
131 "NOCONFLICTSCHECK_OPT",
132 "NODE_FORCE_JOIN_OPT",
134 "NODE_PLACEMENT_OPT",
138 "NODRBD_STORAGE_OPT",
144 "NOMODIFY_ETCHOSTS_OPT",
145 "NOMODIFY_SSH_SETUP_OPT",
149 "NORUNTIME_CHGS_OPT",
152 "NOSSH_KEYCHECK_OPT",
166 "PREALLOC_WIPE_DISKS_OPT",
167 "PRIMARY_IP_VERSION_OPT",
175 "REMOVE_INSTANCE_OPT",
176 "REMOVE_RESERVED_IPS_OPT",
182 "SECONDARY_ONLY_OPT",
187 "SHUTDOWN_TIMEOUT_OPT",
189 "SPECS_CPU_COUNT_OPT",
190 "SPECS_DISK_COUNT_OPT",
191 "SPECS_DISK_SIZE_OPT",
192 "SPECS_MEM_SIZE_OPT",
193 "SPECS_NIC_COUNT_OPT",
195 "IPOLICY_STD_SPECS_OPT",
196 "IPOLICY_DISK_TEMPLATES",
197 "IPOLICY_VCPU_RATIO",
204 "STARTUP_PAUSED_OPT",
213 "USE_EXTERNAL_MIP_SCRIPT",
221 "IGNORE_IPOLICY_OPT",
222 "INSTANCE_POLICY_OPTS",
223 # Generic functions for CLI programs
225 "CreateIPolicyFromOpts",
227 "GenericInstanceCreate",
233 "JobSubmittedException",
235 "RunWhileClusterStopped",
239 # Formatting functions
240 "ToStderr", "ToStdout",
243 "FormatParamsDictInfo",
245 "PrintIPolicyCommand",
255 # command line options support infrastructure
256 "ARGS_MANY_INSTANCES",
259 "ARGS_MANY_NETWORKS",
279 "OPT_COMPL_INST_ADD_NODES",
280 "OPT_COMPL_MANY_NODES",
281 "OPT_COMPL_ONE_IALLOCATOR",
282 "OPT_COMPL_ONE_INSTANCE",
283 "OPT_COMPL_ONE_NODE",
284 "OPT_COMPL_ONE_NODEGROUP",
285 "OPT_COMPL_ONE_NETWORK",
287 "OPT_COMPL_ONE_EXTSTORAGE",
293 "COMMON_CREATE_OPTS",
299 #: Priorities (sorted)
301 ("low", constants.OP_PRIO_LOW),
302 ("normal", constants.OP_PRIO_NORMAL),
303 ("high", constants.OP_PRIO_HIGH),
306 #: Priority dictionary for easier lookup
307 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
308 # we migrate to Python 2.6
309 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
311 # Query result status for clients
314 QR_INCOMPLETE) = range(3)
316 #: Maximum batch size for ChooseJob
320 # constants used to create InstancePolicy dictionary
321 TISPECS_GROUP_TYPES = {
322 constants.ISPECS_MIN: constants.VTYPE_INT,
323 constants.ISPECS_MAX: constants.VTYPE_INT,
326 TISPECS_CLUSTER_TYPES = {
327 constants.ISPECS_MIN: constants.VTYPE_INT,
328 constants.ISPECS_MAX: constants.VTYPE_INT,
329 constants.ISPECS_STD: constants.VTYPE_INT,
332 #: User-friendly names for query2 field types
334 constants.QFT_UNKNOWN: "Unknown",
335 constants.QFT_TEXT: "Text",
336 constants.QFT_BOOL: "Boolean",
337 constants.QFT_NUMBER: "Number",
338 constants.QFT_UNIT: "Storage size",
339 constants.QFT_TIMESTAMP: "Timestamp",
340 constants.QFT_OTHER: "Custom",
345 def __init__(self, min=0, max=None): # pylint: disable=W0622
350 return ("<%s min=%s max=%s>" %
351 (self.__class__.__name__, self.min, self.max))
354 class ArgSuggest(_Argument):
355 """Suggesting argument.
357 Value can be any of the ones passed to the constructor.
360 # pylint: disable=W0622
361 def __init__(self, min=0, max=None, choices=None):
362 _Argument.__init__(self, min=min, max=max)
363 self.choices = choices
366 return ("<%s min=%s max=%s choices=%r>" %
367 (self.__class__.__name__, self.min, self.max, self.choices))
370 class ArgChoice(ArgSuggest):
373 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
374 but value must be one of the choices.
379 class ArgUnknown(_Argument):
380 """Unknown argument to program (e.g. determined at runtime).
385 class ArgInstance(_Argument):
386 """Instances argument.
391 class ArgNode(_Argument):
397 class ArgNetwork(_Argument):
403 class ArgGroup(_Argument):
404 """Node group argument.
409 class ArgJobId(_Argument):
415 class ArgFile(_Argument):
416 """File path argument.
421 class ArgCommand(_Argument):
427 class ArgHost(_Argument):
433 class ArgOs(_Argument):
439 class ArgExtStorage(_Argument):
440 """ExtStorage argument.
446 ARGS_MANY_INSTANCES = [ArgInstance()]
447 ARGS_MANY_NETWORKS = [ArgNetwork()]
448 ARGS_MANY_NODES = [ArgNode()]
449 ARGS_MANY_GROUPS = [ArgGroup()]
450 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
451 ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
452 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
454 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
455 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
# NOTE(review): this chunk is a partial dump — interior lines of this function
# are missing; comments describe only what is visible here.
458 def _ExtractTagsObject(opts, args):
459 """Extract the tag type object.
461 Note that this function will modify its args parameter.
# Programmer error (not user error) if the command table did not set tag_type.
464 if not hasattr(opts, "tag_type"):
465 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
# Cluster tags need no object name; nodegroup/network/instance tags take the
# object name from args (presumably popped on the missing lines — TODO confirm).
467 if kind == constants.TAG_CLUSTER:
469 elif kind in (constants.TAG_NODEGROUP,
471 constants.TAG_NETWORK,
472 constants.TAG_INSTANCE):
# User gave no object name for a tag kind that requires one.
474 raise errors.OpPrereqError("no arguments passed to the command",
# Any other tag kind is unhandled and treated as a bug.
479 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
# NOTE(review): fragmented in this dump — some interior lines are missing.
483 def _ExtendTags(opts, args):
484 """Extend the args if a source file has been given.
486 This function will extend the tags with the contents of the file
487 passed in the 'tags_source' attribute of the opts parameter. A file
488 named '-' will be replaced by stdin.
# No-op when --from was not given (the early-return guard is on a missing line
# — TODO confirm).
491 fname = opts.tags_source
# Plain open for reading; '-' handling (stdin) happens on lines not visible here.
497 new_fh = open(fname, "r")
500 # we don't use the nice 'new_data = [line.strip() for line in fh]'
501 # because of python bug 1633941
# Manual readline loop: collect stripped lines one by one.
503 line = new_fh.readline()
506 new_data.append(line.strip())
# Mutates the caller's args list in place with the tags read from the file.
509 args.extend(new_data)
512 def ListTags(opts, args):
513 """List the tags on a given object.
515 This is a generic implementation that knows how to deal with all
516 three cases of tag objects (cluster, node, instance). The opts
517 argument is expected to contain a tag_type field denoting what
518 object type we work on.
# Resolve which object's tags to show, then query via the luxi client.
521 kind, name = _ExtractTagsObject(opts, args)
522 cl = GetClient(query=True)
523 result = cl.QueryTags(kind, name)
# Materialize so it can be sorted/printed (presumably on missing lines below).
524 result = list(result)
530 def AddTags(opts, args):
531 """Add tags on a given object.
533 This is a generic implementation that knows how to deal with all
534 three cases of tag objects (cluster, node, instance). The opts
535 argument is expected to contain a tag_type field denoting what
536 object type we work on.
# Determine target object, then append any tags read from --from (file/stdin).
539 kind, name = _ExtractTagsObject(opts, args)
540 _ExtendTags(opts, args)
# Reject an empty tag list explicitly rather than submitting a no-op job.
542 raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
543 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
# Honors --submit: either wait for the job or just return its ID.
544 SubmitOrSend(op, opts)
547 def RemoveTags(opts, args):
548 """Remove tags from a given object.
550 This is a generic implementation that knows how to deal with all
551 three cases of tag objects (cluster, node, instance). The opts
552 argument is expected to contain a tag_type field denoting what
553 object type we work on.
# Mirrors AddTags: resolve target, extend tag list from --from, validate.
556 kind, name = _ExtractTagsObject(opts, args)
557 _ExtendTags(opts, args)
558 raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
560 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
561 SubmitOrSend(op, opts)
# optparse "type checker" callback: converts a unit-suffixed size string
# (e.g. "4G") to an integer via utils.ParseUnit.  Python 2 except syntax.
564 def check_unit(option, opt, value): # pylint: disable=W0613
565 """OptParsers custom converter for units.
569 return utils.ParseUnit(value)
570 except errors.UnitParseError, err:
# Re-raise as the exception type optparse expects for bad option values.
571 raise OptionValueError("option %s: %s" % (opt, err))
574 def _SplitKeyVal(opt, data, parse_prefixes):
575 """Convert a KeyVal string into a dict.
577 This function will convert a key=val[,...] string into a dict. Empty
578 values will be converted specially: keys which have the prefix 'no_'
579 will have the value=False and the prefix stripped, keys with the prefix
580 "-" will have value=None and the prefix stripped, and the others will
584 @param opt: a string holding the option name for which we process the
585 data, used in building error messages
587 @param data: a string of the format key=val,key=val,...
588 @type parse_prefixes: bool
589 @param parse_prefixes: whether to handle prefixes specially
591 @return: {key=val, key=val}
592 @raises errors.ParameterError: if there are duplicate keys
# Split on unescaped commas only, so values may contain escaped commas.
597 for elem in utils.UnescapeAndSplit(data, sep=","):
# Explicit key=value form.
599 key, val = elem.split("=", 1)
# Bare keys: "no_foo" -> foo=False, UN_PREFIX-prefixed -> key=None,
# anything else -> key=True (boolean flag shorthand).
601 if elem.startswith(NO_PREFIX):
602 key, val = elem[len(NO_PREFIX):], False
603 elif elem.startswith(UN_PREFIX):
604 key, val = elem[len(UN_PREFIX):], None
606 key, val = elem, True
608 raise errors.ParameterError("Missing value for key '%s' in option %s" %
# Duplicate keys are a user error, not last-one-wins.
611 raise errors.ParameterError("Duplicate key '%s' in option %s" %
617 def _SplitIdentKeyVal(opt, value, parse_prefixes):
618 """Helper function to parse "ident:key=val,key=val" options.
621 @param opt: option name, used in error messages
623 @param value: expected to be in the format "ident:key=val,key=val,..."
624 @type parse_prefixes: bool
625 @param parse_prefixes: whether to handle prefixes specially (see
628 @return: (ident, {key=val, key=val})
629 @raises errors.ParameterError: in case of duplicates or other parsing errors
# No colon: the whole value is the identifier and there are no parameters.
633 ident, rest = value, ""
635 ident, rest = value.split(":", 1)
# "no_ident" means "remove this parameter group": no key=val allowed after it.
637 if parse_prefixes and ident.startswith(NO_PREFIX):
639 msg = "Cannot pass options when removing parameter groups: %s" % value
640 raise errors.ParameterError(msg)
641 retval = (ident[len(NO_PREFIX):], False)
# UN_PREFIX ident resets to default — but only when what follows the prefix is
# not a digit (so e.g. "-1" still parses as a plain index, not a removal).
642 elif (parse_prefixes and ident.startswith(UN_PREFIX) and
643 (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
645 msg = "Cannot pass options when removing parameter groups: %s" % value
646 raise errors.ParameterError(msg)
647 retval = (ident[len(UN_PREFIX):], None)
# Normal case: parse the key=val list after the colon.
649 kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
650 retval = (ident, kv_dict)
# optparse type checker for the "identkeyval" custom type.
654 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
655 """Custom parser for ident:key=val,key=val options.
657 This will store the parsed values as a tuple (ident, {key: val}). As such,
658 multiple uses of this option via action=append is possible.
# True: honor the no_/- prefix conventions of _SplitKeyVal.
661 return _SplitIdentKeyVal(opt, value, True)
# optparse type checker for the "keyval" custom type.
664 def check_key_val(option, opt, value): # pylint: disable=W0613
665 """Custom parser class for key=val,key=val options.
667 This will store the parsed values as a dict {key: val}.
670 return _SplitKeyVal(opt, value, True)
# Parses one "/"-separated list of ident:key=val sections into a dict keyed by
# ident.  NOTE(review): some interior lines are missing from this dump.
673 def _SplitListKeyVal(opt, value):
675 for elem in value.split("/"):
# Empty sections (e.g. "a//b" within one list) are rejected.
677 raise errors.ParameterError("Empty section in option '%s'" % opt)
# parse_prefixes=False: no_/- prefixes are not special inside these lists.
678 (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
680 msg = ("Duplicated parameter '%s' in parsing %s: %s" %
682 raise errors.ParameterError(msg)
683 retval[ident] = valdict
# optparse type checker for the "multilistidentkeyval" custom type:
# "//" separates top-level lists, "/" separates ident sections within a list.
687 def check_multilist_ident_key_val(_, opt, value):
688 """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
690 @rtype: list of dictionary
691 @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
695 for line in value.split("//"):
696 retval.append(_SplitListKeyVal(opt, line))
# optparse type checker for the "bool" custom type; accepts the cluster
# constants plus literal "yes"/"no", case-insensitively.
700 def check_bool(option, opt, value): # pylint: disable=W0613
701 """Custom parser for yes/no options.
703 This will store the parsed value as either True or False.
706 value = value.lower()
707 if value == constants.VALUE_FALSE or value == "no":
709 elif value == constants.VALUE_TRUE or value == "yes":
# Anything else is rejected rather than coerced.
712 raise errors.ParameterError("Invalid boolean value '%s'" % value)
# optparse type checker for the "list" custom type (comma-separated values).
715 def check_list(option, opt, value): # pylint: disable=W0613
716 """Custom parser for comma-separated lists.
719 # we have to make this explicit check since "".split(",") is [""],
720 # not an empty list :(
724 return utils.UnescapeAndSplit(value)
# optparse type checker for the "maybefloat" custom type: either the literal
# "default" keyword or a float (conversion happens on lines missing here).
727 def check_maybefloat(option, opt, value): # pylint: disable=W0613
728 """Custom parser for float numbers which might be also defaults.
731 value = value.lower()
733 if value == constants.VALUE_DEFAULT:
739 # completion_suggestion is normally a list. Using numeric values not evaluating
740 # to False for dynamic completion.
741 (OPT_COMPL_MANY_NODES,
743 OPT_COMPL_ONE_INSTANCE,
745 OPT_COMPL_ONE_EXTSTORAGE,
746 OPT_COMPL_ONE_IALLOCATOR,
747 OPT_COMPL_ONE_NETWORK,
748 OPT_COMPL_INST_ADD_NODES,
749 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
751 OPT_COMPL_ALL = compat.UniqueFrozenset([
752 OPT_COMPL_MANY_NODES,
754 OPT_COMPL_ONE_INSTANCE,
756 OPT_COMPL_ONE_EXTSTORAGE,
757 OPT_COMPL_ONE_IALLOCATOR,
758 OPT_COMPL_ONE_NETWORK,
759 OPT_COMPL_INST_ADD_NODES,
760 OPT_COMPL_ONE_NODEGROUP,
764 class CliOption(Option):
765 """Custom option class for optparse.
# Extra attribute used by the bash-completion machinery.
768 ATTRS = Option.ATTRS + [
769 "completion_suggest",
# Register the custom option types declared above...
771 TYPES = Option.TYPES + (
772 "multilistidentkeyval",
# ...and map each type name to its checker function (optparse protocol).
780 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
781 TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
782 TYPE_CHECKER["identkeyval"] = check_ident_key_val
783 TYPE_CHECKER["keyval"] = check_key_val
784 TYPE_CHECKER["unit"] = check_unit
785 TYPE_CHECKER["bool"] = check_bool
786 TYPE_CHECKER["list"] = check_list
787 TYPE_CHECKER["maybefloat"] = check_maybefloat
790 # optparse.py sets make_option, so we do it for our own option class, too
791 cli_option = CliOption
796 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
797 help="Increase debugging level")
799 NOHDR_OPT = cli_option("--no-headers", default=False,
800 action="store_true", dest="no_headers",
801 help="Don't display column headers")
803 SEP_OPT = cli_option("--separator", default=None,
804 action="store", dest="separator",
805 help=("Separator between output fields"
806 " (defaults to one space)"))
808 USEUNITS_OPT = cli_option("--units", default=None,
809 dest="units", choices=("h", "m", "g", "t"),
810 help="Specify units for output (one of h/m/g/t)")
812 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
813 type="string", metavar="FIELDS",
814 help="Comma separated list of output fields")
816 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
817 default=False, help="Force the operation")
819 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
820 default=False, help="Do not require confirmation")
822 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
823 action="store_true", default=False,
824 help=("Ignore offline nodes and do as much"
827 TAG_ADD_OPT = cli_option("--tags", dest="tags",
828 default=None, help="Comma-separated list of instance"
831 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
832 default=None, help="File with tag names")
834 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
835 default=False, action="store_true",
836 help=("Submit the job and return the job ID, but"
837 " don't wait for the job to finish"))
839 PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
840 default=False, action="store_true",
841 help=("Additionally print the job as first line"
842 " on stdout (for scripting)."))
844 SYNC_OPT = cli_option("--sync", dest="do_locking",
845 default=False, action="store_true",
846 help=("Grab locks while doing the queries"
847 " in order to ensure more consistent results"))
849 DRY_RUN_OPT = cli_option("--dry-run", default=False,
851 help=("Do not execute the operation, just run the"
852 " check steps and verify if it could be"
855 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
857 help="Increase the verbosity of the operation")
859 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
860 action="store_true", dest="simulate_errors",
861 help="Debugging option that makes the operation"
862 " treat most runtime checks as failed")
864 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
865 default=True, action="store_false",
866 help="Don't wait for sync (DANGEROUS!)")
868 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
869 default=False, action="store_true",
870 help="Wait for disks to sync")
872 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
873 action="store_true", default=False,
874 help="Enable offline instance")
876 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
877 action="store_true", default=False,
878 help="Disable down instance")
880 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
881 help=("Custom disk setup (%s)" %
882 utils.CommaJoin(constants.DISK_TEMPLATES)),
883 default=None, metavar="TEMPL",
884 choices=list(constants.DISK_TEMPLATES))
886 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
887 help="Do not create any network cards for"
890 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
891 help="Relative path under default cluster-wide"
892 " file storage dir to store file-based disks",
893 default=None, metavar="<DIR>")
895 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
896 help="Driver to use for image files",
897 default=None, metavar="<DRIVER>",
898 choices=list(constants.FILE_DRIVER))
900 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
901 help="Select nodes for the instance automatically"
902 " using the <NAME> iallocator plugin",
903 default=None, type="string",
904 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
906 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
908 help="Set the default instance"
910 default=None, type="string",
911 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
913 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
915 completion_suggest=OPT_COMPL_ONE_OS)
917 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
918 type="keyval", default={},
919 help="OS parameters")
921 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
922 action="store_true", default=False,
923 help="Force an unknown variant")
925 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
926 action="store_true", default=False,
927 help="Do not install the OS (will"
930 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
931 dest="allow_runtime_chgs",
932 default=True, action="store_false",
933 help="Don't allow runtime changes")
935 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
936 type="keyval", default={},
937 help="Backend parameters")
939 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
940 default={}, dest="hvparams",
941 help="Hypervisor parameters")
943 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
944 help="Disk template parameters, in the format"
945 " template:option=value,option=value,...",
946 type="identkeyval", action="append", default=[])
948 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
949 type="keyval", default={},
950 help="Memory size specs: list of key=value,"
951 " where key is one of min, max, std"
952 " (in MB or using a unit)")
954 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
955 type="keyval", default={},
956 help="CPU count specs: list of key=value,"
957 " where key is one of min, max, std")
959 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
960 dest="ispecs_disk_count",
961 type="keyval", default={},
962 help="Disk count specs: list of key=value,"
963 " where key is one of min, max, std")
965 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
966 type="keyval", default={},
967 help="Disk size specs: list of key=value,"
968 " where key is one of min, max, std"
969 " (in MB or using a unit)")
971 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
972 type="keyval", default={},
973 help="NIC count specs: list of key=value,"
974 " where key is one of min, max, std")
976 IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
977 IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
978 dest="ipolicy_bounds_specs",
979 type="multilistidentkeyval", default=None,
980 help="Complete instance specs limits")
# String constant so other code can refer to this option by its flag name.
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"

# Cluster-wide standard (default) instance specs, as a key=val list.
# Fix: user-visible help text previously misspelled "Complete" as "Complte".
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")
988 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
989 dest="ipolicy_disk_templates",
990 type="list", default=None,
991 help="Comma-separated list of"
992 " enabled disk templates")
994 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
995 dest="ipolicy_vcpu_ratio",
996 type="maybefloat", default=None,
997 help="The maximum allowed vcpu-to-cpu ratio")
999 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
1000 dest="ipolicy_spindle_ratio",
1001 type="maybefloat", default=None,
1002 help=("The maximum allowed instances to"
1005 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
1006 help="Hypervisor and hypervisor options, in the"
1007 " format hypervisor:option=value,option=value,...",
1008 default=None, type="identkeyval")
1010 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1011 help="Hypervisor and hypervisor options, in the"
1012 " format hypervisor:option=value,option=value,...",
1013 default=[], action="append", type="identkeyval")
1015 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1016 action="store_false",
1017 help="Don't check that the instance's IP"
1020 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1021 default=True, action="store_false",
1022 help="Don't check that the instance's name"
1025 NET_OPT = cli_option("--net",
1026 help="NIC parameters", default=[],
1027 dest="nics", action="append", type="identkeyval")
1029 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1030 dest="disks", action="append", type="identkeyval")
1032 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1033 help="Comma-separated list of disks"
1034 " indices to act on (e.g. 0,2) (optional,"
1035 " defaults to all disks)")
1037 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1038 help="Enforces a single-disk configuration using the"
1039 " given disk size, in MiB unless a suffix is used",
1040 default=None, type="unit", metavar="<size>")
1042 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1043 dest="ignore_consistency",
1044 action="store_true", default=False,
1045 help="Ignore the consistency of the disks on"
1048 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1049 dest="allow_failover",
1050 action="store_true", default=False,
1051 help="If migration is not possible fallback to"
1054 NONLIVE_OPT = cli_option("--non-live", dest="live",
1055 default=True, action="store_false",
1056 help="Do a non-live migration (this usually means"
1057 " freeze the instance, save the state, transfer and"
1058 " only then resume running on the secondary node)")
1060 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1062 choices=list(constants.HT_MIGRATION_MODES),
1063 help="Override default migration mode (choose"
1064 " either live or non-live")
1066 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1067 help="Target node and optional secondary node",
1068 metavar="<pnode>[:<snode>]",
1069 completion_suggest=OPT_COMPL_INST_ADD_NODES)
1071 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1072 action="append", metavar="<node>",
1073 help="Use only this node (can be used multiple"
1074 " times, if not given defaults to all nodes)",
1075 completion_suggest=OPT_COMPL_ONE_NODE)
1077 NODEGROUP_OPT_NAME = "--node-group"
1078 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1080 help="Node group (name or uuid)",
1081 metavar="<nodegroup>",
1082 default=None, type="string",
1083 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1085 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1087 completion_suggest=OPT_COMPL_ONE_NODE)
1089 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1090 action="store_false",
1091 help="Don't start the instance after creation")
1093 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1094 action="store_true", default=False,
1095 help="Show command instead of executing it")
1097 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1098 default=False, action="store_true",
1099 help="Instead of performing the migration/failover,"
1100 " try to recover from a failed cleanup. This is safe"
1101 " to run even if the instance is healthy, but it"
1102 " will create extra replication traffic and "
1103 " disrupt briefly the replication (like during the"
1104 " migration/failover")
1106 STATIC_OPT = cli_option("-s", "--static", dest="static",
1107 action="store_true", default=False,
1108 help="Only show configuration data, not runtime data")
1110 ALL_OPT = cli_option("--all", dest="show_all",
1111 default=False, action="store_true",
1112 help="Show info on all instances on the cluster."
1113 " This can take a long time to run, use wisely")
1115 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1116 action="store_true", default=False,
1117 help="Interactive OS reinstall, lists available"
1118 " OS templates for selection")
1120 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1121 action="store_true", default=False,
1122 help="Remove the instance from the cluster"
1123 " configuration even if there are failures"
1124 " during the removal process")
1126 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1127 dest="ignore_remove_failures",
1128 action="store_true", default=False,
1129 help="Remove the instance from the"
1130 " cluster configuration even if there"
1131 " are failures during the removal"
1134 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1135 action="store_true", default=False,
1136 help="Remove the instance from the cluster")
1138 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1139 help="Specifies the new node for the instance",
1140 metavar="NODE", default=None,
1141 completion_suggest=OPT_COMPL_ONE_NODE)
1143 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1144 help="Specifies the new secondary node",
1145 metavar="NODE", default=None,
1146 completion_suggest=OPT_COMPL_ONE_NODE)
1148 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1149 help="Specifies the new primary node",
1150 metavar="<node>", default=None,
1151 completion_suggest=OPT_COMPL_ONE_NODE)
1153 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1154 default=False, action="store_true",
1155 help="Replace the disk(s) on the primary"
1156 " node (applies only to internally mirrored"
1157 " disk templates, e.g. %s)" %
1158 utils.CommaJoin(constants.DTS_INT_MIRROR))
1160 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1161 default=False, action="store_true",
1162 help="Replace the disk(s) on the secondary"
1163 " node (applies only to internally mirrored"
1164 " disk templates, e.g. %s)" %
1165 utils.CommaJoin(constants.DTS_INT_MIRROR))
1167 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1168 default=False, action="store_true",
1169 help="Lock all nodes and auto-promote as needed"
1172 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1173 default=False, action="store_true",
1174 help="Automatically replace faulty disks"
1175 " (applies only to internally mirrored"
1176 " disk templates, e.g. %s)" %
1177 utils.CommaJoin(constants.DTS_INT_MIRROR))
1179 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1180 default=False, action="store_true",
1181 help="Ignore current recorded size"
1182 " (useful for forcing activation when"
1183 " the recorded size is wrong)")
1185 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1187 completion_suggest=OPT_COMPL_ONE_NODE)
1189 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1192 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1193 help="Specify the secondary ip for the node",
1194 metavar="ADDRESS", default=None)
1196 READD_OPT = cli_option("--readd", dest="readd",
1197 default=False, action="store_true",
1198 help="Readd old node after replacing it")
1200 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1201 default=True, action="store_false",
1202 help="Disable SSH key fingerprint checking")
1204 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1205 default=False, action="store_true",
1206 help="Force the joining of a node")
1208 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1209 type="bool", default=None, metavar=_YORNO,
1210 help="Set the master_candidate flag on the node")
# --- Node-flag and basic cluster options ---
# NOTE(review): this listing lost some continuation lines during
# extraction (several cli_option calls below are visibly missing their
# closing arguments); verify against upstream before editing code here.
1212 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1213 type="bool", default=None,
1214 help=("Set the offline flag on the node"
1215 " (cluster does not communicate with offline"
1218 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1219 type="bool", default=None,
1220 help=("Set the drained flag on the node"
1221 " (excluded from allocation operations)"))
1223 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1224 type="bool", default=None, metavar=_YORNO,
1225 help="Set the master_capable flag on the node")
1227 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1228 type="bool", default=None, metavar=_YORNO,
1229 help="Set the vm_capable flag on the node")
1231 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1232 type="bool", default=None, metavar=_YORNO,
1233 help="Set the allocatable flag on a volume")
1235 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1236 help="Disable support for lvm based instances"
1238 action="store_false", default=True)
1240 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1241 dest="enabled_hypervisors",
1242 help="Comma-separated list of hypervisors",
1243 type="string", default=None)
1245 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1246 dest="enabled_disk_templates",
1247 help="Comma-separated list of "
1249 type="string", default=None)
1251 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1252 type="keyval", default={},
1253 help="NIC parameters")
1255 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1256 dest="candidate_pool_size", type="int",
1257 help="Set the candidate pool size")
1259 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1260 help=("Enables LVM and specifies the volume group"
1261 " name (cluster-wide) for disk allocation"
1262 " [%s]" % constants.DEFAULT_VG),
1263 metavar="VG", default=None)
1265 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1266 help="Destroy cluster", action="store_true")
1268 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1269 help="Skip node agreement check (dangerous)",
1270 action="store_true", default=False)
1272 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1273 help="Specify the mac prefix for the instance IP"
1274 " addresses, in the format XX:XX:XX",
1278 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1279 help="Specify the node interface (cluster-wide)"
1280 " on which the master IP address will be added"
1281 " (cluster init default: %s)" %
1282 constants.DEFAULT_BRIDGE,
1286 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1287 help="Specify the netmask of the master IP",
1291 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1292 dest="use_external_mip_script",
1293 help="Specify whether to run a"
1294 " user-provided script for the master"
1295 " IP address turnup and"
1296 " turndown operations",
1297 type="bool", metavar=_YORNO, default=None)
1299 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1300 help="Specify the default directory (cluster-"
1301 "wide) for storing the file-based disks [%s]" %
1302 pathutils.DEFAULT_FILE_STORAGE_DIR,
1306 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1307 "--shared-file-storage-dir",
1308 dest="shared_file_storage_dir",
1309 help="Specify the default directory (cluster-wide) for storing the"
1310 " shared file-based disks [%s]" %
1311 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1312 metavar="SHAREDDIR", default=None)
# --- Cluster init / verify / certificate renewal options ---
1314 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1315 help="Don't modify %s" % pathutils.ETC_HOSTS,
1316 action="store_false", default=True)
1318 MODIFY_ETCHOSTS_OPT = \
1319 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
1320 default=None, type="bool",
1321 help="Defines whether the cluster should autonomously modify"
1322 " and keep in sync the /etc/hosts file of the nodes")
1324 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1325 help="Don't initialize SSH keys",
1326 action="store_false", default=True)
1328 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1329 help="Enable parseable error messages",
1330 action="store_true", default=False)
1332 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1333 help="Skip N+1 memory redundancy tests",
1334 action="store_true", default=False)
1336 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1337 help="Type of reboot: soft/hard/full",
1338 default=constants.INSTANCE_REBOOT_HARD,
1340 choices=list(constants.REBOOT_TYPES))
1342 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1343 dest="ignore_secondaries",
1344 default=False, action="store_true",
1345 help="Ignore errors from secondaries")
1347 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1348 action="store_false", default=True,
1349 help="Don't shutdown the instance (unsafe)")
1351 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1352 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1353 help="Maximum time to wait")
1355 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1356 dest="shutdown_timeout", type="int",
1357 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1358 help="Maximum time to wait for instance"
1361 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1363 help=("Number of seconds between repetions of the"
1366 EARLY_RELEASE_OPT = cli_option("--early-release",
1367 dest="early_release", default=False,
1368 action="store_true",
1369 help="Release the locks on the secondary"
1372 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1373 dest="new_cluster_cert",
1374 default=False, action="store_true",
1375 help="Generate a new cluster certificate")
1377 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1379 help="File containing new RAPI certificate")
1381 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1382 default=None, action="store_true",
1383 help=("Generate a new self-signed RAPI"
1386 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1388 help="File containing new SPICE certificate")
1390 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1392 help="File containing the certificate of the CA"
1393 " which signed the SPICE certificate")
1395 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1396 dest="new_spice_cert", default=None,
1397 action="store_true",
1398 help=("Generate a new self-signed SPICE"
1401 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1402 dest="new_confd_hmac_key",
1403 default=False, action="store_true",
1404 help=("Create a new HMAC key for %s" %
# NOTE(review): the help string below says "new new" (duplicated word);
# it is a runtime string, so it is flagged here rather than changed.
1407 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1408 dest="cluster_domain_secret",
1410 help=("Load new new cluster domain"
1411 " secret from file"))
1413 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1414 dest="new_cluster_domain_secret",
1415 default=False, action="store_true",
1416 help=("Create a new cluster domain"
# --- Miscellaneous cluster-wide options (uid pools, DRBD, output style) ---
1419 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1420 dest="use_replication_network",
1421 help="Whether to use the replication network"
1422 " for talking to the nodes",
1423 action="store_true", default=False)
1425 MAINTAIN_NODE_HEALTH_OPT = \
1426 cli_option("--maintain-node-health", dest="maintain_node_health",
1427 metavar=_YORNO, default=None, type="bool",
1428 help="Configure the cluster to automatically maintain node"
1429 " health, by shutting down unknown instances, shutting down"
1430 " unknown DRBD devices, etc.")
1432 IDENTIFY_DEFAULTS_OPT = \
1433 cli_option("--identify-defaults", dest="identify_defaults",
1434 default=False, action="store_true",
1435 help="Identify which saved instance parameters are equal to"
1436 " the current cluster defaults and set them as such, instead"
1437 " of marking them as overridden")
1439 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1440 action="store", dest="uid_pool",
1441 help=("A list of user-ids or user-id"
1442 " ranges separated by commas"))
1444 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1445 action="store", dest="add_uids",
1446 help=("A list of user-ids or user-id"
1447 " ranges separated by commas, to be"
1448 " added to the user-id pool"))
1450 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1451 action="store", dest="remove_uids",
1452 help=("A list of user-ids or user-id"
1453 " ranges separated by commas, to be"
1454 " removed from the user-id pool"))
1456 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1457 action="store", dest="reserved_lvs",
1458 help=("A comma-separated list of reserved"
1459 " logical volumes names, that will be"
1460 " ignored by cluster verify"))
1462 ROMAN_OPT = cli_option("--roman",
1463 dest="roman_integers", default=False,
1464 action="store_true",
1465 help="Use roman numbers for positive integers")
1467 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1468 action="store", default=None,
1469 help="Specifies usermode helper for DRBD")
1471 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1472 action="store_false", default=True,
1473 help="Disable support for DRBD")
1475 PRIMARY_IP_VERSION_OPT = \
1476 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1477 action="store", dest="primary_ip_version",
1478 metavar="%d|%d" % (constants.IP4_VERSION,
1479 constants.IP6_VERSION),
1480 help="Cluster-wide IP version for primary IP")
1482 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1483 action="store_true",
1484 help="Show machine name for every line in output")
1486 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1487 action="store_true",
1488 help=("Hide successful results and show failures"
1489 " only (determined by the exit code)"))
1491 REASON_OPT = cli_option("--reason", default=None,
1492 help="The reason for executing the command")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Maps the symbolic priority name given on the command line to its
  numeric value and stores it on the parsed options.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
# --- Opcode priority, OS flags, node-group, instance and network options ---
1504 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1505 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1506 choices=_PRIONAME_TO_VALUE.keys(),
1507 action="callback", type="choice",
1508 callback=_PriorityOptionCb,
1509 help="Priority for opcode processing")
1511 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1512 type="bool", default=None, metavar=_YORNO,
1513 help="Sets the hidden flag on the OS")
1515 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1516 type="bool", default=None, metavar=_YORNO,
1517 help="Sets the blacklisted flag on the OS")
1519 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1520 type="bool", metavar=_YORNO,
1521 dest="prealloc_wipe_disks",
1522 help=("Wipe disks prior to instance"
1525 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1526 type="keyval", default=None,
1527 help="Node parameters")
1529 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1530 action="store", metavar="POLICY", default=None,
1531 help="Allocation policy for the node group")
1533 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1534 type="bool", metavar=_YORNO,
1535 dest="node_powered",
1536 help="Specify if the SoR for node is powered")
1538 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1539 default=constants.OOB_TIMEOUT,
1540 help="Maximum time to wait for out-of-band helper")
1542 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1543 default=constants.OOB_POWER_DELAY,
1544 help="Time in seconds to wait between power-ons")
1546 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1547 action="store_true", default=False,
1548 help=("Whether command argument should be treated"
1551 NO_REMEMBER_OPT = cli_option("--no-remember",
1553 action="store_true", default=False,
1554 help="Perform but do not record the change"
1555 " in the configuration")
1557 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1558 default=False, action="store_true",
1559 help="Evacuate primary instances only")
1561 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1562 default=False, action="store_true",
1563 help="Evacuate secondary instances only"
1564 " (applies only to internally mirrored"
1565 " disk templates, e.g. %s)" %
1566 utils.CommaJoin(constants.DTS_INT_MIRROR))
1568 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1569 action="store_true", default=False,
1570 help="Pause instance at startup")
1572 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1573 help="Destination node group (name or uuid)",
1574 default=None, action="append",
1575 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
# NOTE(review): the next three options use a shared mutable default
# (default=[]) together with action="append"; optparse appends to that
# very list, so the default object can accumulate values across parser
# instances -- verify this is intended before reuse.
1577 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1578 action="append", dest="ignore_errors",
1579 choices=list(constants.CV_ALL_ECODES_STRINGS),
1580 help="Error code to be ignored")
1582 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1584 help=("Specify disk state information in the"
1586 " storage_type/identifier:option=value,...;"
1587 " note this is unused for now"),
1590 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1592 help=("Specify hypervisor state information in the"
1593 " format hypervisor:option=value,...;"
1594 " note this is unused for now"),
1597 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1598 action="store_true", default=False,
1599 help="Ignore instance policy violations")
1601 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1602 help="Sets the instance's runtime memory,"
1603 " ballooning it up or down to the new value",
1604 default=None, type="unit", metavar="<size>")
1606 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1607 action="store_true", default=False,
1608 help="Marks the grow as absolute instead of the"
1609 " (default) relative mode")
1611 NETWORK_OPT = cli_option("--network",
1612 action="store", default=None, dest="network",
1613 help="IP network in CIDR notation")
1615 GATEWAY_OPT = cli_option("--gateway",
1616 action="store", default=None, dest="gateway",
1617 help="IP address of the router (gateway)")
1619 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1620 action="store", default=None,
1621 dest="add_reserved_ips",
1622 help="Comma-separated list of"
1623 " reserved IPs to add")
1625 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1626 action="store", default=None,
1627 dest="remove_reserved_ips",
1628 help="Comma-delimited list of"
1629 " reserved IPs to remove")
1631 NETWORK6_OPT = cli_option("--network6",
1632 action="store", default=None, dest="network6",
1633 help="IP network in CIDR notation")
1635 GATEWAY6_OPT = cli_option("--gateway6",
1636 action="store", default=None, dest="gateway6",
1637 help="IP6 address of the router (gateway)")
1639 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1640 dest="conflicts_check",
1642 action="store_false",
1643 help="Don't check for conflicting IPs")
1645 INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1646 default=False, action="store_true",
1647 help="Include default values")
# NOTE(review): the list literals below are visibly truncated by the
# extraction (several members and closing brackets are missing).
1649 #: Options provided by all commands
1650 COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1652 # options related to asynchronous job handling
1659 # common options for creating instances. add and import then add their own
1661 COMMON_CREATE_OPTS = [
1666 FILESTORE_DRIVER_OPT,
1672 NOCONFLICTSCHECK_OPT,
1685 # common instance policy options
1686 INSTANCE_POLICY_OPTS = [
1687 IPOLICY_BOUNDS_SPECS_OPT,
1688 IPOLICY_DISK_TEMPLATES,
1690 IPOLICY_SPINDLE_RATIO,
1693 # instance policy split specs options
1694 SPLIT_ISPECS_OPTS = [
1695 SPECS_CPU_COUNT_OPT,
1696 SPECS_DISK_COUNT_OPT,
1697 SPECS_DISK_SIZE_OPT,
1699 SPECS_NIC_COUNT_OPT,
1703 class _ShowUsage(Exception):
1704 """Exception class for L{_ParseArgs}.
1707 def __init__(self, exit_error):
1708 """Initializes instances of this class.
1710 @type exit_error: bool
1711 @param exit_error: Whether to report failure on exit
1714 Exception.__init__(self)
1715 self.exit_error = exit_error
1718 class _ShowVersion(Exception):
1719 """Exception class for L{_ParseArgs}.
1724 def _ParseArgs(binary, argv, commands, aliases, env_override):
1725 """Parser for the command line arguments.
1727 This function parses the arguments and returns the function which
1728 must be executed together with its (modified) arguments.
1730 @param binary: Script name
1731 @param argv: Command line arguments
1732 @param commands: Dictionary containing command definitions
1733 @param aliases: dictionary with command aliases {"alias": "target", ...}
1734 @param env_override: list of env variables allowed for default args
1735 @raise _ShowUsage: If usage description should be shown
1736 @raise _ShowVersion: If version should be shown
1739 assert not (env_override - set(commands))
1740 assert not (set(aliases.keys()) & set(commands.keys()))
# NOTE(review): lines extracting "cmd" from argv (and the argv length
# check guarding the next raise) are missing from this listing.
1745 # No option or command given
1746 raise _ShowUsage(exit_error=True)
1748 if cmd == "--version":
1749 raise _ShowVersion()
1750 elif cmd == "--help":
1751 raise _ShowUsage(exit_error=False)
1752 elif not (cmd in commands or cmd in aliases):
1753 raise _ShowUsage(exit_error=True)
1755 # get command, unalias it, and look it up in commands
1757 if aliases[cmd] not in commands:
1758 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1759 " command '%s'" % (cmd, aliases[cmd]))
1763 if cmd in env_override:
1764 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1765 env_args = os.environ.get(args_env_name)
# NOTE(review): a guard (presumably "if env_args:") appears to be
# missing before the next line.
1767 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1769 func, args_def, parser_opts, usage, description = commands[cmd]
1770 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1771 description=description,
1772 formatter=TitledHelpFormatter(),
1773 usage="%%prog %s %s" % (cmd, usage))
1774 parser.disable_interspersed_args()
1775 options, args = parser.parse_args(args=argv[2:])
1777 if not _CheckArguments(cmd, args_def, args):
1778 return None, None, None
1780 return func, options, args
1783 def _FormatUsage(binary, commands):
1784 """Generates a nice description of all commands.
1786 @param binary: Script name
1787 @param commands: Dictionary containing command definitions
# Generator: yields the usage text line by line.
1790 # compute the max line length for cmd + usage
1791 mlen = min(60, max(map(len, commands)))
1793 yield "Usage: %s {command} [options...] [argument...]" % binary
1794 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1798 # and format a nice command list
1799 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1800 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1801 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1802 for line in help_lines:
1803 yield " %-*s %s" % (mlen, "", line)
1808 def _CheckArguments(cmd, args_def, args):
1809 """Verifies the arguments using the argument definition.
1813 1. Abort with error if values specified by user but none expected.
1815 1. For each argument in definition
1817 1. Keep running count of minimum number of values (min_count)
1818 1. Keep running count of maximum number of values (max_count)
1819 1. If it has an unlimited number of values
1821 1. Abort with error if it's not the last argument in the definition
1823 1. If last argument has limited number of values
1825 1. Abort with error if number of values doesn't match or is too large
1827 1. Abort with error if user didn't pass enough values (min_count)
1830 if args and not args_def:
1831 ToStderr("Error: Command %s expects no arguments", cmd)
# NOTE(review): the listing drops interior lines throughout this
# function (e.g. min_count/max_count initialization, the return
# statements after each ToStderr call); verify against upstream.
1838 last_idx = len(args_def) - 1
1840 for idx, arg in enumerate(args_def):
1841 if min_count is None:
1843 elif arg.min is not None:
1844 min_count += arg.min
1846 if max_count is None:
1848 elif arg.max is not None:
1849 max_count += arg.max
1852 check_max = (arg.max is not None)
1854 elif arg.max is None:
1855 raise errors.ProgrammerError("Only the last argument can have max=None")
1858 # Command with exact number of arguments
1859 if (min_count is not None and max_count is not None and
1860 min_count == max_count and len(args) != min_count):
1861 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1864 # Command with limited number of arguments
1865 if max_count is not None and len(args) > max_count:
1866 ToStderr("Error: Command %s expects only %d argument(s)",
1870 # Command with some required arguments
1871 if min_count is not None and len(args) < min_count:
1872 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @return: a (primary, secondary) pair; the secondary element is
    C{None} when no colon separator is present

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # No variants: the bare name is the only valid one
    return [os_name]
  return ["%s+%s" % (os_name, v) for v in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @param default: Default fields

  """
  if selected is None:
    return default

  if not selected.startswith("+"):
    return selected.split(",")

  # A leading "+" means: extend the default fields with the given ones
  return default + selected[1:].split(",")
1924 UsesRPC = rpc.RunWithRPC
1927 def AskUser(text, choices=None):
1928 """Ask the user a question.
1930 @param text: the question to ask
1932 @param choices: list with elements tuples (input_char, return_value,
1933 description); if not given, it will default to: [('y', True,
1934 'Perform the operation'), ('n', False, 'Do no do the operation')];
1935 note that the '?' char is reserved for help
1937 @return: one of the return values from the choices list; if input is
1938 not possible (i.e. not running with a tty, we return the last
1943 choices = [("y", True, "Perform the operation"),
1944 ("n", False, "Do not perform the operation")]
1945 if not choices or not isinstance(choices, list):
1946 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1947 for entry in choices:
1948 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1949 raise errors.ProgrammerError("Invalid choices element to AskUser")
# The fallback answer is the return value of the last choice.
1951 answer = choices[-1][1]
1953 for line in text.splitlines():
1954 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1955 text = "\n".join(new_text)
# NOTE(review): file() is a Python 2 builtin; under Python 3 this must
# become open(). The surrounding try/finally and prompt loop lines are
# missing from this listing.
1957 f = file("/dev/tty", "a+")
1961 chars = [entry[0] for entry in choices]
1962 chars[-1] = "[%s]" % chars[-1]
1964 maps = dict([(entry[0], entry[1]) for entry in choices])
1968 f.write("/".join(chars))
1970 line = f.readline(2).strip().lower()
1975 for entry in choices:
1976 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  Carries a single argument: the ID of the job that was submitted.
  The handler is expected to print this ID.

  This is not an error, just a structured way to exit from clients.

  """
1995 def SendJob(ops, cl=None):
1996 """Function to submit an opcode without waiting for the results.
1999 @param ops: list of opcodes
2000 @type cl: luxi.Client
2001 @param cl: the luxi client to use for communicating with the master;
2002 if None, a new client will be created
# NOTE(review): the lines creating the client when cl is None and the
# final "return job_id" are missing from this listing.
2008 job_id = cl.SubmitJob(ops)
2013 def GenericPollJob(job_id, cbs, report_cbs):
2014 """Generic job-polling function.
2016 @type job_id: number
2017 @param job_id: Job ID
2018 @type cbs: Instance of L{JobPollCbBase}
2019 @param cbs: Data callbacks
2020 @type report_cbs: Instance of L{JobPollReportCbBase}
2021 @param report_cbs: Reporting callbacks
# NOTE(review): several interior lines (the polling loop header,
# timeouts and "continue" statements) are missing from this listing.
2024 prev_job_info = None
2025 prev_logmsg_serial = None
2030 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2033 # job not found, go away!
2034 raise errors.JobLost("Job with id %s lost" % job_id)
2036 if result == constants.JOB_NOTCHANGED:
2037 report_cbs.ReportNotChanged(job_id, status)
2042 # Split result, a tuple of (field values, log entries)
2043 (job_info, log_entries) = result
2044 (status, ) = job_info
2047 for log_entry in log_entries:
2048 (serial, timestamp, log_type, message) = log_entry
2049 report_cbs.ReportLogMessage(job_id, serial, timestamp,
2051 prev_logmsg_serial = max(prev_logmsg_serial, serial)
2053 # TODO: Handle canceled and archived jobs
2054 elif status in (constants.JOB_STATUS_SUCCESS,
2055 constants.JOB_STATUS_ERROR,
2056 constants.JOB_STATUS_CANCELING,
2057 constants.JOB_STATUS_CANCELED):
2060 prev_job_info = job_info
# Job reached a final state: fetch the per-opcode results.
2062 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2064 raise errors.JobLost("Job with id %s lost" % job_id)
2066 status, opstatus, result = jobs[0]
2068 if status == constants.JOB_STATUS_SUCCESS:
2071 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2072 raise errors.OpExecError("Job was canceled")
2075 for idx, (status, msg) in enumerate(zip(opstatus, result)):
2076 if status == constants.OP_STATUS_SUCCESS:
2078 elif status == constants.OP_STATUS_ERROR:
2079 errors.MaybeRaise(msg)
2082 raise errors.OpExecError("partial failure (opcode %d): %s" %
2085 raise errors.OpExecError(str(msg))
2087 # default failure mode
2088 raise errors.OpExecError(result)
2091 class JobPollCbBase:
2092 """Base class for L{GenericPollJob} callbacks.
# NOTE(review): the "def __init__(self):" line is missing from the
# listing before the following docstring.
2096 """Initializes this class.
2100 def WaitForJobChangeOnce(self, job_id, fields,
2101 prev_job_info, prev_log_serial):
2102 """Waits for changes on a job.

    Abstract method; subclasses must override.

2105 raise NotImplementedError()
2107 def QueryJobs(self, job_ids, fields):
2108 """Returns the selected fields for the selected job IDs.
2110 @type job_ids: list of numbers
2111 @param job_ids: Job IDs
2112 @type fields: list of strings
2113 @param fields: Fields
2116 raise NotImplementedError()
2119 class JobPollReportCbBase:
2120 """Base class for L{GenericPollJob} reporting callbacks.
# NOTE(review): the "def __init__(self):" line is missing from the
# listing before the following docstring.
2124 """Initializes this class.
2128 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2129 """Handles a log message.

    Abstract method; subclasses must override.

2132 raise NotImplementedError()
2134 def ReportNotChanged(self, job_id, status):
2135 """Called if a job hasn't changed in a while.
2137 @type job_id: number
2138 @param job_id: Job ID
2139 @type status: string or None
2140 @param status: Job status if available
2143 raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all job queries

    """
    JobPollCbBase.__init__(self)
    # Fix: the client must be stored on the instance; the delegating
    # methods below read self.cl and would otherwise raise
    # AttributeError
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable receiving a (timestamp, log_type,
      log_msg) tuple for each job log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Forwards a log message to the feedback function.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    This implementation intentionally ignores such notifications.

    """
2193 class StdioJobPollReportCb(JobPollReportCbBase):
# NOTE(review): the "def __init__(self):" line is missing from the
# listing before the following docstring.
2195 """Initializes this class.
2198 JobPollReportCbBase.__init__(self)
# Flags ensuring each state message is printed at most once.
2200 self.notified_queued = False
2201 self.notified_waitlock = False
2203 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2204 """Handles a log message by printing it to standard output.
2207 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2208 FormatLogMessage(log_type, log_msg))
2210 def ReportNotChanged(self, job_id, status):
2211 """Called if a job hasn't changed in a while.
2217 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2218 ToStderr("Job %s is waiting in queue", job_id)
2219 self.notified_queued = True
2221 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2222 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2223 self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-ELOG_MESSAGE payloads are stringified before being made safe
  for terminal output.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
2236 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2237 """Function to poll for the result of a job.
2239 @type job_id: job identified
2240 @param job_id: the job to poll for results
2241 @type cl: luxi.Client
2242 @param cl: the luxi client to use for communicating with the master;
2243 if None, a new client will be created
# NOTE(review): the client creation lines and the if/elif branches
# selecting between the two reporter constructors are partly missing
# from this listing.
2249 if reporter is None:
2251 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2253 reporter = StdioJobPollReportCb()
2255 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2257 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2260 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2261 """Legacy function to submit an opcode.
2263 This is just a simple wrapper over the construction of the processor
2264 instance. It should be extended to better handle feedback and
2265 interaction functions.
# NOTE(review): the client-creation lines are missing from this
# listing between the docstring and the next statement.
2271 SetGenericOpcodeOpts([op], opts)
2273 job_id = SendJob([op], cl=cl)
2274 if hasattr(opts, "print_jobid") and opts.print_jobid:
2275 ToStdout("%d" % job_id)
2277 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
# Single opcode submitted, so the first result is the one to return.
2280 return op_results[0]
2283 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2284 """Wrapper around SubmitOpCode or SendJob.
2286 This function will decide, based on the 'opts' parameter, whether to
2287 submit and wait for the result of the opcode (and return it), or
2288 whether to just send the job and print its identifier. It is used in
2289 order to simplify the implementation of the '--submit' option.
2291 It will also process the opcodes if we're sending them via SendJob
2292 (otherwise SubmitOpCode does it).
2295 if opts and opts.submit_only:
# NOTE(review): the line defining "job" (presumably job = [op]) is
# missing from this listing.
2297 SetGenericOpcodeOpts(job, opts)
2298 job_id = SendJob(job, cl=cl)
2299 if opts.print_jobid:
2300 ToStdout("%d" % job_id)
2301 raise JobSubmittedException(job_id)
2303 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2306 def _InitReasonTrail(op, opts):
2307 """Builds the first part of the reason trail
2309 Builds the initial part of the reason trail, adding the user provided reason
2310 (if it exists) and the name of the command starting the operation.
2312 @param op: the opcode the reason trail will be added to
2313 @param opts: the command line options selected by the user
2316 assert len(sys.argv) >= 2
# NOTE(review): the trail initialization and the "if opts.reason:"
# guard are missing from this listing.
2320 trail.append((constants.OPCODE_REASON_SRC_USER,
2324 binary = os.path.basename(sys.argv[0])
2325 source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2326 command = sys.argv[1]
2327 trail.append((source, command, utils.EpochNano()))
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    # Fix: the docstring explicitly allows options to be None, but the
    # loop below dereferences options.debug -- bail out early
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)
2353 def GetClient(query=False):
2354 """Connects to the luxi socket and returns a client.
2356 @type query: boolean
2357 @param query: this signifies that the client will only be
2358 used for queries; if the build-time parameter
2359 enable-split-queries is enabled, then the client will be
2360 connected to the query socket instead of the masterd socket
# The LUXI_OVERRIDE environment variable can force a specific socket.
2363 override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2365 if override_socket == constants.LUXI_OVERRIDE_MASTER:
2366 address = pathutils.MASTER_SOCKET
2367 elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2368 address = pathutils.QUERY_SOCKET
2370 address = override_socket
2371 elif query and constants.ENABLE_SPLIT_QUERY:
2372 address = pathutils.QUERY_SOCKET
2375 # TODO: Cache object?
2377 client = luxi.Client(address=address)
2378 except luxi.NoMasterError:
# No master responded: inspect ssconf to produce a helpful error.
2379 ss = ssconf.SimpleStore()
2381 # Try to read ssconf file
2384 except errors.ConfigurationError:
2385 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2386 " not part of a cluster",
2389 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2390 if master != myself:
2391 raise errors.OpPrereqError("This is not the master node, please connect"
2392 " to node '%s' and rerun the command" %
2393 master, errors.ECODE_INVAL)
# FormatError: map a known exception instance to (exit code, message).
# NOTE(review): partial numbered listing — left-hand numbers are original
# file line numbers; gaps mean elided lines (e.g. the initial msg/obuf/retcode
# setup around line 2405-2409 is not visible here).
2398 def FormatError(err):
2399 """Return a formatted error message for a given error.
2401 This function takes an exception instance and returns a tuple
2402 consisting of two values: first, the recommended exit code, and
2403 second, a string describing the error message (not
2404 newline-terminated).
# Long isinstance chain: most specific error types first, generic last.
2410 if isinstance(err, errors.ConfigurationError):
2411 txt = "Corrupt configuration file: %s" % msg
2413 obuf.write(txt + "\n")
2414 obuf.write("Aborting.")
2416 elif isinstance(err, errors.HooksAbort):
2417 obuf.write("Failure: hooks execution failed:\n")
2418 for node, script, out in err.args[0]:
2420 obuf.write(" node: %s, script: %s, output: %s\n" %
2421 (node, script, out))
2423 obuf.write(" node: %s, script: %s (no output)\n" %
2425 elif isinstance(err, errors.HooksFailure):
2426 obuf.write("Failure: hooks general failure: %s" % msg)
2427 elif isinstance(err, errors.ResolverError):
# Distinguish "can't resolve myself" from "can't resolve a peer".
2428 this_host = netutils.Hostname.GetSysName()
2429 if err.args[0] == this_host:
2430 msg = "Failure: can't resolve my own hostname ('%s')"
2432 msg = "Failure: can't resolve hostname '%s'"
2433 obuf.write(msg % err.args[0])
2434 elif isinstance(err, errors.OpPrereqError):
# Two-arg OpPrereqError carries (details, error-type-code).
2435 if len(err.args) == 2:
2436 obuf.write("Failure: prerequisites not met for this"
2437 " operation:\nerror type: %s, error details:\n%s" %
2438 (err.args[1], err.args[0]))
2440 obuf.write("Failure: prerequisites not met for this"
2441 " operation:\n%s" % msg)
2442 elif isinstance(err, errors.OpExecError):
2443 obuf.write("Failure: command execution error:\n%s" % msg)
2444 elif isinstance(err, errors.TagError):
2445 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2446 elif isinstance(err, errors.JobQueueDrainError):
2447 obuf.write("Failure: the job queue is marked for drain and doesn't"
2448 " accept new requests\n")
2449 elif isinstance(err, errors.JobQueueFull):
2450 obuf.write("Failure: the job queue is full and doesn't accept new"
2451 " job submissions until old jobs are archived\n")
2452 elif isinstance(err, errors.TypeEnforcementError):
2453 obuf.write("Parameter Error: %s" % msg)
2454 elif isinstance(err, errors.ParameterError):
2455 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2456 elif isinstance(err, luxi.NoMasterError):
# Name the daemon based on which socket the connection attempt used.
2457 if err.args[0] == pathutils.MASTER_SOCKET:
2458 daemon = "the master daemon"
2459 elif err.args[0] == pathutils.QUERY_SOCKET:
2460 daemon = "the config daemon"
2462 daemon = "socket '%s'" % str(err.args[0])
2463 obuf.write("Cannot communicate with %s.\nIs the process running"
2464 " and listening for connections?" % daemon)
2465 elif isinstance(err, luxi.TimeoutError):
2466 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2467 " been submitted and will continue to run even if the call"
2468 " timed out. Useful commands in this situation are \"gnt-job"
2469 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2471 elif isinstance(err, luxi.PermissionError):
2472 obuf.write("It seems you don't have permissions to connect to the"
2473 " master daemon.\nPlease retry as a different user.")
2474 elif isinstance(err, luxi.ProtocolError):
2475 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2477 elif isinstance(err, errors.JobLost):
2478 obuf.write("Error checking job status: %s" % msg)
2479 elif isinstance(err, errors.QueryFilterParseError):
2480 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2481 obuf.write("\n".join(err.GetDetails()))
2482 elif isinstance(err, errors.GenericError):
2483 obuf.write("Unhandled Ganeti error: %s" % msg)
2484 elif isinstance(err, JobSubmittedException):
2485 obuf.write("JobID: %s\n" % err.args[0])
2488 obuf.write("Unhandled exception: %s" % msg)
# Strip the trailing newline so callers can append their own.
2489 return retcode, obuf.getvalue().rstrip("\n")
# GenericMain: shared entry point for all gnt-* command-line tools — parses
# arguments, sets up logging, dispatches to the selected command function and
# converts exceptions to exit codes.
# NOTE(review): partial numbered listing — left-hand numbers are original
# file line numbers; gaps mean elided lines (e.g. the final "return result"
# is not visible here).
2492 def GenericMain(commands, override=None, aliases=None,
2493 env_override=frozenset()):
2494 """Generic main function for all the gnt-* commands.
2496 @param commands: a dictionary with a special structure, see the design doc
2497 for command line handling.
2498 @param override: if not None, we expect a dictionary with keys that will
2499 override command line options; this can be used to pass
2500 options from the scripts to generic functions
2501 @param aliases: dictionary with command aliases {'alias': 'target', ...}
2502 @param env_override: list of environment names which are allowed to submit
2503 default args for commands
2506 # save the program name and the entire command line for later logging
2508 binary = os.path.basename(sys.argv[0])
2510 binary = sys.argv[0]
2512 if len(sys.argv) >= 2:
2513 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2517 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2519 binary = "<unknown program>"
2520 cmdline = "<unknown>"
2526 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
# --version and --help are signalled via private exceptions from the parser.
2528 except _ShowVersion:
2529 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2530 constants.RELEASE_VERSION)
2531 return constants.EXIT_SUCCESS
2532 except _ShowUsage, err:
2533 for line in _FormatUsage(binary, commands):
2537 return constants.EXIT_FAILURE
2539 return constants.EXIT_SUCCESS
2540 except errors.ParameterError, err:
2541 result, err_msg = FormatError(err)
2545 if func is None: # parse error
# Script-supplied overrides win over whatever was parsed from argv.
2548 if override is not None:
2549 for key, val in override.iteritems():
2550 setattr(options, key, val)
2552 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2553 stderr_logging=True)
2555 logging.info("Command line: %s", cmdline)
2558 result = func(options, args)
2559 except (errors.GenericError, luxi.ProtocolError,
2560 JobSubmittedException), err:
2561 result, err_msg = FormatError(err)
2562 logging.exception("Error during command processing")
2564 except KeyboardInterrupt:
2565 result = constants.EXIT_FAILURE
2566 ToStderr("Aborted. Note that if the operation created any jobs, they"
2567 " might have been submitted and"
2568 " will continue to run in the background.")
2569 except IOError, err:
2570 if err.errno == errno.EPIPE:
2571 # our terminal went away, we'll exit
2572 sys.exit(constants.EXIT_FAILURE)
# ParseNicOption: turn the list of (index, dict) pairs from --net into a
# dense list of NIC parameter dicts, validating indices and value types.
# NOTE(review): partial numbered listing — gaps mean elided lines (the
# final "return nics" is not visible here).
2579 def ParseNicOption(optvalue):
2580 """Parses the value of the --net option(s).
# Highest NIC index + 1 determines how many slots we must allocate.
2584 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2585 except (TypeError, ValueError), err:
2586 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2589 nics = [{}] * nic_max
2590 for nidx, ndict in optvalue:
2593 if not isinstance(ndict, dict):
2594 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2595 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2597 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
# FixHvParams: normalize the usb_devices hvparam separator (in place).
# NOTE(review): partial numbered listing — gaps mean elided lines (e.g. the
# guard checking for the usb_devices key is not visible here).
2604 def FixHvParams(hvparams):
2605 # In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
2606 # comma to space because commas cannot be accepted on the command line
2607 # (they already act as the separator between different hvparams). Still,
2608 # RAPI should be able to accept commas for backwards compatibility.
2609 # Therefore, we convert spaces into commas here, and we keep the old
2610 # parsing logic everywhere else.
2612 new_usb_devices = hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
2613 hvparams[constants.HV_USB_DEVICES] = new_usb_devices
2615 #No usb_devices, no modification required
# GenericInstanceCreate: shared implementation behind "gnt-instance add" and
# "gnt-backup import" — validates NIC/disk options, builds an
# OpInstanceCreate opcode and submits it.
# NOTE(review): partial numbered listing — left-hand numbers are original
# file line numbers; gaps mean elided lines throughout this function.
2619 def GenericInstanceCreate(mode, opts, args):
2620 """Add an instance to the cluster via either creation or import.
2622 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2623 @param opts: the command line options selected by the user
2625 @param args: should contain only one element, the new instance name
2627 @return: the desired exit code
2632 (pnode, snode) = SplitNodeOption(opts.node)
2637 hypervisor, hvparams = opts.hypervisor
2640 nics = ParseNicOption(opts.nics)
2644 elif mode == constants.INSTANCE_CREATE:
2645 # default of one nic, all auto
# Disk option sanity checks: --disk and -s are mutually exclusive, and
# diskless templates forbid both.
2651 if opts.disk_template == constants.DT_DISKLESS:
2652 if opts.disks or opts.sd_size is not None:
2653 raise errors.OpPrereqError("Diskless instance but disk"
2654 " information passed", errors.ECODE_INVAL)
2657 if (not opts.disks and not opts.sd_size
2658 and mode == constants.INSTANCE_CREATE):
2659 raise errors.OpPrereqError("No disk information specified",
2661 if opts.disks and opts.sd_size is not None:
2662 raise errors.OpPrereqError("Please use either the '--disk' or"
2663 " '-s' option", errors.ECODE_INVAL)
2664 if opts.sd_size is not None:
# -s SIZE shorthand becomes a single disk 0 spec.
2665 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2669 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2670 except ValueError, err:
2671 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2673 disks = [{}] * disk_max
2676 for didx, ddict in opts.disks:
2678 if not isinstance(ddict, dict):
2679 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2680 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2681 elif constants.IDISK_SIZE in ddict:
# 'size' and 'adopt' are mutually exclusive per disk.
2682 if constants.IDISK_ADOPT in ddict:
2683 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2684 " (disk %d)" % didx, errors.ECODE_INVAL)
2686 ddict[constants.IDISK_SIZE] = \
2687 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2688 except ValueError, err:
2689 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2690 (didx, err), errors.ECODE_INVAL)
2691 elif constants.IDISK_ADOPT in ddict:
2692 if constants.IDISK_SPINDLES in ddict:
2693 raise errors.OpPrereqError("spindles is not a valid option when"
2694 " adopting a disk", errors.ECODE_INVAL)
2695 if mode == constants.INSTANCE_IMPORT:
2696 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2697 " import", errors.ECODE_INVAL)
2698 ddict[constants.IDISK_SIZE] = 0
2700 raise errors.OpPrereqError("Missing size or adoption source for"
2701 " disk %d" % didx, errors.ECODE_INVAL)
2704 if opts.tags is not None:
2705 tags = opts.tags.split(",")
2709 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2710 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2711 FixHvParams(hvparams)
# Mode-specific opcode fields: create gets OS/install flags, import gets
# the source node/path.
2713 if mode == constants.INSTANCE_CREATE:
2716 force_variant = opts.force_variant
2719 no_install = opts.no_install
2720 identify_defaults = False
2721 elif mode == constants.INSTANCE_IMPORT:
2724 force_variant = False
2725 src_node = opts.src_node
2726 src_path = opts.src_dir
2728 identify_defaults = opts.identify_defaults
2730 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2732 op = opcodes.OpInstanceCreate(instance_name=instance,
2734 disk_template=opts.disk_template,
2736 conflicts_check=opts.conflicts_check,
2737 pnode=pnode, snode=snode,
2738 ip_check=opts.ip_check,
2739 name_check=opts.name_check,
2740 wait_for_sync=opts.wait_for_sync,
2741 file_storage_dir=opts.file_storage_dir,
2742 file_driver=opts.file_driver,
2743 iallocator=opts.iallocator,
2744 hypervisor=hypervisor,
2746 beparams=opts.beparams,
2747 osparams=opts.osparams,
2751 force_variant=force_variant,
2755 no_install=no_install,
2756 identify_defaults=identify_defaults,
2757 ignore_ipolicy=opts.ignore_ipolicy)
2759 SubmitOrSend(op, opts)
# _RunWhileClusterStoppedHelper: stops all cluster daemons (watcher blocked,
# masterd first, then per-node daemons), runs a callback, and restarts
# everything with the master node last.
# NOTE(review): partial numbered listing — left-hand numbers are original
# file line numbers; gaps mean elided lines (e.g. try/finally keywords around
# the Call body are not all visible here).
2763 class _RunWhileClusterStoppedHelper:
2764 """Helper class for L{RunWhileClusterStopped} to simplify state management
2767 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2768 """Initializes this class.
2770 @type feedback_fn: callable
2771 @param feedback_fn: Feedback function
2772 @type cluster_name: string
2773 @param cluster_name: Cluster name
2774 @type master_node: string
2775 @param master_node: Master node name
2776 @type online_nodes: list
2777 @param online_nodes: List of names of online nodes
2780 self.feedback_fn = feedback_fn
2781 self.cluster_name = cluster_name
2782 self.master_node = master_node
2783 self.online_nodes = online_nodes
2785 self.ssh = ssh.SshRunner(self.cluster_name)
# Remote daemons are handled via SSH; the master node runs commands locally.
2787 self.nonmaster_nodes = [name for name in online_nodes
2788 if name != master_node]
2790 assert self.master_node not in self.nonmaster_nodes
2792 def _RunCmd(self, node_name, cmd):
2793 """Runs a command on the local or a remote machine.
2795 @type node_name: string
2796 @param node_name: Machine name
2801 if node_name is None or node_name == self.master_node:
2802 # No need to use SSH
2803 result = utils.RunCmd(cmd)
2805 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2806 utils.ShellQuoteArgs(cmd))
# Build a descriptive error including node, exit code and output.
2809 errmsg = ["Failed to run command %s" % result.cmd]
2811 errmsg.append("on node %s" % node_name)
2812 errmsg.append(": exitcode %s and error %s" %
2813 (result.exit_code, result.output))
2814 raise errors.OpExecError(" ".join(errmsg))
2816 def Call(self, fn, *args):
2817 """Call function while all daemons are stopped.
2820 @param fn: Function to be called
2823 # Pause watcher by acquiring an exclusive lock on watcher state file
2824 self.feedback_fn("Blocking watcher")
2825 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2827 # TODO: Currently, this just blocks. There's no timeout.
2828 # TODO: Should it be a shared lock?
2829 watcher_block.Exclusive(blocking=True)
2831 # Stop master daemons, so that no new jobs can come in and all running
2833 self.feedback_fn("Stopping master daemons")
2834 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2836 # Stop daemons on all nodes
2837 for node_name in self.online_nodes:
2838 self.feedback_fn("Stopping daemons on %s" % node_name)
2839 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2841 # All daemons are shut down now
2843 return fn(self, *args)
2844 except Exception, err:
# Report the error through feedback_fn but keep going so the cluster is
# restarted by the (elided) cleanup below.
2845 _, errmsg = FormatError(err)
2846 logging.exception("Caught exception")
2847 self.feedback_fn(errmsg)
2850 # Start cluster again, master node last
2851 for node_name in self.nonmaster_nodes + [self.master_node]:
2852 self.feedback_fn("Starting daemons on %s" % node_name)
2853 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2856 watcher_block.Close()
# RunWhileClusterStopped: gather cluster facts from the master daemon, then
# delegate to _RunWhileClusterStoppedHelper to run fn with daemons stopped.
# NOTE(review): partial numbered listing — gaps mean elided lines (e.g. the
# GetClient() call assigning "cl" is not visible here).
2859 def RunWhileClusterStopped(feedback_fn, fn, *args):
2860 """Calls a function while all cluster daemons are stopped.
2862 @type feedback_fn: callable
2863 @param feedback_fn: Feedback function
2865 @param fn: Function to be called when daemons are stopped
2868 feedback_fn("Gathering cluster information")
2870 # This ensures we're running on the master daemon
2873 (cluster_name, master_node) = \
2874 cl.QueryConfigValues(["cluster_name", "master_node"])
2876 online_nodes = GetOnlineNodes([], cl=cl)
2878 # Don't keep a reference to the client. The master daemon will go away.
2881 assert master_node in online_nodes
2883 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2884 online_nodes).Call(fn, *args)
# GenerateTable: render rows of data as a text table, either with computed
# column widths (separator=None) or simple separator-joined fields.
# NOTE(review): partial numbered listing — left-hand numbers are original
# file line numbers; gaps mean elided lines (e.g. the final "return result"
# is not visible here).
2887 def GenerateTable(headers, fields, separator, data,
2888 numfields=None, unitfields=None,
2890 """Prints a table with headers and different fields.
2893 @param headers: dictionary mapping field names to headers for
2896 @param fields: the field names corresponding to each row in
2898 @param separator: the separator to be used; if this is None,
2899 the default 'smart' algorithm is used which computes optimal
2900 field width, otherwise just the separator is used between
2903 @param data: a list of lists, each sublist being one row to be output
2904 @type numfields: list
2905 @param numfields: a list with the fields that hold numeric
2906 values and thus should be right-aligned
2907 @type unitfields: list
2908 @param unitfields: a list with the fields that hold numeric
2909 values that should be formatted with the units field
2910 @type units: string or None
2911 @param units: the units we should use for formatting, or None for
2912 automatic choice (human-readable for non-separator usage, otherwise
2913 megabytes); this is a one-letter string
2922 if numfields is None:
2924 if unitfields is None:
# FieldSet supports pattern matching on field names below.
2927 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2928 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2931 for field in fields:
2932 if headers and field not in headers:
2933 # TODO: handle better unknown fields (either revert to old
2934 # style of raising exception, or deal more intelligently with
2936 headers[field] = field
# Choose per-column printf-style formats: plain for separator mode,
# width-padded (right- or left-aligned) for smart mode.
2937 if separator is not None:
2938 format_fields.append("%s")
2939 elif numfields.Matches(field):
2940 format_fields.append("%*s")
2942 format_fields.append("%-*s")
2944 if separator is None:
2945 mlens = [0 for name in fields]
2946 format_str = " ".join(format_fields)
2948 format_str = separator.replace("%", "%%").join(format_fields)
2953 for idx, val in enumerate(row):
2954 if unitfields.Matches(fields[idx]):
2957 except (TypeError, ValueError):
2960 val = row[idx] = utils.FormatUnit(val, units)
2961 val = row[idx] = str(val)
# Track the maximum width per column for the smart layout.
2962 if separator is None:
2963 mlens[idx] = max(mlens[idx], len(val))
2968 for idx, name in enumerate(fields):
2970 if separator is None:
2971 mlens[idx] = max(mlens[idx], len(hdr))
2972 args.append(mlens[idx])
2974 result.append(format_str % tuple(args))
2976 if separator is None:
2977 assert len(mlens) == len(fields)
2979 if fields and not numfields.Matches(fields[-1]):
2985 line = ["-" for _ in fields]
2986 for idx in range(len(fields)):
2987 if separator is None:
2988 args.append(mlens[idx])
2989 args.append(line[idx])
2990 result.append(format_str % tuple(args))
# _FormatBool: helper used by _DEFAULT_FORMAT_QUERY below; body elided in
# this listing — presumably returns a short string form of the boolean
# (TODO confirm against the full source).
2995 def _FormatBool(value):
2996 """Formats a boolean value as a string.
3004 #: Default formatting for query results; (callback, align right)
# Maps query field types (QFT_*) to a (formatter, right-align) pair;
# QFT_UNIT is deliberately absent — it is handled dynamically in
# _GetColumnFormatter because the unit is chosen at call time.
3005 _DEFAULT_FORMAT_QUERY = {
3006 constants.QFT_TEXT: (str, False),
3007 constants.QFT_BOOL: (_FormatBool, False),
3008 constants.QFT_NUMBER: (str, True),
3009 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
3010 constants.QFT_OTHER: (str, False),
3011 constants.QFT_UNKNOWN: (str, False),
# _GetColumnFormatter: pick the (formatter, align-right) pair for a field,
# honouring per-field overrides and handling QFT_UNIT dynamically.
# NOTE(review): partial numbered listing — gaps mean elided lines (e.g. the
# early return when an override matches is not visible here).
3015 def _GetColumnFormatter(fdef, override, unit):
3016 """Returns formatting function for a field.
3018 @type fdef: L{objects.QueryFieldDefinition}
3019 @type override: dict
3020 @param override: Dictionary for overriding field formatting functions,
3021 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3023 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3024 @rtype: tuple; (callable, bool)
3025 @return: Returns the function to format a value (takes one parameter) and a
3026 boolean for aligning the value on the right-hand side
3029 fmt = override.get(fdef.name, None)
3033 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3035 if fdef.kind == constants.QFT_UNIT:
3036 # Can't keep this information in the static dictionary
3037 return (lambda value: utils.FormatUnit(value, unit), True)
3039 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3043 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# _QueryColumnFormatter: callable wrapper that records each field's status
# and formats the value (or an error placeholder for abnormal statuses).
# NOTE(review): partial numbered listing — gaps mean elided lines (e.g. the
# "self._fn = fn" assignment in __init__ is not visible here).
3046 class _QueryColumnFormatter:
3047 """Callable class for formatting fields of a query.
3050 def __init__(self, fn, status_fn, verbose):
3051 """Initializes this class.
3054 @param fn: Formatting function
3055 @type status_fn: callable
3056 @param status_fn: Function to report fields' status
3057 @type verbose: boolean
3058 @param verbose: whether to use verbose field descriptions or not
3062 self._status_fn = status_fn
3063 self._verbose = verbose
3065 def __call__(self, data):
3066 """Returns a field's string representation.
3069 (status, value) = data
# Record the status first so callers can tally result statistics.
3072 self._status_fn(status)
3074 if status == constants.RS_NORMAL:
3075 return self._fn(value)
3077 assert value is None, \
3078 "Found value %r for abnormal status %s" % (value, status)
3080 return FormatResultError(status, self._verbose)
# FormatResultError: translate an abnormal result status into display text.
# NOTE(review): partial numbered listing — gaps mean elided lines (e.g. the
# return of verbose_text/normal_text is not visible here).
3083 def FormatResultError(status, verbose):
3084 """Formats result status other than L{constants.RS_NORMAL}.
3086 @param status: The result status
3087 @type verbose: boolean
3088 @param verbose: Whether to return the verbose text
3089 @return: Text of result status
3092 assert status != constants.RS_NORMAL, \
3093 "FormatResultError called with status equal to constants.RS_NORMAL"
3095 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3097 raise NotImplementedError("Unknown status %s" % status)
# FormatQueryResult: build table columns from the query field definitions,
# format all rows, collect per-status statistics and derive an overall
# status (QR_NORMAL / QR_INCOMPLETE / QR_UNKNOWN).
# NOTE(review): partial numbered listing — left-hand numbers are original
# file line numbers; gaps mean elided lines.
3104 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3105 header=False, verbose=False):
3106 """Formats data in L{objects.QueryResponse}.
3108 @type result: L{objects.QueryResponse}
3109 @param result: result of query operation
3111 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3112 see L{utils.text.FormatUnit}
3113 @type format_override: dict
3114 @param format_override: Dictionary for overriding field formatting functions,
3115 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3116 @type separator: string or None
3117 @param separator: String used to separate fields
3119 @param header: Whether to output header row
3120 @type verbose: boolean
3121 @param verbose: whether to use verbose field descriptions or not
3130 if format_override is None:
3131 format_override = {}
# One counter per possible result status; updated via _RecordStatus.
3133 stats = dict.fromkeys(constants.RS_ALL, 0)
3135 def _RecordStatus(status):
3140 for fdef in result.fields:
3141 assert fdef.title and fdef.name
3142 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3143 columns.append(TableColumn(fdef.title,
3144 _QueryColumnFormatter(fn, _RecordStatus,
3148 table = FormatTable(result.data, columns, header, separator)
3150 # Collect statistics
3151 assert len(stats) == len(constants.RS_ALL)
3152 assert compat.all(count >= 0 for count in stats.values())
3154 # Determine overall status. If there was no data, unknown fields must be
3155 # detected via the field definitions.
3156 if (stats[constants.RS_UNKNOWN] or
3157 (not result.data and _GetUnknownFields(result.fields))):
3159 elif compat.any(count > 0 for key, count in stats.items()
3160 if key != constants.RS_NORMAL):
3161 status = QR_INCOMPLETE
3165 return (status, table)
# _GetUnknownFields: filter field definitions down to unknown-kind ones.
3168 def _GetUnknownFields(fdefs):
3169 """Returns list of unknown fields included in C{fdefs}.
3171 @type fdefs: list of L{objects.QueryFieldDefinition}
3174 return [fdef for fdef in fdefs
3175 if fdef.kind == constants.QFT_UNKNOWN]
# _WarnUnknownFields: emit a stderr warning listing any unknown fields.
# NOTE(review): partial numbered listing — gaps mean elided lines (the
# guard and return around the warning are not visible here).
3178 def _WarnUnknownFields(fdefs):
3179 """Prints a warning to stderr if a query included unknown fields.
3181 @type fdefs: list of L{objects.QueryFieldDefinition}
3184 unknown = _GetUnknownFields(fdefs)
3186 ToStderr("Warning: Queried for unknown fields %s",
3187 utils.CommaJoin(fdef.name for fdef in unknown))
# GenericList: shared "list" command implementation — build a filter from
# names, run the query, format the result and map it to an exit code.
# NOTE(review): partial numbered listing — left-hand numbers are original
# file line numbers; gaps mean elided lines.
3193 def GenericList(resource, fields, names, unit, separator, header, cl=None,
3194 format_override=None, verbose=False, force_filter=False,
3195 namefield=None, qfilter=None, isnumeric=False):
3196 """Generic implementation for listing all items of a resource.
3198 @param resource: One of L{constants.QR_VIA_LUXI}
3199 @type fields: list of strings
3200 @param fields: List of fields to query for
3201 @type names: list of strings
3202 @param names: Names of items to query for
3203 @type unit: string or None
3204 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3205 None for automatic choice (human-readable for non-separator usage,
3206 otherwise megabytes); this is a one-letter string
3207 @type separator: string or None
3208 @param separator: String used to separate fields
3210 @param header: Whether to show header row
3211 @type force_filter: bool
3212 @param force_filter: Whether to always treat names as filter
3213 @type format_override: dict
3214 @param format_override: Dictionary for overriding field formatting functions,
3215 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3216 @type verbose: boolean
3217 @param verbose: whether to use verbose field descriptions or not
3218 @type namefield: string
3219 @param namefield: Name of field to use for simple filters (see
3220 L{qlang.MakeFilter} for details)
3221 @type qfilter: list or None
3222 @param qfilter: Query filter (in addition to names)
3223 @param isnumeric: bool
3224 @param isnumeric: Whether the namefield's type is numeric, and therefore
3225 any simple filters built by namefield should use integer values to
# Combine the name-derived filter with any caller-supplied filter via AND.
3232 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3233 isnumeric=isnumeric)
3236 qfilter = namefilter
3237 elif namefilter is not None:
3238 qfilter = [qlang.OP_AND, namefilter, qfilter]
3243 response = cl.Query(resource, fields, qfilter)
3245 found_unknown = _WarnUnknownFields(response.fields)
3247 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3249 format_override=format_override,
# Sanity check: unknown fields reported by the warning helper must agree
# with the formatter's overall status.
3255 assert ((found_unknown and status == QR_UNKNOWN) or
3256 (not found_unknown and status != QR_UNKNOWN))
3258 if status == QR_UNKNOWN:
3259 return constants.EXIT_UNKNOWN_FIELD
3261 # TODO: Should the list command fail if not all data could be collected?
3262 return constants.EXIT_SUCCESS
# _FieldDescValues: build the row values for one field definition in the
# list-fields output (name, human-readable type, title, description).
# NOTE(review): partial numbered listing — the surrounding return-list lines
# are elided here.
3265 def _FieldDescValues(fdef):
3266 """Helper function for L{GenericListFields} to get query field description.
3268 @type fdef: L{objects.QueryFieldDefinition}
3274 _QFT_NAMES.get(fdef.kind, fdef.kind),
# GenericListFields: shared "list-fields" command implementation — query
# the available fields for a resource and print them as a table.
# NOTE(review): partial numbered listing — gaps mean elided lines.
3280 def GenericListFields(resource, fields, separator, header, cl=None):
3281 """Generic implementation for listing fields for a resource.
3283 @param resource: One of L{constants.QR_VIA_LUXI}
3284 @type fields: list of strings
3285 @param fields: List of fields to query for
3286 @type separator: string or None
3287 @param separator: String used to separate fields
3289 @param header: Whether to show header row
3298 response = cl.QueryFields(resource, fields)
3300 found_unknown = _WarnUnknownFields(response.fields)
3303 TableColumn("Name", str, False),
3304 TableColumn("Type", str, False),
3305 TableColumn("Title", str, False),
3306 TableColumn("Description", str, False),
3309 rows = map(_FieldDescValues, response.fields)
3311 for line in FormatTable(rows, columns, header, separator):
3315 return constants.EXIT_UNKNOWN_FIELD
3317 return constants.EXIT_SUCCESS
3321 """Describes a column for L{FormatTable}.
# TableColumn constructor: stores title/formatter/alignment for FormatTable.
# NOTE(review): the class header line and the self.title/self.format
# assignments are elided from this listing.
3324 def __init__(self, title, fn, align_right):
3325 """Initializes this class.
3328 @param title: Column title
3330 @param fn: Formatting function
3331 @type align_right: bool
3332 @param align_right: Whether to align values on the right-hand side
3337 self.align_right = align_right
# _GetColFormatString: build a printf-style "%<sign><width>s" format; the
# computation of "sign" (alignment flag) is elided from this listing.
3340 def _GetColFormatString(width, align_right):
3341 """Returns the format string for a field.
3349 return "%%%s%ss" % (sign, width)
# FormatTable: format rows via their TableColumn formatters; either join
# with a separator or compute column widths for aligned output.
# NOTE(review): partial numbered listing — gaps mean elided lines.
3352 def FormatTable(rows, columns, header, separator):
3353 """Formats data as a table.
3355 @type rows: list of lists
3356 @param rows: Row data, one list per row
3357 @type columns: list of L{TableColumn}
3358 @param columns: Column descriptions
3360 @param header: Whether to show header row
3361 @type separator: string or None
3362 @param separator: String used to separate columns
# With a header, seed data and widths from the column titles.
3366 data = [[col.title for col in columns]]
3367 colwidth = [len(col.title) for col in columns]
3370 colwidth = [0 for _ in columns]
3374 assert len(row) == len(columns)
3376 formatted = [col.format(value) for value, col in zip(row, columns)]
3378 if separator is None:
3379 # Update column widths
3380 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3381 # Modifying a list's items while iterating is fine
3382 colwidth[idx] = max(oldwidth, len(value))
3384 data.append(formatted)
3386 if separator is not None:
3387 # Return early if a separator is used
3388 return [separator.join(row) for row in data]
3390 if columns and not columns[-1].align_right:
3391 # Avoid unnecessary spaces at end of line
3394 # Build format string
3395 fmt = " ".join([_GetColFormatString(width, col.align_right)
3396 for col, width in zip(columns, colwidth)])
3398 return [fmt % tuple(row) for row in data]
# FormatTimestamp: render a (seconds, microseconds) pair via utils.FormatTime.
# NOTE(review): the unpacking of ts into sec/usecs is elided here.
3401 def FormatTimestamp(ts):
3402 """Formats a given timestamp.
3405 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3408 @return: a string with the formatted timestamp
3411 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3415 return utils.FormatTime(sec, usecs=usecs)
# ParseTimespec: convert a "<number>[suffix]" string into seconds using a
# suffix->multiplier map; bare numbers are taken as seconds.
# NOTE(review): partial numbered listing — gaps mean elided lines (e.g. the
# suffix_map definition and the final return are not visible here).
3418 def ParseTimespec(value):
3419 """Parse a time specification.
3421 The following suffixes will be recognized:
3429 Without any suffix, the value will be taken to be in seconds.
3434 raise errors.OpPrereqError("Empty time specification passed",
3443 if value[-1] not in suffix_map:
3446 except (TypeError, ValueError):
3447 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3450 multiplier = suffix_map[value[-1]]
3452 if not value: # no data left after stripping the suffix
3453 raise errors.OpPrereqError("Invalid time specification (only"
3454 " suffix passed)", errors.ECODE_INVAL)
3456 value = int(value) * multiplier
3457 except (TypeError, ValueError):
3458 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
# GetOnlineNodes: query node names (or secondary IPs), filtered by name
# list, node group and master-exclusion, warning about skipped offline nodes.
# NOTE(review): partial numbered listing — left-hand numbers are original
# file line numbers; gaps mean elided lines.
3463 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3464 filter_master=False, nodegroup=None):
3465 """Returns the names of online nodes.
3467 This function will also log a warning on stderr with the names of
3470 @param nodes: if not empty, use only this subset of nodes (minus the
3472 @param cl: if not None, luxi client to use
3473 @type nowarn: boolean
3474 @param nowarn: by default, this function will output a note with the
3475 offline nodes that are skipped; if this parameter is True the
3476 note is not displayed
3477 @type secondary_ips: boolean
3478 @param secondary_ips: if True, return the secondary IPs instead of the
3479 names, useful for doing network traffic over the replication interface
3481 @type filter_master: boolean
3482 @param filter_master: if True, do not return the master node in the list
3483 (useful in coordination with secondary_ips where we cannot check our
3484 node name against the list)
3485 @type nodegroup: string
3486 @param nodegroup: If set, only return nodes in this node group
# Build up the query filter incrementally from the selection options.
3495 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3497 if nodegroup is not None:
# Accept either a group name or a group UUID.
3498 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3499 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3502 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3505 if len(qfilter) > 1:
3506 final_filter = [qlang.OP_AND] + qfilter
3508 assert len(qfilter) == 1
3509 final_filter = qfilter[0]
3513 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# Each row is ((status, name), (status, offline), (status, sip)); the
# helpers below pick out the relevant value.
3515 def _IsOffline(row):
3516 (_, (_, offline), _) = row
3520 ((_, name), _, _) = row
3524 (_, _, (_, sip)) = row
3527 (offline, online) = compat.partition(result.data, _IsOffline)
3529 if offline and not nowarn:
3530 ToStderr("Note: skipping offline node(s): %s" %
3531 utils.CommaJoin(map(_GetName, offline)))
3538 return map(fn, online)
# _ToStream: printf-style write to a stream, treating EPIPE (closed
# terminal) as a clean exit instead of a traceback.
# NOTE(review): partial numbered listing — gaps mean elided lines (e.g. the
# no-args branch and flush are not visible here).
3541 def _ToStream(stream, txt, *args):
3542 """Write a message to a stream, bypassing the logging system
3544 @type stream: file object
3545 @param stream: the file to which we should write
3547 @param txt: the message
3553 stream.write(txt % args)
3558 except IOError, err:
3559 if err.errno == errno.EPIPE:
3560 # our terminal went away, we'll exit
3561 sys.exit(constants.EXIT_FAILURE)
# ToStdout: convenience wrapper writing a formatted message to stdout.
3566 def ToStdout(txt, *args):
3567 """Write a message to stdout only, bypassing the logging system
3569 This is just a wrapper over _ToStream.
3572 @param txt: the message
3575 _ToStream(sys.stdout, txt, *args)
# ToStderr: convenience wrapper writing a formatted message to stderr.
3578 def ToStderr(txt, *args):
3579 """Write a message to stderr only, bypassing the logging system
3581 This is just a wrapper over _ToStream.
3584 @param txt: the message
3587 _ToStream(sys.stderr, txt, *args)
3590 class JobExecutor(object):
3591 """Class which manages the submission and execution of multiple jobs.
3593 Note that instances of this class should not be reused between
# JobExecutor constructor: stores the client/options/feedback function and
# initializes the per-instance job counter used to preserve submit order.
# NOTE(review): partial listing — assignments for cl/opts/queue/jobs are
# elided here.
3597 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3602 self.verbose = verbose
3605 self.feedback_fn = feedback_fn
3606 self._counter = itertools.count()
# _IfName: small name-formatting helper; body elided in this listing —
# presumably applies fmt only when name is non-empty (TODO confirm).
3609 def _IfName(name, fmt):
3610 """Helper function for formatting name.
# QueueJob: record (counter, name, ops) for later batch submission; the
# counter keeps results in submission order.
3618 def QueueJob(self, name, *ops):
3619 """Record a job for later submit.
3622 @param name: a description of the job, will be used in WaitJobSet
3625 SetGenericOpcodeOpts(ops, self.opts)
3626 self.queue.append((self._counter.next(), name, ops))
# AddJobId: register an already-submitted job ID directly in self.jobs.
3628 def AddJobId(self, name, status, job_id):
3629 """Adds a job ID to the internal queue.
3632 self.jobs.append((self._counter.next(), status, job_id, name))
# SubmitPending: submit every queued job, either one-by-one (each=True,
# failures raise immediately) or in a single SubmitManyJobs call.
# NOTE(review): partial listing — the results initialization and the
# if/else around "each" are elided here.
3634 def SubmitPending(self, each=False):
3635 """Submit all pending jobs.
3640 for (_, _, ops) in self.queue:
3641 # SubmitJob will remove the success status, but raise an exception if
3642 # the submission fails, so we'll notice that anyway.
3643 results.append([True, self.cl.SubmitJob(ops)[0]])
3645 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3646 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3647 self.jobs.append((idx, status, data, name))
3649 def _ChooseJob(self):
3650 """Choose a non-waiting/queued job to poll next.
3653 assert self.jobs, "_ChooseJob called with empty job list"
3655 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3659 for job_data, status in zip(self.jobs, result):
3660 if (isinstance(status, list) and status and
3661 status[0] in (constants.JOB_STATUS_QUEUED,
3662 constants.JOB_STATUS_WAITING,
3663 constants.JOB_STATUS_CANCELING)):
3664 # job is still present and waiting
3666 # good candidate found (either running job or lost job)
3667 self.jobs.remove(job_data)
3671 return self.jobs.pop(0)
3673 def GetResults(self):
3674 """Wait for and return the results of all jobs.
3677 @return: list of tuples (success, job results), in the same order
3678 as the submitted jobs; if a job has failed, instead of the result
3679 there will be the error message
3683 self.SubmitPending()
3686 ok_jobs = [row[2] for row in self.jobs if row[1]]
3688 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3690 # first, remove any non-submitted jobs
3691 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3692 for idx, _, jid, name in failures:
3693 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3694 results.append((idx, False, jid))
3697 (idx, _, jid, name) = self._ChooseJob()
3698 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3700 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3702 except errors.JobLost, err:
3703 _, job_result = FormatError(err)
3704 ToStderr("Job %s%s has been archived, cannot check its result",
3705 jid, self._IfName(name, " for %s"))
3707 except (errors.GenericError, luxi.ProtocolError), err:
3708 _, job_result = FormatError(err)
3710 # the error message will always be shown, verbose or not
3711 ToStderr("Job %s%s has failed: %s",
3712 jid, self._IfName(name, " for %s"), job_result)
3714 results.append((idx, success, job_result))
3716 # sort based on the index, then drop it
3718 results = [i[1:] for i in results]
3722 def WaitOrShow(self, wait):
3723 """Wait for job results or only print the job IDs.
3726 @param wait: whether to wait or not
3730 return self.GetResults()
3733 self.SubmitPending()
3734 for _, status, result, name in self.jobs:
3736 ToStdout("%s: %s", result, name)
3738 ToStderr("Failure for %s: %s", name, result)
3739 return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      # non-empty dict values are formatted recursively
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      # scalar (or empty) values: show the custom value when set, otherwise
      # mark the effective value as a default
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
def _FormatListInfoDefault(data, def_data):
  """Format a list, falling back to a "default (...)" marker.

  @param data: the custom list, or None when not set
  @param def_data: the default list, shown when data is None
  @rtype: string

  """
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    # the cluster policy is always fully populated, so it is its own
    # effective policy
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    # show the custom bounds, formatted against themselves (no defaults)
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    # no custom bounds: everything comes from the effective policy
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    # std specs are only meaningful at cluster level
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret
3825 def _PrintSpecsParameters(buf, specs):
3826 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3827 buf.write(",".join(values))
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
    # NOTE(review): the enclosing guard for the std-specs section is not
    # visible in this excerpt; presumably it skips this part for groups
    # (isgroup) -- confirm against the full source
    stdspecs = ipolicy.get("std")
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    # only complete (min, max) pairs are printed
    if minspecs and maxspecs:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
      _PrintSpecsParameters(buf, minspecs)
      _PrintSpecsParameters(buf, maxspecs)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @type extra: str
  @param extra: extra text inserted in the question, before the final prompt
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # long lists are not shown by default; offer a "view" choice instead
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    # the user asked to see the list: drop the "view" choice and re-ask
    # with the full list included
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Each value is parsed with L{utils.ParseUnit}, except for values equal to
  C{constants.VALUE_DEFAULT}, which are passed through unchanged.

  @type elements: dict
  @param elements: dictionary of string values to parse
  @rtype: dict
  @return: dictionary with the parsed values

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      # the "default" keyword is not a size, keep it as-is
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  """Fill an ipolicy dict from the legacy per-parameter --specs-* options.

  Each ispecs_* argument is a dict mapping min/max/std keys to a value for
  that parameter; the result is transposed into the min/max/std dicts that
  the ipolicy structure expects.

  @type ipolicy: dict
  @param ipolicy: policy dict to fill in place
  @type fill_all: bool
  @param fill_all: whether missing values should be filled from the defaults

  """
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError) as err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose: from {param: {min: ..}} to {min: {param: ..}}
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.items():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val

  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
def _ParseSpecUnit(spec, keyname):
  """Parse a spec dict, converting sizes given with units to integers.

  Only disk size and memory size values are converted; all other entries
  are copied unchanged.

  @type spec: dict
  @param spec: instance specs, as given on the command line
  @type keyname: string
  @param keyname: name of the spec, used only in error messages
  @rtype: dict
  @return: a copy of spec with sizes parsed
  @raise errors.OpPrereqError: when a size value cannot be parsed

  """
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError) as err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret
def _ParseISpec(spec, keyname, required):
  """Parse and validate one instance spec dictionary.

  @type spec: dict
  @param spec: instance specs, as given on the command line
  @type keyname: string
  @param keyname: name of the spec, used only in error messages
  @type required: bool
  @param required: whether all spec parameters must be present
  @rtype: dict
  @return: the parsed spec, with sizes converted to integers
  @raise errors.OpPrereqError: when required parameters are missing

  """
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret
4000 def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
4002 if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
4003 len(minmax_ispecs[0]) == 1):
4004 for (key, spec) in minmax_ispecs[0].items():
4005 # This loop is executed exactly once
4006 if key in allowed_values and not spec:
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fill an ipolicy dict from the --ipolicy-*-specs style options.

  @type ipolicy_out: dict
  @param ipolicy_out: policy dict to fill in place
  @param minmax_ispecs: list of min/max spec dicts (or a special allowed
      value), or None when not given
  @param std_ispecs: std spec dict, or None when not given
  @type group_ipolicy: bool
  @param group_ipolicy: whether the policy is at group level (std specs
      are not allowed there)
  @param allowed_values: list of special values accepted instead of a
      real min/max specification
  @raise errors.OpPrereqError: on invalid bounds keys or specs

  """
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # a special value (e.g. "default") replaces the whole bounds list
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  The legacy --specs-* options (ispecs_*) and the newer
  --ipolicy-*-specs options (minmax_ispecs, std_ispecs) are mutually
  exclusive.

  @param fill_all: whether for cluster policies we should ensure that
      all values are filled
  @rtype: dict
  @return: the created instance policy
  @raise errors.OpPrereqError: on conflicting or invalid options

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()

  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      # special value (e.g. "default"), store as-is
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  # NOTE(review): "baseind" (the per-level indentation unit) is defined
  # earlier in the full function body; not visible in this excerpt
  if isinstance(data, dict):
    # plain dictionary; keys are sorted for stable, reproducible output
    for key in sorted(data):
        buf.write(baseind * level)
      # values are rendered recursively, one level deeper
      _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    for (key, val) in data:
        buf.write(baseind * level)
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    # plain list: one entry per item, recursing one level deeper
      buf.write(baseind * level)
      buf.write(baseind[1:])
      _SerializeGenericInfo(buf, item, level + 1)
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  # strip the trailing newline: ToStdout adds one itself
  ToStdout(buf.getvalue().rstrip("\n"))