4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 "ENABLED_DISK_TEMPLATES_OPT",
89 "FILESTORE_DRIVER_OPT",
97 "GLOBAL_SHARED_FILEDIR_OPT",
102 "DEFAULT_IALLOCATOR_OPT",
103 "IDENTIFY_DEFAULTS_OPT",
104 "IGNORE_CONSIST_OPT",
106 "IGNORE_FAILURES_OPT",
107 "IGNORE_OFFLINE_OPT",
108 "IGNORE_REMOVE_FAILURES_OPT",
109 "IGNORE_SECONDARIES_OPT",
111 "INCLUDEDEFAULTS_OPT",
114 "MAINTAIN_NODE_HEALTH_OPT",
116 "MASTER_NETMASK_OPT",
118 "MIGRATION_MODE_OPT",
119 "MODIFY_ETCHOSTS_OPT",
123 "NEW_CLUSTER_CERT_OPT",
124 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
125 "NEW_CONFD_HMAC_KEY_OPT",
129 "NEW_SPICE_CERT_OPT",
131 "NOCONFLICTSCHECK_OPT",
132 "NODE_FORCE_JOIN_OPT",
134 "NODE_PLACEMENT_OPT",
138 "NODRBD_STORAGE_OPT",
144 "NOMODIFY_ETCHOSTS_OPT",
145 "NOMODIFY_SSH_SETUP_OPT",
149 "NORUNTIME_CHGS_OPT",
152 "NOSSH_KEYCHECK_OPT",
166 "PREALLOC_WIPE_DISKS_OPT",
167 "PRIMARY_IP_VERSION_OPT",
174 "REMOVE_INSTANCE_OPT",
175 "REMOVE_RESERVED_IPS_OPT",
181 "SECONDARY_ONLY_OPT",
186 "SHUTDOWN_TIMEOUT_OPT",
188 "SPECS_CPU_COUNT_OPT",
189 "SPECS_DISK_COUNT_OPT",
190 "SPECS_DISK_SIZE_OPT",
191 "SPECS_MEM_SIZE_OPT",
192 "SPECS_NIC_COUNT_OPT",
194 "IPOLICY_STD_SPECS_OPT",
195 "IPOLICY_DISK_TEMPLATES",
196 "IPOLICY_VCPU_RATIO",
202 "STARTUP_PAUSED_OPT",
211 "USE_EXTERNAL_MIP_SCRIPT",
219 "IGNORE_IPOLICY_OPT",
220 "INSTANCE_POLICY_OPTS",
221 # Generic functions for CLI programs
223 "CreateIPolicyFromOpts",
225 "GenericInstanceCreate",
231 "JobSubmittedException",
233 "RunWhileClusterStopped",
237 # Formatting functions
238 "ToStderr", "ToStdout",
241 "FormatParamsDictInfo",
243 "PrintIPolicyCommand",
253 # command line options support infrastructure
254 "ARGS_MANY_INSTANCES",
257 "ARGS_MANY_NETWORKS",
277 "OPT_COMPL_INST_ADD_NODES",
278 "OPT_COMPL_MANY_NODES",
279 "OPT_COMPL_ONE_IALLOCATOR",
280 "OPT_COMPL_ONE_INSTANCE",
281 "OPT_COMPL_ONE_NODE",
282 "OPT_COMPL_ONE_NODEGROUP",
283 "OPT_COMPL_ONE_NETWORK",
285 "OPT_COMPL_ONE_EXTSTORAGE",
290 "COMMON_CREATE_OPTS",
296 #: Priorities (sorted)
298 ("low", constants.OP_PRIO_LOW),
299 ("normal", constants.OP_PRIO_NORMAL),
300 ("high", constants.OP_PRIO_HIGH),
303 #: Priority dictionary for easier lookup
304 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
305 # we migrate to Python 2.6
306 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
308 # Query result status for clients
311 QR_INCOMPLETE) = range(3)
313 #: Maximum batch size for ChooseJob
317 # constants used to create InstancePolicy dictionary
318 TISPECS_GROUP_TYPES = {
319 constants.ISPECS_MIN: constants.VTYPE_INT,
320 constants.ISPECS_MAX: constants.VTYPE_INT,
323 TISPECS_CLUSTER_TYPES = {
324 constants.ISPECS_MIN: constants.VTYPE_INT,
325 constants.ISPECS_MAX: constants.VTYPE_INT,
326 constants.ISPECS_STD: constants.VTYPE_INT,
329 #: User-friendly names for query2 field types
331 constants.QFT_UNKNOWN: "Unknown",
332 constants.QFT_TEXT: "Text",
333 constants.QFT_BOOL: "Boolean",
334 constants.QFT_NUMBER: "Number",
335 constants.QFT_UNIT: "Storage size",
336 constants.QFT_TIMESTAMP: "Timestamp",
337 constants.QFT_OTHER: "Custom",
342 def __init__(self, min=0, max=None): # pylint: disable=W0622
347 return ("<%s min=%s max=%s>" %
348 (self.__class__.__name__, self.min, self.max))
351 class ArgSuggest(_Argument):
352 """Suggesting argument.
354 Value can be any of the ones passed to the constructor.
357 # pylint: disable=W0622
358 def __init__(self, min=0, max=None, choices=None):
359 _Argument.__init__(self, min=min, max=max)
360 self.choices = choices
363 return ("<%s min=%s max=%s choices=%r>" %
364 (self.__class__.__name__, self.min, self.max, self.choices))
367 class ArgChoice(ArgSuggest):
370 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
371 but value must be one of the choices.
376 class ArgUnknown(_Argument):
377 """Unknown argument to program (e.g. determined at runtime).
382 class ArgInstance(_Argument):
383 """Instances argument.
388 class ArgNode(_Argument):
394 class ArgNetwork(_Argument):
400 class ArgGroup(_Argument):
401 """Node group argument.
406 class ArgJobId(_Argument):
412 class ArgFile(_Argument):
413 """File path argument.
418 class ArgCommand(_Argument):
424 class ArgHost(_Argument):
430 class ArgOs(_Argument):
436 class ArgExtStorage(_Argument):
437 """ExtStorage argument.
443 ARGS_MANY_INSTANCES = [ArgInstance()]
444 ARGS_MANY_NETWORKS = [ArgNetwork()]
445 ARGS_MANY_NODES = [ArgNode()]
446 ARGS_MANY_GROUPS = [ArgGroup()]
447 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
448 ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
449 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
451 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
452 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
455 def _ExtractTagsObject(opts, args):
456 """Extract the tag type object.
458 Note that this function will modify its args parameter.
461 if not hasattr(opts, "tag_type"):
462 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
464 if kind == constants.TAG_CLUSTER:
466 elif kind in (constants.TAG_NODEGROUP,
468 constants.TAG_NETWORK,
469 constants.TAG_INSTANCE):
471 raise errors.OpPrereqError("no arguments passed to the command",
476 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
480 def _ExtendTags(opts, args):
481 """Extend the args if a source file has been given.
483 This function will extend the tags with the contents of the file
484 passed in the 'tags_source' attribute of the opts parameter. A file
485 named '-' will be replaced by stdin.
488 fname = opts.tags_source
494 new_fh = open(fname, "r")
497 # we don't use the nice 'new_data = [line.strip() for line in fh]'
498 # because of python bug 1633941
500 line = new_fh.readline()
503 new_data.append(line.strip())
506 args.extend(new_data)
509 def ListTags(opts, args):
510 """List the tags on a given object.
512 This is a generic implementation that knows how to deal with all
513 three cases of tag objects (cluster, node, instance). The opts
514 argument is expected to contain a tag_type field denoting what
515 object type we work on.
518 kind, name = _ExtractTagsObject(opts, args)
519 cl = GetClient(query=True)
520 result = cl.QueryTags(kind, name)
521 result = list(result)
527 def AddTags(opts, args):
528 """Add tags on a given object.
530 This is a generic implementation that knows how to deal with all
531 three cases of tag objects (cluster, node, instance). The opts
532 argument is expected to contain a tag_type field denoting what
533 object type we work on.
536 kind, name = _ExtractTagsObject(opts, args)
537 _ExtendTags(opts, args)
539 raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
540 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
541 SubmitOrSend(op, opts)
544 def RemoveTags(opts, args):
545 """Remove tags from a given object.
547 This is a generic implementation that knows how to deal with all
548 three cases of tag objects (cluster, node, instance). The opts
549 argument is expected to contain a tag_type field denoting what
550 object type we work on.
553 kind, name = _ExtractTagsObject(opts, args)
554 _ExtendTags(opts, args)
556 raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
557 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
558 SubmitOrSend(op, opts)
561 def check_unit(option, opt, value): # pylint: disable=W0613
562 """OptParsers custom converter for units.
566 return utils.ParseUnit(value)
567 except errors.UnitParseError, err:
568 raise OptionValueError("option %s: %s" % (opt, err))
571 def _SplitKeyVal(opt, data, parse_prefixes):
572 """Convert a KeyVal string into a dict.
574 This function will convert a key=val[,...] string into a dict. Empty
575 values will be converted specially: keys which have the prefix 'no_'
576 will have the value=False and the prefix stripped, keys with the prefix
577 "-" will have value=None and the prefix stripped, and the others will
581 @param opt: a string holding the option name for which we process the
582 data, used in building error messages
584 @param data: a string of the format key=val,key=val,...
585 @type parse_prefixes: bool
586 @param parse_prefixes: whether to handle prefixes specially
588 @return: {key=val, key=val}
589 @raises errors.ParameterError: if there are duplicate keys
594 for elem in utils.UnescapeAndSplit(data, sep=","):
596 key, val = elem.split("=", 1)
598 if elem.startswith(NO_PREFIX):
599 key, val = elem[len(NO_PREFIX):], False
600 elif elem.startswith(UN_PREFIX):
601 key, val = elem[len(UN_PREFIX):], None
603 key, val = elem, True
605 raise errors.ParameterError("Missing value for key '%s' in option %s" %
608 raise errors.ParameterError("Duplicate key '%s' in option %s" %
614 def _SplitIdentKeyVal(opt, value, parse_prefixes):
615 """Helper function to parse "ident:key=val,key=val" options.
618 @param opt: option name, used in error messages
620 @param value: expected to be in the format "ident:key=val,key=val,..."
621 @type parse_prefixes: bool
622 @param parse_prefixes: whether to handle prefixes specially (see
625 @return: (ident, {key=val, key=val})
626 @raises errors.ParameterError: in case of duplicates or other parsing errors
630 ident, rest = value, ""
632 ident, rest = value.split(":", 1)
634 if parse_prefixes and ident.startswith(NO_PREFIX):
636 msg = "Cannot pass options when removing parameter groups: %s" % value
637 raise errors.ParameterError(msg)
638 retval = (ident[len(NO_PREFIX):], False)
639 elif (parse_prefixes and ident.startswith(UN_PREFIX) and
640 (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
642 msg = "Cannot pass options when removing parameter groups: %s" % value
643 raise errors.ParameterError(msg)
644 retval = (ident[len(UN_PREFIX):], None)
646 kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
647 retval = (ident, kv_dict)
651 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
652 """Custom parser for ident:key=val,key=val options.
654 This will store the parsed values as a tuple (ident, {key: val}). As such,
655 multiple uses of this option via action=append is possible.
658 return _SplitIdentKeyVal(opt, value, True)
661 def check_key_val(option, opt, value): # pylint: disable=W0613
662 """Custom parser class for key=val,key=val options.
664 This will store the parsed values as a dict {key: val}.
667 return _SplitKeyVal(opt, value, True)
670 def _SplitListKeyVal(opt, value):
672 for elem in value.split("/"):
674 raise errors.ParameterError("Empty section in option '%s'" % opt)
675 (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
677 msg = ("Duplicated parameter '%s' in parsing %s: %s" %
679 raise errors.ParameterError(msg)
680 retval[ident] = valdict
684 def check_multilist_ident_key_val(_, opt, value):
685 """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
687 @rtype: list of dictionary
688 @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
692 for line in value.split("//"):
693 retval.append(_SplitListKeyVal(opt, line))
697 def check_bool(option, opt, value): # pylint: disable=W0613
698 """Custom parser for yes/no options.
700 This will store the parsed value as either True or False.
703 value = value.lower()
704 if value == constants.VALUE_FALSE or value == "no":
706 elif value == constants.VALUE_TRUE or value == "yes":
709 raise errors.ParameterError("Invalid boolean value '%s'" % value)
712 def check_list(option, opt, value): # pylint: disable=W0613
713 """Custom parser for comma-separated lists.
716 # we have to make this explicit check since "".split(",") is [""],
717 # not an empty list :(
721 return utils.UnescapeAndSplit(value)
724 def check_maybefloat(option, opt, value): # pylint: disable=W0613
725 """Custom parser for float numbers which might be also defaults.
728 value = value.lower()
730 if value == constants.VALUE_DEFAULT:
736 # completion_suggestion is normally a list. Using numeric values not evaluating
737 # to False for dynamic completion.
738 (OPT_COMPL_MANY_NODES,
740 OPT_COMPL_ONE_INSTANCE,
742 OPT_COMPL_ONE_EXTSTORAGE,
743 OPT_COMPL_ONE_IALLOCATOR,
744 OPT_COMPL_ONE_NETWORK,
745 OPT_COMPL_INST_ADD_NODES,
746 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
748 OPT_COMPL_ALL = compat.UniqueFrozenset([
749 OPT_COMPL_MANY_NODES,
751 OPT_COMPL_ONE_INSTANCE,
753 OPT_COMPL_ONE_EXTSTORAGE,
754 OPT_COMPL_ONE_IALLOCATOR,
755 OPT_COMPL_ONE_NETWORK,
756 OPT_COMPL_INST_ADD_NODES,
757 OPT_COMPL_ONE_NODEGROUP,
761 class CliOption(Option):
762 """Custom option class for optparse.
765 ATTRS = Option.ATTRS + [
766 "completion_suggest",
768 TYPES = Option.TYPES + (
769 "multilistidentkeyval",
777 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
778 TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
779 TYPE_CHECKER["identkeyval"] = check_ident_key_val
780 TYPE_CHECKER["keyval"] = check_key_val
781 TYPE_CHECKER["unit"] = check_unit
782 TYPE_CHECKER["bool"] = check_bool
783 TYPE_CHECKER["list"] = check_list
784 TYPE_CHECKER["maybefloat"] = check_maybefloat
787 # optparse.py sets make_option, so we do it for our own option class, too
788 cli_option = CliOption
#: Repeatable debugging flag; every occurrence increases the debug level.
DEBUG_OPT = cli_option("-d", "--debug", action="count", default=0,
                       help="Increase debugging level")
#: Suppress column headers in list-style output.
NOHDR_OPT = cli_option("--no-headers", dest="no_headers",
                       action="store_true", default=False,
                       help="Don't display column headers")
#: Custom separator between output fields (a single space when unset).
SEP_OPT = cli_option("--separator", dest="separator", action="store",
                     default=None,
                     help="Separator between output fields"
                          " (defaults to one space)")
#: Unit suffix used when formatting sizes in output.
USEUNITS_OPT = cli_option("--units", choices=("h", "m", "g", "t"),
                          dest="units", default=None,
                          help="Specify units for output (one of h/m/g/t)")
#: Explicit selection of output fields for list commands.
FIELDS_OPT = cli_option("-o", "--output", metavar="FIELDS",
                        dest="output", action="store", type="string",
                        help="Comma separated list of output fields")
#: Generic "force the operation" flag.
FORCE_OPT = cli_option("-f", "--force", dest="force", default=False,
                       action="store_true", help="Force the operation")

#: Skip the interactive confirmation prompt.
CONFIRM_OPT = cli_option("--yes", dest="confirm", default=False,
                         action="store_true",
                         help="Do not require confirmation")
819 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
820 action="store_true", default=False,
821 help=("Ignore offline nodes and do as much"
824 TAG_ADD_OPT = cli_option("--tags", dest="tags",
825 default=None, help="Comma-separated list of instance"
828 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
829 default=None, help="File with tag names")
#: Submit the job and return immediately instead of waiting for completion.
SUBMIT_OPT = cli_option("--submit", dest="submit_only", action="store_true",
                        default=False,
                        help="Submit the job and return the job ID, but don't"
                             " wait for the job to finish")
#: Take locks while querying, trading speed for result consistency.
SYNC_OPT = cli_option("--sync", dest="do_locking", action="store_true",
                      default=False,
                      help="Grab locks while doing the queries in order to"
                           " ensure more consistent results")
841 DRY_RUN_OPT = cli_option("--dry-run", default=False,
843 help=("Do not execute the operation, just run the"
844 " check steps and verify if it could be"
847 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
849 help="Increase the verbosity of the operation")
851 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
852 action="store_true", dest="simulate_errors",
853 help="Debugging option that makes the operation"
854 " treat most runtime checks as failed")
#: Do not wait for disk sync (wait_for_sync defaults to True).
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        action="store_false", default=True,
                        help="Don't wait for sync (DANGEROUS!)")

#: Explicitly wait for disk sync (wait_for_sync defaults to False here).
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        action="store_true", default=False,
                        help="Wait for disks to sync")
#: Flags toggling an instance's online/offline state.
ONLINE_INST_OPT = cli_option("--online", dest="online_inst", default=False,
                             action="store_true",
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst", default=False,
                              action="store_true",
                              help="Disable down instance")
#: Disk template selection, restricted to the known template names.
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               metavar="TEMPL", default=None,
                               choices=list(constants.DISK_TEMPLATES),
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)))
878 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
879 help="Do not create any network cards for"
882 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
883 help="Relative path under default cluster-wide"
884 " file storage dir to store file-based disks",
885 default=None, metavar="<DIR>")
#: Driver used to access file-based disk images ("loop" by default).
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  metavar="<DRIVER>", default="loop",
                                  choices=list(constants.FILE_DRIVER),
                                  help="Driver to use for image files")
#: Pick instance nodes automatically through an iallocator plugin.
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            type="string", default=None,
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR,
                            help="Select nodes for the instance automatically"
                                 " using the <NAME> iallocator plugin")
898 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
900 help="Set the default instance"
902 default=None, type="string",
903 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
905 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
907 completion_suggest=OPT_COMPL_ONE_OS)
909 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
910 type="keyval", default={},
911 help="OS parameters")
913 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
914 action="store_true", default=False,
915 help="Force an unknown variant")
917 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
918 action="store_true", default=False,
919 help="Do not install the OS (will"
922 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
923 dest="allow_runtime_chgs",
924 default=True, action="store_false",
925 help="Don't allow runtime changes")
927 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
928 type="keyval", default={},
929 help="Backend parameters")
931 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
932 default={}, dest="hvparams",
933 help="Hypervisor parameters")
935 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
936 help="Disk template parameters, in the format"
937 " template:option=value,option=value,...",
938 type="identkeyval", action="append", default=[])
#: Per-resource instance spec options; each takes key=value pairs where the
#: key is one of min, max or std.
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                default={}, type="keyval",
                                help="Memory size specs: list of key=value,"
                                     " where key is one of min, max, std"
                                     " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 default={}, type="keyval",
                                 help="CPU count specs: list of key=value,"
                                      " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  default={}, type="keyval",
                                  help="Disk count specs: list of key=value,"
                                       " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 default={}, type="keyval",
                                 help="Disk size specs: list of key=value,"
                                      " where key is one of min, max, std"
                                      " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 default={}, type="keyval",
                                 help="NIC count specs: list of key=value,"
                                      " where key is one of min, max, std")
#: Full instance policy bounds specification (multi-list of ident:key=val).
IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      default=None,
                                      type="multilistidentkeyval",
                                      help="Complete instance specs limits")
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"

#: Cluster-wide standard instance specs, given as key=value pairs.
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")
#: Instance policy: allowed disk templates (comma-separated list).
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    default=None, type="list",
                                    help="Comma-separated list of"
                                         " enabled disk templates")

#: Instance policy: upper bound on the vcpu-to-cpu ratio.
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                default=None, type="maybefloat",
                                help="The maximum allowed vcpu-to-cpu ratio")
991 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
992 dest="ipolicy_spindle_ratio",
993 type="maybefloat", default=None,
994 help=("The maximum allowed instances to"
997 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
998 help="Hypervisor and hypervisor options, in the"
999 " format hypervisor:option=value,option=value,...",
1000 default=None, type="identkeyval")
1002 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1003 help="Hypervisor and hypervisor options, in the"
1004 " format hypervisor:option=value,option=value,...",
1005 default=[], action="append", type="identkeyval")
1007 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1008 action="store_false",
1009 help="Don't check that the instance's IP"
1012 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1013 default=True, action="store_false",
1014 help="Don't check that the instance's name"
1017 NET_OPT = cli_option("--net",
1018 help="NIC parameters", default=[],
1019 dest="nics", action="append", type="identkeyval")
1021 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1022 dest="disks", action="append", type="identkeyval")
1024 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1025 help="Comma-separated list of disks"
1026 " indices to act on (e.g. 0,2) (optional,"
1027 " defaults to all disks)")
1029 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1030 help="Enforces a single-disk configuration using the"
1031 " given disk size, in MiB unless a suffix is used",
1032 default=None, type="unit", metavar="<size>")
1034 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1035 dest="ignore_consistency",
1036 action="store_true", default=False,
1037 help="Ignore the consistency of the disks on"
1040 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1041 dest="allow_failover",
1042 action="store_true", default=False,
1043 help="If migration is not possible fallback to"
1046 NONLIVE_OPT = cli_option("--non-live", dest="live",
1047 default=True, action="store_false",
1048 help="Do a non-live migration (this usually means"
1049 " freeze the instance, save the state, transfer and"
1050 " only then resume running on the secondary node)")
1052 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1054 choices=list(constants.HT_MIGRATION_MODES),
1055 help="Override default migration mode (choose"
1056 " either live or non-live")
1058 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1059 help="Target node and optional secondary node",
1060 metavar="<pnode>[:<snode>]",
1061 completion_suggest=OPT_COMPL_INST_ADD_NODES)
1063 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1064 action="append", metavar="<node>",
1065 help="Use only this node (can be used multiple"
1066 " times, if not given defaults to all nodes)",
1067 completion_suggest=OPT_COMPL_ONE_NODE)
1069 NODEGROUP_OPT_NAME = "--node-group"
1070 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1072 help="Node group (name or uuid)",
1073 metavar="<nodegroup>",
1074 default=None, type="string",
1075 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1077 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1079 completion_suggest=OPT_COMPL_ONE_NODE)
1081 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1082 action="store_false",
1083 help="Don't start the instance after creation")
1085 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1086 action="store_true", default=False,
1087 help="Show command instead of executing it")
1089 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1090 default=False, action="store_true",
1091 help="Instead of performing the migration, try to"
1092 " recover from a failed cleanup. This is safe"
1093 " to run even if the instance is healthy, but it"
1094 " will create extra replication traffic and "
1095 " disrupt briefly the replication (like during the"
1098 STATIC_OPT = cli_option("-s", "--static", dest="static",
1099 action="store_true", default=False,
1100 help="Only show configuration data, not runtime data")
1102 ALL_OPT = cli_option("--all", dest="show_all",
1103 default=False, action="store_true",
1104 help="Show info on all instances on the cluster."
1105 " This can take a long time to run, use wisely")
1107 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1108 action="store_true", default=False,
1109 help="Interactive OS reinstall, lists available"
1110 " OS templates for selection")
1112 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1113 action="store_true", default=False,
1114 help="Remove the instance from the cluster"
1115 " configuration even if there are failures"
1116 " during the removal process")
1118 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1119 dest="ignore_remove_failures",
1120 action="store_true", default=False,
1121 help="Remove the instance from the"
1122 " cluster configuration even if there"
1123 " are failures during the removal"
1126 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1127 action="store_true", default=False,
1128 help="Remove the instance from the cluster")
#: Target-node options; all complete to node names. Note that both
#: --target-node and --new-secondary store into "dst_node".
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE,
                          help="Specifies the new node for the instance")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE,
                               help="Specifies the new secondary node")

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE,
                             help="Specifies the new primary node")
#: Select which side of an internally mirrored disk template to act on.
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            action="store_true", default=False,
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              action="store_true", default=False,
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1159 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1160 default=False, action="store_true",
1161 help="Lock all nodes and auto-promote as needed"
1164 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1165 default=False, action="store_true",
1166 help="Automatically replace faulty disks"
1167 " (applies only to internally mirrored"
1168 " disk templates, e.g. %s)" %
1169 utils.CommaJoin(constants.DTS_INT_MIRROR))
1171 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1172 default=False, action="store_true",
1173 help="Ignore current recorded size"
1174 " (useful for forcing activation when"
1175 " the recorded size is wrong)")
1177 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1179 completion_suggest=OPT_COMPL_ONE_NODE)
1181 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1184 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1185 help="Specify the secondary ip for the node",
1186 metavar="ADDRESS", default=None)
1188 READD_OPT = cli_option("--readd", dest="readd",
1189 default=False, action="store_true",
1190 help="Readd old node after replacing it")
1192 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1193 default=True, action="store_false",
1194 help="Disable SSH key fingerprint checking")
1196 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1197 default=False, action="store_true",
1198 help="Force the joining of a node")
1200 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1201 type="bool", default=None, metavar=_YORNO,
1202 help="Set the master_candidate flag on the node")
1204 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1205 type="bool", default=None,
1206 help=("Set the offline flag on the node"
1207 " (cluster does not communicate with offline"
1210 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1211 type="bool", default=None,
1212 help=("Set the drained flag on the node"
1213 " (excluded from allocation operations)"))
1215 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1216 type="bool", default=None, metavar=_YORNO,
1217 help="Set the master_capable flag on the node")
1219 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1220 type="bool", default=None, metavar=_YORNO,
1221 help="Set the vm_capable flag on the node")
1223 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1224 type="bool", default=None, metavar=_YORNO,
1225 help="Set the allocatable flag on a volume")
1227 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1228 help="Disable support for lvm based instances"
1230 action="store_false", default=True)
1232 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1233 dest="enabled_hypervisors",
1234 help="Comma-separated list of hypervisors",
1235 type="string", default=None)
1237 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1238 dest="enabled_disk_templates",
1239 help="Comma-separated list of "
1241 type="string", default=None)
# NOTE(review): default={} is a single dict object shared by every parse;
# harmless for one-shot CLI invocations but worth keeping in mind.
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

# Dangerous option guarding "gnt-cluster destroy"; the joke alias is on
# purpose and must stay.
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)
1264 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1265 help="Specify the mac prefix for the instance IP"
1266 " addresses, in the format XX:XX:XX",
1270 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1271 help="Specify the node interface (cluster-wide)"
1272 " on which the master IP address will be added"
1273 " (cluster init default: %s)" %
1274 constants.DEFAULT_BRIDGE,
1278 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1279 help="Specify the netmask of the master IP",
# Tri-state: None (default) means the setting is not being changed.
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)
1291 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1292 help="Specify the default directory (cluster-"
1293 "wide) for storing the file-based disks [%s]" %
1294 pathutils.DEFAULT_FILE_STORAGE_DIR,
1296 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
# Cluster-wide storage-directory and /etc/hosts handling options.
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

# Negative form (store_false into modify_etc_hosts), used at cluster init.
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

# Tri-state yes/no form of the same setting, used by cluster modify; note it
# shares dest="modify_etc_hosts" with the option above.
MODIFY_ETCHOSTS_OPT = \
  cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
             default=None, type="bool",
             help="Defines whether the cluster should autonomously modify"
             " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)
1328 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1329 help="Type of reboot: soft/hard/full",
1330 default=constants.INSTANCE_REBOOT_HARD,
1332 choices=list(constants.REBOOT_TYPES))
# Instance shutdown/reboot behaviour options.
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")
1347 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1348 dest="shutdown_timeout", type="int",
1349 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1350 help="Maximum time to wait for instance"
1353 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1355 help=("Number of seconds between repetions of the"
1358 EARLY_RELEASE_OPT = cli_option("--early-release",
1359 dest="early_release", default=False,
1360 action="store_true",
1361 help="Release the locks on the secondary"
# Certificate renewal flag for "gnt-cluster renew-crypto"-style commands.
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")
1369 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1371 help="File containing new RAPI certificate")
1373 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1374 default=None, action="store_true",
1375 help=("Generate a new self-signed RAPI"
1378 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1380 help="File containing new SPICE certificate")
1382 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1384 help="File containing the certificate of the CA"
1385 " which signed the SPICE certificate")
1387 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1388 dest="new_spice_cert", default=None,
1389 action="store_true",
1390 help=("Generate a new self-signed SPICE"
1393 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1394 dest="new_confd_hmac_key",
1395 default=False, action="store_true",
1396 help=("Create a new HMAC key for %s" %
1399 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1400 dest="cluster_domain_secret",
1402 help=("Load new new cluster domain"
1403 " secret from file"))
1405 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1406 dest="new_cluster_domain_secret",
1407 default=False, action="store_true",
1408 help=("Create a new cluster domain"
# Miscellaneous cluster-wide behaviour options.
USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
  cli_option("--maintain-node-health", dest="maintain_node_health",
             metavar=_YORNO, default=None, type="bool",
             help="Configure the cluster to automatically maintain node"
             " health, by shutting down unknown instances, shutting down"
             " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
  cli_option("--identify-defaults", dest="identify_defaults",
             default=False, action="store_true",
             help="Identify which saved instance parameters are equal to"
             " the current cluster defaults and set them as such, instead"
             " of marking them as overridden")

# User-id pool management; values are parsed as comma-separated lists by the
# consuming code, not by optparse.
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

# The metavar shows both accepted numeric values, e.g. "4|6".
PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", default=constants.IP4_VERSION,
             action="store", dest="primary_ip_version",
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")
1487 def _PriorityOptionCb(option, _, value, parser):
1488 """Callback for processing C{--priority} option.
1491 value = _PRIONAME_TO_VALUE[value]
1493 setattr(parser.values, option.dest, value)
# Uses an optparse callback (_PriorityOptionCb) to translate the symbolic
# priority name into its numeric value before storing it.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

# OS definition flags (tri-state, None = leave unchanged).
HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")
1511 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1512 type="bool", metavar=_YORNO,
1513 dest="prealloc_wipe_disks",
1514 help=("Wipe disks prior to instance"
# Node/node-group parameter options.
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

# "SoR" = State of Record, the recorded (as opposed to observed) power state.
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")
1538 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1539 action="store_true", default=False,
1540 help=("Whether command argument should be treated"
1543 NO_REMEMBER_OPT = cli_option("--no-remember",
1545 action="store_true", default=False,
1546 help="Perform but do not record the change"
1547 " in the configuration")
# Evacuation / instance-start / group-move options.
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

# action="append" allows the option to be given several times.
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")
1574 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1576 help=("Specify disk state information in the"
1578 " storage_type/identifier:option=value,...;"
1579 " note this is unused for now"),
1582 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1584 help=("Specify hypervisor state information in the"
1585 " format hypervisor:option=value,...;"
1586 " note this is unused for now"),
# Instance policy / memory / network options.
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

# type="unit" accepts size suffixes (parsed by the custom cli_option type).
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")
1631 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1632 dest="conflicts_check",
1634 action="store_false",
1635 help="Don't check for conflicting IPs")
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1644 # common options for creating instances. add and import then add their own
1646 COMMON_CREATE_OPTS = [
1651 FILESTORE_DRIVER_OPT,
1657 NOCONFLICTSCHECK_OPT,
1669 # common instance policy options
1670 INSTANCE_POLICY_OPTS = [
1671 IPOLICY_BOUNDS_SPECS_OPT,
1672 IPOLICY_DISK_TEMPLATES,
1674 IPOLICY_SPINDLE_RATIO,
1677 # instance policy split specs options
1678 SPLIT_ISPECS_OPTS = [
1679 SPECS_CPU_COUNT_OPT,
1680 SPECS_DISK_COUNT_OPT,
1681 SPECS_DISK_SIZE_OPT,
1683 SPECS_NIC_COUNT_OPT,
1687 class _ShowUsage(Exception):
1688 """Exception class for L{_ParseArgs}.
1691 def __init__(self, exit_error):
1692 """Initializes instances of this class.
1694 @type exit_error: bool
1695 @param exit_error: Whether to report failure on exit
1698 Exception.__init__(self)
1699 self.exit_error = exit_error
1702 class _ShowVersion(Exception):
1703 """Exception class for L{_ParseArgs}.
1708 def _ParseArgs(binary, argv, commands, aliases, env_override):
1709 """Parser for the command line arguments.
1711 This function parses the arguments and returns the function which
1712 must be executed together with its (modified) arguments.
1714 @param binary: Script name
1715 @param argv: Command line arguments
1716 @param commands: Dictionary containing command definitions
1717 @param aliases: dictionary with command aliases {"alias": "target", ...}
1718 @param env_override: list of env variables allowed for default args
1719 @raise _ShowUsage: If usage description should be shown
1720 @raise _ShowVersion: If version should be shown
1723 assert not (env_override - set(commands))
1724 assert not (set(aliases.keys()) & set(commands.keys()))
1729 # No option or command given
1730 raise _ShowUsage(exit_error=True)
1732 if cmd == "--version":
1733 raise _ShowVersion()
1734 elif cmd == "--help":
1735 raise _ShowUsage(exit_error=False)
1736 elif not (cmd in commands or cmd in aliases):
1737 raise _ShowUsage(exit_error=True)
1739 # get command, unalias it, and look it up in commands
1741 if aliases[cmd] not in commands:
1742 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1743 " command '%s'" % (cmd, aliases[cmd]))
1747 if cmd in env_override:
1748 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1749 env_args = os.environ.get(args_env_name)
1751 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1753 func, args_def, parser_opts, usage, description = commands[cmd]
1754 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1755 description=description,
1756 formatter=TitledHelpFormatter(),
1757 usage="%%prog %s %s" % (cmd, usage))
1758 parser.disable_interspersed_args()
1759 options, args = parser.parse_args(args=argv[2:])
1761 if not _CheckArguments(cmd, args_def, args):
1762 return None, None, None
1764 return func, options, args
1767 def _FormatUsage(binary, commands):
1768 """Generates a nice description of all commands.
1770 @param binary: Script name
1771 @param commands: Dictionary containing command definitions
1774 # compute the max line length for cmd + usage
1775 mlen = min(60, max(map(len, commands)))
1777 yield "Usage: %s {command} [options...] [argument...]" % binary
1778 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1782 # and format a nice command list
1783 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1784 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1785 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1786 for line in help_lines:
1787 yield " %-*s %s" % (mlen, "", line)
1792 def _CheckArguments(cmd, args_def, args):
1793 """Verifies the arguments using the argument definition.
1797 1. Abort with error if values specified by user but none expected.
1799 1. For each argument in definition
1801 1. Keep running count of minimum number of values (min_count)
1802 1. Keep running count of maximum number of values (max_count)
1803 1. If it has an unlimited number of values
1805 1. Abort with error if it's not the last argument in the definition
1807 1. If last argument has limited number of values
1809 1. Abort with error if number of values doesn't match or is too large
1811 1. Abort with error if user didn't pass enough values (min_count)
1814 if args and not args_def:
1815 ToStderr("Error: Command %s expects no arguments", cmd)
1822 last_idx = len(args_def) - 1
1824 for idx, arg in enumerate(args_def):
1825 if min_count is None:
1827 elif arg.min is not None:
1828 min_count += arg.min
1830 if max_count is None:
1832 elif arg.max is not None:
1833 max_count += arg.max
1836 check_max = (arg.max is not None)
1838 elif arg.max is None:
1839 raise errors.ProgrammerError("Only the last argument can have max=None")
1842 # Command with exact number of arguments
1843 if (min_count is not None and max_count is not None and
1844 min_count == max_count and len(args) != min_count):
1845 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1848 # Command with limited number of arguments
1849 if max_count is not None and len(args) > max_count:
1850 ToStderr("Error: Command %s expects only %d argument(s)",
1854 # Command with some required arguments
1855 if min_count is not None and len(args) < min_count:
1856 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: user-supplied option value, optionally "primary:secondary"
  @return: a pair (primary, secondary); the secondary element is C{None}
    when no colon separator is present; note the first branch returns the
    two-element list produced by C{str.split}

  """
  if value and ":" in value:
    # Split only on the first colon, so the secondary part may itself
    # contain colons.
    return value.split(":", 1)
  else:
    # No (or empty) value, or no separator: no secondary node given
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    # No variants (None or empty list): the OS is known by its base name only
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected, the defaults plus
    the extra fields when the selection starts with "+", otherwise the
    comma-separated selection itself

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    # "+a,b" extends the default field list instead of replacing it
    return default + selected[1:].split(",")

  return selected.split(",")
1908 UsesRPC = rpc.RunWithRPC
1911 def AskUser(text, choices=None):
1912 """Ask the user a question.
1914 @param text: the question to ask
1916 @param choices: list with elements tuples (input_char, return_value,
1917 description); if not given, it will default to: [('y', True,
1918 'Perform the operation'), ('n', False, 'Do no do the operation')];
1919 note that the '?' char is reserved for help
1921 @return: one of the return values from the choices list; if input is
1922 not possible (i.e. not running with a tty, we return the last
1927 choices = [("y", True, "Perform the operation"),
1928 ("n", False, "Do not perform the operation")]
1929 if not choices or not isinstance(choices, list):
1930 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1931 for entry in choices:
1932 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1933 raise errors.ProgrammerError("Invalid choices element to AskUser")
1935 answer = choices[-1][1]
1937 for line in text.splitlines():
1938 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1939 text = "\n".join(new_text)
1941 f = file("/dev/tty", "a+")
1945 chars = [entry[0] for entry in choices]
1946 chars[-1] = "[%s]" % chars[-1]
1948 maps = dict([(entry[0], entry[1]) for entry in choices])
1952 f.write("/".join(chars))
1954 line = f.readline(2).strip().lower()
1959 for entry in choices:
1960 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
1979 def SendJob(ops, cl=None):
1980 """Function to submit an opcode without waiting for the results.
1983 @param ops: list of opcodes
1984 @type cl: luxi.Client
1985 @param cl: the luxi client to use for communicating with the master;
1986 if None, a new client will be created
1992 job_id = cl.SubmitJob(ops)
1997 def GenericPollJob(job_id, cbs, report_cbs):
1998 """Generic job-polling function.
2000 @type job_id: number
2001 @param job_id: Job ID
2002 @type cbs: Instance of L{JobPollCbBase}
2003 @param cbs: Data callbacks
2004 @type report_cbs: Instance of L{JobPollReportCbBase}
2005 @param report_cbs: Reporting callbacks
2008 prev_job_info = None
2009 prev_logmsg_serial = None
2014 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2017 # job not found, go away!
2018 raise errors.JobLost("Job with id %s lost" % job_id)
2020 if result == constants.JOB_NOTCHANGED:
2021 report_cbs.ReportNotChanged(job_id, status)
2026 # Split result, a tuple of (field values, log entries)
2027 (job_info, log_entries) = result
2028 (status, ) = job_info
2031 for log_entry in log_entries:
2032 (serial, timestamp, log_type, message) = log_entry
2033 report_cbs.ReportLogMessage(job_id, serial, timestamp,
2035 prev_logmsg_serial = max(prev_logmsg_serial, serial)
2037 # TODO: Handle canceled and archived jobs
2038 elif status in (constants.JOB_STATUS_SUCCESS,
2039 constants.JOB_STATUS_ERROR,
2040 constants.JOB_STATUS_CANCELING,
2041 constants.JOB_STATUS_CANCELED):
2044 prev_job_info = job_info
2046 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2048 raise errors.JobLost("Job with id %s lost" % job_id)
2050 status, opstatus, result = jobs[0]
2052 if status == constants.JOB_STATUS_SUCCESS:
2055 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2056 raise errors.OpExecError("Job was canceled")
2059 for idx, (status, msg) in enumerate(zip(opstatus, result)):
2060 if status == constants.OP_STATUS_SUCCESS:
2062 elif status == constants.OP_STATUS_ERROR:
2063 errors.MaybeRaise(msg)
2066 raise errors.OpExecError("partial failure (opcode %d): %s" %
2069 raise errors.OpExecError(str(msg))
2071 # default failure mode
2072 raise errors.OpExecError(result)
2075 class JobPollCbBase:
2076 """Base class for L{GenericPollJob} callbacks.
2080 """Initializes this class.
2084 def WaitForJobChangeOnce(self, job_id, fields,
2085 prev_job_info, prev_log_serial):
2086 """Waits for changes on a job.
2089 raise NotImplementedError()
2091 def QueryJobs(self, job_ids, fields):
2092 """Returns the selected fields for the selected job IDs.
2094 @type job_ids: list of numbers
2095 @param job_ids: Job IDs
2096 @type fields: list of strings
2097 @param fields: Fields
2100 raise NotImplementedError()
2103 class JobPollReportCbBase:
2104 """Base class for L{GenericPollJob} reporting callbacks.
2108 """Initializes this class.
2112 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2113 """Handles a log message.
2116 raise NotImplementedError()
2118 def ReportNotChanged(self, job_id, status):
2119 """Called for if a job hasn't changed in a while.
2121 @type job_id: number
2122 @param job_id: Job ID
2123 @type status: string or None
2124 @param status: Job status if available
2127 raise NotImplementedError()
2130 class _LuxiJobPollCb(JobPollCbBase):
2131 def __init__(self, cl):
2132 """Initializes this class.
2135 JobPollCbBase.__init__(self)
2138 def WaitForJobChangeOnce(self, job_id, fields,
2139 prev_job_info, prev_log_serial):
2140 """Waits for changes on a job.
2143 return self.cl.WaitForJobChangeOnce(job_id, fields,
2144 prev_job_info, prev_log_serial)
2146 def QueryJobs(self, job_ids, fields):
2147 """Returns the selected fields for the selected job IDs.
2150 return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callback that forwards log entries to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: Callable receiving C{(timestamp, log_type, log_msg)}
      tuples for every log entry

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    Deliberately a no-op: a feedback function only consumes log entries.

    """
2177 class StdioJobPollReportCb(JobPollReportCbBase):
2179 """Initializes this class.
2182 JobPollReportCbBase.__init__(self)
2184 self.notified_queued = False
2185 self.notified_waitlock = False
2187 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2188 """Handles a log message.
2191 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2192 FormatLogMessage(log_type, log_msg))
2194 def ReportNotChanged(self, job_id, status):
2195 """Called if a job hasn't changed in a while.
2201 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2202 ToStderr("Job %s is waiting in queue", job_id)
2203 self.notified_queued = True
2205 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2206 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2207 self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: the type of the message (one of the C{constants.ELOG_*}
    values — TODO confirm)
  @param log_msg: the message itself; anything that is not a plain message
    entry is stringified before encoding
  @return: the message passed through L{utils.SafeEncode}

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
2220 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2221 """Function to poll for the result of a job.
2223 @type job_id: job identified
2224 @param job_id: the job to poll for results
2225 @type cl: luxi.Client
2226 @param cl: the luxi client to use for communicating with the master;
2227 if None, a new client will be created
2233 if reporter is None:
2235 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2237 reporter = StdioJobPollReportCb()
2239 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2241 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2244 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2245 """Legacy function to submit an opcode.
2247 This is just a simple wrapper over the construction of the processor
2248 instance. It should be extended to better handle feedback and
2249 interaction functions.
2255 SetGenericOpcodeOpts([op], opts)
2257 job_id = SendJob([op], cl=cl)
2259 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2262 return op_results[0]
2265 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2266 """Wrapper around SubmitOpCode or SendJob.
2268 This function will decide, based on the 'opts' parameter, whether to
2269 submit and wait for the result of the opcode (and return it), or
2270 whether to just send the job and print its identifier. It is used in
2271 order to simplify the implementation of the '--submit' option.
2273 It will also process the opcodes if we're sending the via SendJob
2274 (otherwise SubmitOpCode does it).
2277 if opts and opts.submit_only:
2279 SetGenericOpcodeOpts(job, opts)
2280 job_id = SendJob(job, cl=cl)
2281 raise JobSubmittedException(job_id)
2283 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2286 def _InitReasonTrail(op, opts):
2287 """Builds the first part of the reason trail
2289 Builds the initial part of the reason trail, adding the user provided reason
2290 (if it exists) and the name of the command starting the operation.
2292 @param op: the opcode the reason trail will be added to
2293 @param opts: the command line options selected by the user
2296 assert len(sys.argv) >= 2
2300 trail.append((constants.OPCODE_REASON_SRC_USER,
2304 binary = os.path.basename(sys.argv[0])
2305 source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2306 command = sys.argv[1]
2307 trail.append((source, command, utils.EpochNano()))
2311 def SetGenericOpcodeOpts(opcode_list, options):
2312 """Processor for generic options.
2314 This function updates the given opcodes based on generic command
2315 line options (like debug, dry-run, etc.).
2317 @param opcode_list: list of opcodes
2318 @param options: command line options or None
2319 @return: None (in-place modification)
2324 for op in opcode_list:
2325 op.debug_level = options.debug
2326 if hasattr(options, "dry_run"):
2327 op.dry_run = options.dry_run
2328 if getattr(options, "priority", None) is not None:
2329 op.priority = options.priority
2330 _InitReasonTrail(op, options)
2333 def GetClient(query=False):
2334 """Connects to the a luxi socket and returns a client.
2336 @type query: boolean
2337 @param query: this signifies that the client will only be
2338 used for queries; if the build-time parameter
2339 enable-split-queries is enabled, then the client will be
2340 connected to the query socket instead of the masterd socket
2343 override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2345 if override_socket == constants.LUXI_OVERRIDE_MASTER:
2346 address = pathutils.MASTER_SOCKET
2347 elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2348 address = pathutils.QUERY_SOCKET
2350 address = override_socket
2351 elif query and constants.ENABLE_SPLIT_QUERY:
2352 address = pathutils.QUERY_SOCKET
2355 # TODO: Cache object?
2357 client = luxi.Client(address=address)
2358 except luxi.NoMasterError:
2359 ss = ssconf.SimpleStore()
2361 # Try to read ssconf file
2364 except errors.ConfigurationError:
2365 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2366 " not part of a cluster",
2369 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2370 if master != myself:
2371 raise errors.OpPrereqError("This is not the master node, please connect"
2372 " to node '%s' and rerun the command" %
2373 master, errors.ECODE_INVAL)
# NOTE(review): maps each known exception type to a recommended exit code and a
# human-readable message; returns (retcode, text).  This excerpt has gaps in the
# embedded original line numbering (e.g. 2385-2389, which initialize retcode,
# the output buffer `obuf` and `msg`, and several `else:` lines) -- restore the
# missing lines from upstream before editing this block.
2378 def FormatError(err):
2379 """Return a formatted error message for a given error.
2381 This function takes an exception instance and returns a tuple
2382 consisting of two values: first, the recommended exit code, and
2383 second, a string describing the error message (not
2384 newline-terminated).
# Ganeti-specific error types are matched first, from most to least specific.
2390 if isinstance(err, errors.ConfigurationError):
2391 txt = "Corrupt configuration file: %s" % msg
2393 obuf.write(txt + "\n")
2394 obuf.write("Aborting.")
2396 elif isinstance(err, errors.HooksAbort):
2397 obuf.write("Failure: hooks execution failed:\n")
2398 for node, script, out in err.args[0]:
2400 obuf.write(" node: %s, script: %s, output: %s\n" %
2401 (node, script, out))
2403 obuf.write(" node: %s, script: %s (no output)\n" %
2405 elif isinstance(err, errors.HooksFailure):
2406 obuf.write("Failure: hooks general failure: %s" % msg)
2407 elif isinstance(err, errors.ResolverError):
# Distinguish "cannot resolve own hostname" from a generic resolver failure
2408 this_host = netutils.Hostname.GetSysName()
2409 if err.args[0] == this_host:
2410 msg = "Failure: can't resolve my own hostname ('%s')"
2412 msg = "Failure: can't resolve hostname '%s'"
2413 obuf.write(msg % err.args[0])
2414 elif isinstance(err, errors.OpPrereqError):
# Two-argument OpPrereqError carries (details, error-type) -- see raisers below
2415 if len(err.args) == 2:
2416 obuf.write("Failure: prerequisites not met for this"
2417 " operation:\nerror type: %s, error details:\n%s" %
2418 (err.args[1], err.args[0]))
2420 obuf.write("Failure: prerequisites not met for this"
2421 " operation:\n%s" % msg)
2422 elif isinstance(err, errors.OpExecError):
2423 obuf.write("Failure: command execution error:\n%s" % msg)
2424 elif isinstance(err, errors.TagError):
2425 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2426 elif isinstance(err, errors.JobQueueDrainError):
2427 obuf.write("Failure: the job queue is marked for drain and doesn't"
2428 " accept new requests\n")
2429 elif isinstance(err, errors.JobQueueFull):
2430 obuf.write("Failure: the job queue is full and doesn't accept new"
2431 " job submissions until old jobs are archived\n")
2432 elif isinstance(err, errors.TypeEnforcementError):
2433 obuf.write("Parameter Error: %s" % msg)
2434 elif isinstance(err, errors.ParameterError):
2435 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
# luxi transport-level errors follow the Ganeti-specific error types
2436 elif isinstance(err, luxi.NoMasterError):
2437 if err.args[0] == pathutils.MASTER_SOCKET:
2438 daemon = "the master daemon"
2439 elif err.args[0] == pathutils.QUERY_SOCKET:
2440 daemon = "the config daemon"
2442 daemon = "socket '%s'" % str(err.args[0])
2443 obuf.write("Cannot communicate with %s.\nIs the process running"
2444 " and listening for connections?" % daemon)
2445 elif isinstance(err, luxi.TimeoutError):
2446 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2447 " been submitted and will continue to run even if the call"
2448 " timed out. Useful commands in this situation are \"gnt-job"
2449 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2451 elif isinstance(err, luxi.PermissionError):
2452 obuf.write("It seems you don't have permissions to connect to the"
2453 " master daemon.\nPlease retry as a different user.")
2454 elif isinstance(err, luxi.ProtocolError):
2455 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2457 elif isinstance(err, errors.JobLost):
2458 obuf.write("Error checking job status: %s" % msg)
2459 elif isinstance(err, errors.QueryFilterParseError):
2460 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2461 obuf.write("\n".join(err.GetDetails()))
# errors.GenericError is the base class, so it must be tested last among
# the Ganeti errors
2462 elif isinstance(err, errors.GenericError):
2463 obuf.write("Unhandled Ganeti error: %s" % msg)
2464 elif isinstance(err, JobSubmittedException):
2465 obuf.write("JobID: %s\n" % err.args[0])
# Fall-through: anything unrecognized is reported verbatim
2468 obuf.write("Unhandled exception: %s" % msg)
2469 return retcode, obuf.getvalue().rstrip("\n")
# NOTE(review): top-level driver shared by all gnt-* command-line tools:
# parses arguments, sets up logging, dispatches to the selected command
# function and converts exceptions to exit codes.  The embedded original
# numbering has gaps (e.g. the try/else around _ParseArgs, several return
# statements and the final `return result`) -- restore from upstream before
# editing.
2472 def GenericMain(commands, override=None, aliases=None,
2473 env_override=frozenset()):
2474 """Generic main function for all the gnt-* commands.
2476 @param commands: a dictionary with a special structure, see the design doc
2477 for command line handling.
2478 @param override: if not None, we expect a dictionary with keys that will
2479 override command line options; this can be used to pass
2480 options from the scripts to generic functions
2481 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2482 @param env_override: list of environment names which are allowed to submit
2483 default args for commands
2486 # save the program name and the entire command line for later logging
2488 binary = os.path.basename(sys.argv[0])
2490 binary = sys.argv[0]
2492 if len(sys.argv) >= 2:
2493 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2497 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
# Fallback values when sys.argv is unusable
2499 binary = "<unknown program>"
2500 cmdline = "<unknown>"
# _ParseArgs signals --version/--help via private exceptions, handled below
2506 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2508 except _ShowVersion:
2509 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2510 constants.RELEASE_VERSION)
2511 return constants.EXIT_SUCCESS
2512 except _ShowUsage, err:
2513 for line in _FormatUsage(binary, commands):
2517 return constants.EXIT_FAILURE
2519 return constants.EXIT_SUCCESS
2520 except errors.ParameterError, err:
2521 result, err_msg = FormatError(err)
2525 if func is None: # parse error
2528 if override is not None:
2529 for key, val in override.iteritems():
2530 setattr(options, key, val)
2532 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2533 stderr_logging=True)
2535 logging.info("Command line: %s", cmdline)
# Run the actual command; known error types become formatted messages
2538 result = func(options, args)
2539 except (errors.GenericError, luxi.ProtocolError,
2540 JobSubmittedException), err:
2541 result, err_msg = FormatError(err)
2542 logging.exception("Error during command processing")
2544 except KeyboardInterrupt:
2545 result = constants.EXIT_FAILURE
2546 ToStderr("Aborted. Note that if the operation created any jobs, they"
2547 " might have been submitted and"
2548 " will continue to run in the background.")
2549 except IOError, err:
2550 if err.errno == errno.EPIPE:
2551 # our terminal went away, we'll exit
2552 sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @type optvalue: list of (index, dict) pairs
  @param optvalue: NIC specifications as produced by option parsing
  @rtype: list of dicts
  @return: one parameter dictionary per NIC index
  @raise errors.OpPrereqError: if an index or a specification is invalid

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError) as err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  # NOTE: "[{}] * nic_max" repeats one shared dict object; supplied indices
  # are replaced below, but all unspecified indices alias the same empty dict
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
# NOTE(review): shared implementation behind "gnt-instance add" and
# "gnt-instance import": validates node/NIC/disk options, then builds and
# submits an OpInstanceCreate opcode.  The embedded numbering has gaps
# (e.g. `instance = args[0]`, try: lines, several else: branches and keyword
# arguments of the opcode) -- restore from upstream before editing.
2584 def GenericInstanceCreate(mode, opts, args):
2585 """Add an instance to the cluster via either creation or import.
2587 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2588 @param opts: the command line options selected by the user
2590 @param args: should contain only one element, the new instance name
2592 @return: the desired exit code
2597 (pnode, snode) = SplitNodeOption(opts.node)
2602 hypervisor, hvparams = opts.hypervisor
2605 nics = ParseNicOption(opts.nics)
2609 elif mode == constants.INSTANCE_CREATE:
2610 # default of one nic, all auto
# Disk validation: --disk and -s are mutually exclusive; diskless templates
# must not receive any disk information
2616 if opts.disk_template == constants.DT_DISKLESS:
2617 if opts.disks or opts.sd_size is not None:
2618 raise errors.OpPrereqError("Diskless instance but disk"
2619 " information passed", errors.ECODE_INVAL)
2622 if (not opts.disks and not opts.sd_size
2623 and mode == constants.INSTANCE_CREATE):
2624 raise errors.OpPrereqError("No disk information specified",
2626 if opts.disks and opts.sd_size is not None:
2627 raise errors.OpPrereqError("Please use either the '--disk' or"
2628 " '-s' option", errors.ECODE_INVAL)
2629 if opts.sd_size is not None:
2630 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2634 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2635 except ValueError, err:
2636 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2638 disks = [{}] * disk_max
2641 for didx, ddict in opts.disks:
2643 if not isinstance(ddict, dict):
2644 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2645 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
# Each disk must carry either a size or an adoption source, never both
2646 elif constants.IDISK_SIZE in ddict:
2647 if constants.IDISK_ADOPT in ddict:
2648 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2649 " (disk %d)" % didx, errors.ECODE_INVAL)
2651 ddict[constants.IDISK_SIZE] = \
2652 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2653 except ValueError, err:
2654 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2655 (didx, err), errors.ECODE_INVAL)
2656 elif constants.IDISK_ADOPT in ddict:
2657 if mode == constants.INSTANCE_IMPORT:
2658 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2659 " import", errors.ECODE_INVAL)
2660 ddict[constants.IDISK_SIZE] = 0
2662 raise errors.OpPrereqError("Missing size or adoption source for"
2663 " disk %d" % didx, errors.ECODE_INVAL)
2666 if opts.tags is not None:
2667 tags = opts.tags.split(",")
2671 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2672 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
# Mode-specific settings: create reads OS/install options, import reads the
# source node/directory
2674 if mode == constants.INSTANCE_CREATE:
2677 force_variant = opts.force_variant
2680 no_install = opts.no_install
2681 identify_defaults = False
2682 elif mode == constants.INSTANCE_IMPORT:
2685 force_variant = False
2686 src_node = opts.src_node
2687 src_path = opts.src_dir
2689 identify_defaults = opts.identify_defaults
2691 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2693 op = opcodes.OpInstanceCreate(instance_name=instance,
2695 disk_template=opts.disk_template,
2697 conflicts_check=opts.conflicts_check,
2698 pnode=pnode, snode=snode,
2699 ip_check=opts.ip_check,
2700 name_check=opts.name_check,
2701 wait_for_sync=opts.wait_for_sync,
2702 file_storage_dir=opts.file_storage_dir,
2703 file_driver=opts.file_driver,
2704 iallocator=opts.iallocator,
2705 hypervisor=hypervisor,
2707 beparams=opts.beparams,
2708 osparams=opts.osparams,
2712 force_variant=force_variant,
2716 no_install=no_install,
2717 identify_defaults=identify_defaults,
2718 ignore_ipolicy=opts.ignore_ipolicy)
2720 SubmitOrSend(op, opts)
# NOTE(review): stops all Ganeti daemons cluster-wide, runs a callback, then
# restarts the daemons (master last).  The embedded numbering has gaps -- in
# particular the try/finally scaffolding in Call() and the `raise` after the
# exception handler (original lines ~2787, 2803, 2809-2810, 2815-2816) are
# missing from this excerpt; restore from upstream before editing.
2724 class _RunWhileClusterStoppedHelper:
2725 """Helper class for L{RunWhileClusterStopped} to simplify state management
2728 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2729 """Initializes this class.
2731 @type feedback_fn: callable
2732 @param feedback_fn: Feedback function
2733 @type cluster_name: string
2734 @param cluster_name: Cluster name
2735 @type master_node: string
2736 @param master_node Master node name
2737 @type online_nodes: list
2738 @param online_nodes: List of names of online nodes
2741 self.feedback_fn = feedback_fn
2742 self.cluster_name = cluster_name
2743 self.master_node = master_node
2744 self.online_nodes = online_nodes
# SSH runner used for commands on nodes other than the master
2746 self.ssh = ssh.SshRunner(self.cluster_name)
2748 self.nonmaster_nodes = [name for name in online_nodes
2749 if name != master_node]
2751 assert self.master_node not in self.nonmaster_nodes
2753 def _RunCmd(self, node_name, cmd):
2754 """Runs a command on the local or a remote machine.
2756 @type node_name: string
2757 @param node_name: Machine name
# None or the master node means "run locally"; everything else goes via SSH
2762 if node_name is None or node_name == self.master_node:
2763 # No need to use SSH
2764 result = utils.RunCmd(cmd)
2766 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2767 utils.ShellQuoteArgs(cmd))
2770 errmsg = ["Failed to run command %s" % result.cmd]
2772 errmsg.append("on node %s" % node_name)
2773 errmsg.append(": exitcode %s and error %s" %
2774 (result.exit_code, result.output))
2775 raise errors.OpExecError(" ".join(errmsg))
2777 def Call(self, fn, *args):
2778 """Call function while all daemons are stopped.
2781 @param fn: Function to be called
2784 # Pause watcher by acquiring an exclusive lock on watcher state file
2785 self.feedback_fn("Blocking watcher")
2786 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2788 # TODO: Currently, this just blocks. There's no timeout.
2789 # TODO: Should it be a shared lock?
2790 watcher_block.Exclusive(blocking=True)
2792 # Stop master daemons, so that no new jobs can come in and all running
2794 self.feedback_fn("Stopping master daemons")
2795 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2797 # Stop daemons on all nodes
2798 for node_name in self.online_nodes:
2799 self.feedback_fn("Stopping daemons on %s" % node_name)
2800 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2802 # All daemons are shut down now
2804 return fn(self, *args)
2805 except Exception, err:
2806 _, errmsg = FormatError(err)
2807 logging.exception("Caught exception")
2808 self.feedback_fn(errmsg)
2811 # Start cluster again, master node last
2812 for node_name in self.nonmaster_nodes + [self.master_node]:
2813 self.feedback_fn("Starting daemons on %s" % node_name)
2814 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
# Resume watcher
2817 watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
# NOTE(review): legacy (pre-query2) table formatter: computes per-column
# widths when no separator is given, applies unit formatting, and returns the
# formatted lines.  The embedded numbering has gaps (e.g. the `units`
# parameter default, `format_fields = []`/`result = []` initialization, the
# `for row in data:` loop header and the trailing `return result`) -- restore
# from upstream before editing.
2848 def GenerateTable(headers, fields, separator, data,
2849 numfields=None, unitfields=None,
2851 """Prints a table with headers and different fields.
2854 @param headers: dictionary mapping field names to headers for
2857 @param fields: the field names corresponding to each row in
2859 @param separator: the separator to be used; if this is None,
2860 the default 'smart' algorithm is used which computes optimal
2861 field width, otherwise just the separator is used between
2864 @param data: a list of lists, each sublist being one row to be output
2865 @type numfields: list
2866 @param numfields: a list with the fields that hold numeric
2867 values and thus should be right-aligned
2868 @type unitfields: list
2869 @param unitfields: a list with the fields that hold numeric
2870 values that should be formatted with the units field
2871 @type units: string or None
2872 @param units: the units we should use for formatting, or None for
2873 automatic choice (human-readable for non-separator usage, otherwise
2874 megabytes); this is a one-letter string
2883 if numfields is None:
2885 if unitfields is None:
# FieldSet supports wildcard matching of field names
2888 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2889 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2892 for field in fields:
2893 if headers and field not in headers:
2894 # TODO: handle better unknown fields (either revert to old
2895 # style of raising exception, or deal more intelligently with
2897 headers[field] = field
# Choose a printf-style conversion per column: plain when a separator is
# used, right-aligned for numeric fields, left-aligned otherwise
2898 if separator is not None:
2899 format_fields.append("%s")
2900 elif numfields.Matches(field):
2901 format_fields.append("%*s")
2903 format_fields.append("%-*s")
2905 if separator is None:
2906 mlens = [0 for name in fields]
2907 format_str = " ".join(format_fields)
# Escape "%" in the separator so the later "%"-formatting is safe
2909 format_str = separator.replace("%", "%%").join(format_fields)
2914 for idx, val in enumerate(row):
2915 if unitfields.Matches(fields[idx]):
2918 except (TypeError, ValueError):
2921 val = row[idx] = utils.FormatUnit(val, units)
2922 val = row[idx] = str(val)
# Track the maximum width per column for the smart-width algorithm
2923 if separator is None:
2924 mlens[idx] = max(mlens[idx], len(val))
2929 for idx, name in enumerate(fields):
2931 if separator is None:
2932 mlens[idx] = max(mlens[idx], len(hdr))
2933 args.append(mlens[idx])
2935 result.append(format_str % tuple(args))
2937 if separator is None:
2938 assert len(mlens) == len(fields)
# The last column needs no padding unless it is right-aligned
2940 if fields and not numfields.Matches(fields[-1]):
2946 line = ["-" for _ in fields]
2947 for idx in range(len(fields)):
2948 if separator is None:
2949 args.append(mlens[idx])
2950 args.append(line[idx])
2951 result.append(format_str % tuple(args))
2956 def _FormatBool(value):
2957 """Formats a boolean value as a string.
# NOTE(review): module-level lookup table mapping query field kinds to a
# (formatter, align-right) pair; consumed by _GetColumnFormatter.  The dict's
# closing brace (original line ~2973) is missing from this excerpt.  QFT_UNIT
# is deliberately absent: unit formatting needs the runtime `unit` argument
# and is handled separately in _GetColumnFormatter.
2965 #: Default formatting for query results; (callback, align right)
2966 _DEFAULT_FORMAT_QUERY = {
2967 constants.QFT_TEXT: (str, False),
2968 constants.QFT_BOOL: (_FormatBool, False),
2969 constants.QFT_NUMBER: (str, True),
2970 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2971 constants.QFT_OTHER: (str, False),
2972 constants.QFT_UNKNOWN: (str, False),
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3007 class _QueryColumnFormatter:
3008 """Callable class for formatting fields of a query.
3011 def __init__(self, fn, status_fn, verbose):
3012 """Initializes this class.
3015 @param fn: Formatting function
3016 @type status_fn: callable
3017 @param status_fn: Function to report fields' status
3018 @type verbose: boolean
3019 @param verbose: whether to use verbose field descriptions or not
3023 self._status_fn = status_fn
3024 self._verbose = verbose
3026 def __call__(self, data):
3027 """Returns a field's string representation.
3030 (status, value) = data
3033 self._status_fn(status)
3035 if status == constants.RS_NORMAL:
3036 return self._fn(value)
3038 assert value is None, \
3039 "Found value %r for abnormal status %s" % (value, status)
3041 return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
# NOTE(review): turns a QueryResponse into formatted table lines plus an
# overall status (QR_NORMAL / QR_INCOMPLETE / QR_UNKNOWN).  The embedded
# numbering has gaps (e.g. the automatic `unit` default, the body of
# _RecordStatus, `columns = []` and the QR_* status assignments) -- restore
# from upstream before editing.
3065 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3066 header=False, verbose=False):
3067 """Formats data in L{objects.QueryResponse}.
3069 @type result: L{objects.QueryResponse}
3070 @param result: result of query operation
3072 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3073 see L{utils.text.FormatUnit}
3074 @type format_override: dict
3075 @param format_override: Dictionary for overriding field formatting functions,
3076 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3077 @type separator: string or None
3078 @param separator: String used to separate fields
3080 @param header: Whether to output header row
3081 @type verbose: boolean
3082 @param verbose: whether to use verbose field descriptions or not
3091 if format_override is None:
3092 format_override = {}
# Per-status counters, filled in by the _RecordStatus callback below
3094 stats = dict.fromkeys(constants.RS_ALL, 0)
3096 def _RecordStatus(status):
3101 for fdef in result.fields:
3102 assert fdef.title and fdef.name
3103 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3104 columns.append(TableColumn(fdef.title,
3105 _QueryColumnFormatter(fn, _RecordStatus,
3109 table = FormatTable(result.data, columns, header, separator)
3111 # Collect statistics
3112 assert len(stats) == len(constants.RS_ALL)
3113 assert compat.all(count >= 0 for count in stats.values())
3115 # Determine overall status. If there was no data, unknown fields must be
3116 # detected via the field definitions.
3117 if (stats[constants.RS_UNKNOWN] or
3118 (not result.data and _GetUnknownFields(result.fields))):
3120 elif compat.any(count > 0 for key, count in stats.items()
3121 if key != constants.RS_NORMAL):
3122 status = QR_INCOMPLETE
3126 return (status, table)
3129 def _GetUnknownFields(fdefs):
3130 """Returns list of unknown fields included in C{fdefs}.
3132 @type fdefs: list of L{objects.QueryFieldDefinition}
3135 return [fdef for fdef in fdefs
3136 if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
# NOTE(review): generic "gnt-* list" implementation: builds a query filter
# from names, runs the query via luxi, formats the result and maps the
# overall status to an exit code.  The embedded numbering has gaps (e.g.
# `if not names: names = None`, the `if qfilter is None:` line and the
# `cl = GetClient()` default) -- restore from upstream before editing.
3154 def GenericList(resource, fields, names, unit, separator, header, cl=None,
3155 format_override=None, verbose=False, force_filter=False,
3156 namefield=None, qfilter=None, isnumeric=False):
3157 """Generic implementation for listing all items of a resource.
3159 @param resource: One of L{constants.QR_VIA_LUXI}
3160 @type fields: list of strings
3161 @param fields: List of fields to query for
3162 @type names: list of strings
3163 @param names: Names of items to query for
3164 @type unit: string or None
3165 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3166 None for automatic choice (human-readable for non-separator usage,
3167 otherwise megabytes); this is a one-letter string
3168 @type separator: string or None
3169 @param separator: String used to separate fields
3171 @param header: Whether to show header row
3172 @type force_filter: bool
3173 @param force_filter: Whether to always treat names as filter
3174 @type format_override: dict
3175 @param format_override: Dictionary for overriding field formatting functions,
3176 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3177 @type verbose: boolean
3178 @param verbose: whether to use verbose field descriptions or not
3179 @type namefield: string
3180 @param namefield: Name of field to use for simple filters (see
3181 L{qlang.MakeFilter} for details)
3182 @type qfilter: list or None
3183 @param qfilter: Query filter (in addition to names)
3184 @param isnumeric: bool
3185 @param isnumeric: Whether the namefield's type is numeric, and therefore
3186 any simple filters built by namefield should use integer values to
# Combine the name-based filter with any caller-supplied filter via OP_AND
3193 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3194 isnumeric=isnumeric)
3197 qfilter = namefilter
3198 elif namefilter is not None:
3199 qfilter = [qlang.OP_AND, namefilter, qfilter]
3204 response = cl.Query(resource, fields, qfilter)
3206 found_unknown = _WarnUnknownFields(response.fields)
3208 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3210 format_override=format_override,
# Sanity check: unknown fields in the response and QR_UNKNOWN status must
# agree with each other
3216 assert ((found_unknown and status == QR_UNKNOWN) or
3217 (not found_unknown and status != QR_UNKNOWN))
3219 if status == QR_UNKNOWN:
3220 return constants.EXIT_UNKNOWN_FIELD
3222 # TODO: Should the list command fail if not all data could be collected?
3223 return constants.EXIT_SUCCESS
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list
  @return: name, human-readable kind name, title and documentation string

  """
  return [
    fdef.name,
    # Fall back to the raw kind if it has no human-readable name
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  # An empty field list means "describe all fields"
  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [_FieldDescValues(fdef) for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
3301 def _GetColFormatString(width, align_right):
3302 """Returns the format string for a field.
3310 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # Malformed input is reported as unknown rather than raising
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds represented by C{value}
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)

  # Seconds per suffix unit
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value:  # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)

    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)

  return value
# NOTE(review): queries node name/offline/sip via luxi, partitions offline
# from online nodes and returns names (or secondary IPs).  The embedded
# numbering has gaps (e.g. `cl = GetClient()` default, `qfilter = []`
# initialization, the return bodies of _IsOffline/_GetName/_GetSip and the
# selection of `fn` based on secondary_ips) -- restore from upstream before
# editing.
3424 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3425 filter_master=False, nodegroup=None):
3426 """Returns the names of online nodes.
3428 This function will also log a warning on stderr with the names of
3431 @param nodes: if not empty, use only this subset of nodes (minus the
3433 @param cl: if not None, luxi client to use
3434 @type nowarn: boolean
3435 @param nowarn: by default, this function will output a note with the
3436 offline nodes that are skipped; if this parameter is True the
3437 note is not displayed
3438 @type secondary_ips: boolean
3439 @param secondary_ips: if True, return the secondary IPs instead of the
3440 names, useful for doing network traffic over the replication interface
3442 @type filter_master: boolean
3443 @param filter_master: if True, do not return the master node in the list
3444 (useful in coordination with secondary_ips where we cannot check our
3445 node name against the list)
3446 @type nodegroup: string
3447 @param nodegroup: If set, only return nodes in this node group
# Build the query filter from the optional constraints
3456 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3458 if nodegroup is not None:
# Match the node group either by name or by UUID
3459 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3460 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3463 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3466 if len(qfilter) > 1:
3467 final_filter = [qlang.OP_AND] + qfilter
3469 assert len(qfilter) == 1
3470 final_filter = qfilter[0]
3474 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# The helpers below unpack the (status, value) pairs of the three queried
# fields, in the order requested above: name, offline, sip
3476 def _IsOffline(row):
3477 (_, (_, offline), _) = row
3481 ((_, name), _, _) = row
3485 (_, _, (_, sip)) = row
3488 (offline, online) = compat.partition(result.data, _IsOffline)
3490 if offline and not nowarn:
3491 ToStderr("Note: skipping offline node(s): %s" %
3492 utils.CommaJoin(map(_GetName, offline)))
3499 return map(fn, online)
3502 def _ToStream(stream, txt, *args):
3503 """Write a message to a stream, bypassing the logging system
3505 @type stream: file object
3506 @param stream: the file to which we should write
3508 @param txt: the message
3514 stream.write(txt % args)
3519 except IOError, err:
3520 if err.errno == errno.EPIPE:
3521 # our terminal went away, we'll exit
3522 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
# NOTE(review): batches luxi job submissions (QueueJob/SubmitPending), polls
# them in a sensible order (_ChooseJob) and collects per-job results
# (GetResults/WaitOrShow).  The embedded numbering has gaps (e.g. __init__'s
# queue/jobs initialization and `GetClient()` default, the body of _IfName,
# the `while self.jobs:` loop header, `success = ...` assignments and the
# `results.sort()`/`return results` tail) -- restore from upstream before
# editing.
3551 class JobExecutor(object):
3552 """Class which manages the submission and execution of multiple jobs.
3554 Note that instances of this class should not be reused between
3558 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3563 self.verbose = verbose
3566 self.feedback_fn = feedback_fn
# Monotonic counter used to restore submission order in GetResults
3567 self._counter = itertools.count()
3570 def _IfName(name, fmt):
3571 """Helper function for formatting name.
3579 def QueueJob(self, name, *ops):
3580 """Record a job for later submit.
3583 @param name: a description of the job, will be used in WaitJobSet
3586 SetGenericOpcodeOpts(ops, self.opts)
3587 self.queue.append((self._counter.next(), name, ops))
3589 def AddJobId(self, name, status, job_id):
3590 """Adds a job ID to the internal queue.
3593 self.jobs.append((self._counter.next(), status, job_id, name))
3595 def SubmitPending(self, each=False):
3596 """Submit all pending jobs.
# each=True submits jobs one by one; otherwise SubmitManyJobs is used
3601 for (_, _, ops) in self.queue:
3602 # SubmitJob will remove the success status, but raise an exception if
3603 # the submission fails, so we'll notice that anyway.
3604 results.append([True, self.cl.SubmitJob(ops)[0]])
3606 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3607 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3608 self.jobs.append((idx, status, data, name))
3610 def _ChooseJob(self):
3611 """Choose a non-waiting/queued job to poll next.
3614 assert self.jobs, "_ChooseJob called with empty job list"
# Only look at a bounded batch to keep the QueryJobs call small
3616 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3620 for job_data, status in zip(self.jobs, result):
3621 if (isinstance(status, list) and status and
3622 status[0] in (constants.JOB_STATUS_QUEUED,
3623 constants.JOB_STATUS_WAITING,
3624 constants.JOB_STATUS_CANCELING)):
3625 # job is still present and waiting
3627 # good candidate found (either running job or lost job)
3628 self.jobs.remove(job_data)
# No candidate found: fall back to the first job in the list
3632 return self.jobs.pop(0)
3634 def GetResults(self):
3635 """Wait for and return the results of all jobs.
3638 @return: list of tuples (success, job results), in the same order
3639 as the submitted jobs; if a job has failed, instead of the result
3640 there will be the error message
3644 self.SubmitPending()
3647 ok_jobs = [row[2] for row in self.jobs if row[1]]
3649 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3651 # first, remove any non-submitted jobs
3652 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3653 for idx, _, jid, name in failures:
3654 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3655 results.append((idx, False, jid))
3658 (idx, _, jid, name) = self._ChooseJob()
3659 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3661 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3663 except errors.JobLost, err:
3664 _, job_result = FormatError(err)
3665 ToStderr("Job %s%s has been archived, cannot check its result",
3666 jid, self._IfName(name, " for %s"))
3668 except (errors.GenericError, luxi.ProtocolError), err:
3669 _, job_result = FormatError(err)
3671 # the error message will always be shown, verbose or not
3672 ToStderr("Job %s%s has failed: %s",
3673 jid, self._IfName(name, " for %s"), job_result)
3675 results.append((idx, success, job_result))
3677 # sort based on the index, then drop it
3679 results = [i[1:] for i in results]
3683 def WaitOrShow(self, wait):
3684 """Wait for job results or only print the job IDs.
3687 @param wait: whether to wait or not
3691 return self.GetResults()
# wait=False: only submit and report job IDs, do not poll for results
3694 self.SubmitPending()
3695 for _, status, result, name in self.jobs:
3697 ToStdout("%s: %s", result, name)
3699 ToStderr("Failure for %s: %s", name, result)
3700 return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  def _FormatOne(key, data):
    # non-empty sub-dictionaries are formatted recursively; everything
    # else becomes either the own value or a "default (...)" marker
    if isinstance(data, dict) and data:
      return FormatParamsDictInfo(param_dict.get(key, {}), data)
    return str(param_dict.get(key, "default (%s)" % data))

  return dict((key, _FormatOne(key, data))
              for (key, data) in actual.items())
def _FormatListInfoDefault(data, def_data):
  # Render the explicitly-set list, or fall back to the defaults marked
  # as such
  if data is None:
    return "default (%s)" % utils.CommaJoin(def_data)
  return utils.CommaJoin(data)
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    # at cluster level the own policy is already complete
    eff_ipolicy = custom_ipolicy

  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    # own bounds present: format them against themselves (all explicit)
    pairs = [(minmax, minmax) for minmax in custom_minmax]
  else:
    # no own bounds: show the effective ones, all marked as defaults
    pairs = [({}, minmax)
             for minmax in eff_ipolicy[constants.ISPECS_MINMAX]]

  minmax_out = []
  for (k, (custom, minmax)) in enumerate(pairs):
    minmax_out.append([
      ("%s/%s" % (key, k),
       FormatParamsDictInfo(custom[key] if custom else {}, minmax[key]))
      for key in constants.ISPECS_MINMAX_KEYS
      ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs)))

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS])))
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret
3786 def _PrintSpecsParameters(buf, specs):
3787 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3788 buf.write(",".join(values))
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    # std specs are only valid above group level
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        # NOTE(review): separator between multiple min/max pairs
        # reconstructed from upstream behavior — confirm against callers
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # long lists are not shown up-front; offer a "view" choice instead
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    # the user asked to see the list; drop the "view" choice and re-ask
    # with the full list shown
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Values equal to the "default" keyword are passed through unchanged;
  everything else is parsed as a size with an optional unit suffix.

  """
  parsed = {}
  for k, v in elements.items():
    parsed[k] = v if v == constants.VALUE_DEFAULT else utils.ParseUnit(v)
  return parsed
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  """Fill an instance policy from the old-style --specs-xxx options.

  Each C{ispecs_*} argument is a dict mapping min/max/std keys to values;
  sizes are parsed from human-readable units into MiB.

  @param ipolicy: the policy dict to fill in-place
  @param group_ipolicy: whether this is a group-level policy
  @param fill_all: whether missing values must be filled from the defaults
  @raise errors.OpPrereqError: on unparseable size values

  """
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  # "as err" instead of the Python-2-only ", err": valid since 2.6 and
  # required for Python 3
  except (TypeError, ValueError, errors.UnitParseError) as err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose {param: {min/max/std: val}} into {min/max/std: {param: val}}
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  # .items() instead of the Python-2-only .iteritems(), for Python 3
  # compatibility
  for (name, specs) in ispecs_transposed.items():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
def _ParseSpecUnit(spec, keyname):
  """Parse a spec dict, converting size values to integer MiB.

  @param spec: the spec dict; not modified, a copy is returned
  @param keyname: the spec's name, used only in error messages
  @raise errors.OpPrereqError: on unparseable size values

  """
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      # "as err" instead of the Python-2-only ", err": valid since 2.6 and
      # required for Python 3
      except (TypeError, ValueError, errors.UnitParseError) as err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret
def _ParseISpec(spec, keyname, required):
  """Parse one instance spec, enforcing value types and completeness.

  @param spec: the raw spec dict
  @param keyname: the spec's name, used only in error messages
  @param required: whether all spec parameters must be present
  @raise errors.OpPrereqError: on missing parameters when C{required}

  """
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret)
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret
3961 def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
3963 if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
3964 len(minmax_ispecs[0]) == 1):
3965 for (key, spec) in minmax_ispecs[0].items():
3966 # This loop is executed exactly once
3967 if key in allowed_values and not spec:
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fill an instance policy from the --ipolicy-xxx-specs style options.

  @param ipolicy_out: the policy dict to fill in-place
  @raise errors.OpPrereqError: on invalid bounds keys or spec values

  """
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # a bare allowed-values keyword replaces the whole bounds list
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    parsed_bounds = []
    for bounds_pair in minmax_ispecs:
      parsed_pair = {}
      for (key, spec) in bounds_pair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        parsed_pair[key] = _ParseISpec(spec, key, True)
      parsed_bounds.append(parsed_pair)
    ipolicy_out[constants.ISPECS_MINMAX] = parsed_bounds
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  The old-style C{ispecs_*} options and the new C{minmax_ispecs}/
  C{std_ispecs} options are mutually exclusive.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()

  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      # keep the allowed-values keyword as-is
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
4049 def _SerializeGenericInfo(buf, data, level, afterkey=False):
4050 """Formatting core of L{PrintGenericInfo}.
4052 @param buf: (string) stream to accumulate the result into
4053 @param data: data to format
4055 @param level: depth in the data hierarchy, used for indenting
4056 @type afterkey: bool
4057 @param afterkey: True when we are in the middle of a line after a key (used
4058 to properly add newlines or indentation)
4062 if isinstance(data, dict):
4071 for key in sorted(data):
4073 buf.write(baseind * level)
4078 _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4079 elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4080 # list of tuples (an ordered dictionary)
4086 for (key, val) in data:
4088 buf.write(baseind * level)
4093 _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4094 elif isinstance(data, list):
4105 buf.write(baseind * level)
4109 buf.write(baseind[1:])
4110 _SerializeGenericInfo(buf, item, level + 1)
4112 # This branch should be only taken for strings, but it's practically
4113 # impossible to guarantee that no other types are produced somewhere
4114 buf.write(str(data))
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  out = StringIO()
  _SerializeGenericInfo(out, data, 0)
  # drop the trailing newline: ToStdout adds its own
  ToStdout(out.getvalue().rstrip("\n"))