4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 "ENABLED_DISK_TEMPLATES_OPT",
89 "FILESTORE_DRIVER_OPT",
97 "GLOBAL_SHARED_FILEDIR_OPT",
102 "DEFAULT_IALLOCATOR_OPT",
103 "IDENTIFY_DEFAULTS_OPT",
104 "IGNORE_CONSIST_OPT",
106 "IGNORE_FAILURES_OPT",
107 "IGNORE_OFFLINE_OPT",
108 "IGNORE_REMOVE_FAILURES_OPT",
109 "IGNORE_SECONDARIES_OPT",
111 "INCLUDEDEFAULTS_OPT",
114 "MAINTAIN_NODE_HEALTH_OPT",
116 "MASTER_NETMASK_OPT",
118 "MIGRATION_MODE_OPT",
122 "NEW_CLUSTER_CERT_OPT",
123 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
124 "NEW_CONFD_HMAC_KEY_OPT",
128 "NEW_SPICE_CERT_OPT",
130 "NOCONFLICTSCHECK_OPT",
131 "NODE_FORCE_JOIN_OPT",
133 "NODE_PLACEMENT_OPT",
137 "NODRBD_STORAGE_OPT",
143 "NOMODIFY_ETCHOSTS_OPT",
144 "NOMODIFY_SSH_SETUP_OPT",
148 "NORUNTIME_CHGS_OPT",
151 "NOSSH_KEYCHECK_OPT",
165 "PREALLOC_WIPE_DISKS_OPT",
166 "PRIMARY_IP_VERSION_OPT",
174 "REMOVE_INSTANCE_OPT",
175 "REMOVE_RESERVED_IPS_OPT",
181 "SECONDARY_ONLY_OPT",
186 "SHUTDOWN_TIMEOUT_OPT",
188 "SPECS_CPU_COUNT_OPT",
189 "SPECS_DISK_COUNT_OPT",
190 "SPECS_DISK_SIZE_OPT",
191 "SPECS_MEM_SIZE_OPT",
192 "SPECS_NIC_COUNT_OPT",
194 "IPOLICY_STD_SPECS_OPT",
195 "IPOLICY_DISK_TEMPLATES",
196 "IPOLICY_VCPU_RATIO",
203 "STARTUP_PAUSED_OPT",
212 "USE_EXTERNAL_MIP_SCRIPT",
220 "IGNORE_IPOLICY_OPT",
221 "INSTANCE_POLICY_OPTS",
222 # Generic functions for CLI programs
224 "CreateIPolicyFromOpts",
226 "GenericInstanceCreate",
232 "JobSubmittedException",
234 "RunWhileClusterStopped",
238 # Formatting functions
239 "ToStderr", "ToStdout",
242 "FormatParamsDictInfo",
244 "PrintIPolicyCommand",
254 # command line options support infrastructure
255 "ARGS_MANY_INSTANCES",
258 "ARGS_MANY_NETWORKS",
278 "OPT_COMPL_INST_ADD_NODES",
279 "OPT_COMPL_MANY_NODES",
280 "OPT_COMPL_ONE_IALLOCATOR",
281 "OPT_COMPL_ONE_INSTANCE",
282 "OPT_COMPL_ONE_NODE",
283 "OPT_COMPL_ONE_NODEGROUP",
284 "OPT_COMPL_ONE_NETWORK",
286 "OPT_COMPL_ONE_EXTSTORAGE",
291 "COMMON_CREATE_OPTS",
#: Priorities (sorted from lowest to highest, as user-visible choices)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients: normal (all data returned), unknown
# field requested, or result incomplete (e.g. node unreachable)
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
314 #: Maximum batch size for ChooseJob
# Constants used to create InstancePolicy dictionaries: the expected value
# types for each instance-spec bound, used for type-checking user input.
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

# Cluster-level policy additionally carries a "std" (standard/default) bound.
TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }
#: User-friendly names for query2 field types
# NOTE(review): the dict's name line was lost in this chunk; "_QFT_NAMES"
# matches upstream usage -- confirm against the rest of the file.
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
343 def __init__(self, min=0, max=None): # pylint: disable=W0622
348 return ("<%s min=%s max=%s>" %
349 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices is only a suggestion (e.g. for shell completion), not a
    # restriction on accepted values
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """
# Ready-made argument descriptors for common command signatures:
# "MANY" variants accept zero or more names, "ONE" variants require
# exactly one.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options, must carry a C{tag_type}
      attribute denoting the kind of object operated on
  @param args: remaining positional arguments; for per-object tag kinds
      the object name is popped from the front of this list
  @rtype: tuple
  @return: (kind, name); name is None for cluster-level tags
  @raises errors.OpPrereqError: if a name is required but args is empty

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name needed
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed options; only C{tags_source} is read
  @param args: list of tags, extended in place with one tag per
      (stripped) line of the source file

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    # "sys" is imported at module level (not visible in this chunk)
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  # sort for deterministic, user-friendly output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
562 def check_unit(option, opt, value): # pylint: disable=W0613
563 """OptParsers custom converter for units.
567 return utils.ParseUnit(value)
568 except errors.UnitParseError, err:
569 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    # "no_ident" removes the whole group; passing options is contradictory
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    # "-ident" resets the group to defaults; a leading digit after "-"
    # means a negative index, not the reset prefix
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)
def _SplitListKeyVal(opt, value):
  """Parse a "/"-separated list of ident:key=val groups into a dict.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: string of the form "ident:key=val,.../ident:key=val,..."
  @rtype: dict
  @return: {ident: {key: val, ...}, ...}
  @raises errors.ParameterError: on empty sections or duplicated idents

  """
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval
def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  # "//" separates top-level list entries, "/" separates groups within one
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  The literal "default" (case-insensitive) is passed through unchanged;
  anything else must parse as a float.

  """
  value = value.lower()
  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with a completion hint attribute and the
  custom value types parsed by the check_* converters above.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
794 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
795 help="Increase debugging level")
797 NOHDR_OPT = cli_option("--no-headers", default=False,
798 action="store_true", dest="no_headers",
799 help="Don't display column headers")
801 SEP_OPT = cli_option("--separator", default=None,
802 action="store", dest="separator",
803 help=("Separator between output fields"
804 " (defaults to one space)"))
806 USEUNITS_OPT = cli_option("--units", default=None,
807 dest="units", choices=("h", "m", "g", "t"),
808 help="Specify units for output (one of h/m/g/t)")
810 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
811 type="string", metavar="FIELDS",
812 help="Comma separated list of output fields")
814 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
815 default=False, help="Force the operation")
817 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
818 default=False, help="Do not require confirmation")
820 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
821 action="store_true", default=False,
822 help=("Ignore offline nodes and do as much"
825 TAG_ADD_OPT = cli_option("--tags", dest="tags",
826 default=None, help="Comma-separated list of instance"
829 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
830 default=None, help="File with tag names")
832 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
833 default=False, action="store_true",
834 help=("Submit the job and return the job ID, but"
835 " don't wait for the job to finish"))
837 PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
838 default=False, action="store_true",
839 help=("Additionally print the job as first line"
840 " on stdout (for scripting)."))
842 SYNC_OPT = cli_option("--sync", dest="do_locking",
843 default=False, action="store_true",
844 help=("Grab locks while doing the queries"
845 " in order to ensure more consistent results"))
847 DRY_RUN_OPT = cli_option("--dry-run", default=False,
849 help=("Do not execute the operation, just run the"
850 " check steps and verify if it could be"
853 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
855 help="Increase the verbosity of the operation")
857 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
858 action="store_true", dest="simulate_errors",
859 help="Debugging option that makes the operation"
860 " treat most runtime checks as failed")
862 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
863 default=True, action="store_false",
864 help="Don't wait for sync (DANGEROUS!)")
866 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
867 default=False, action="store_true",
868 help="Wait for disks to sync")
870 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
871 action="store_true", default=False,
872 help="Enable offline instance")
874 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
875 action="store_true", default=False,
876 help="Disable down instance")
878 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
879 help=("Custom disk setup (%s)" %
880 utils.CommaJoin(constants.DISK_TEMPLATES)),
881 default=None, metavar="TEMPL",
882 choices=list(constants.DISK_TEMPLATES))
884 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
885 help="Do not create any network cards for"
888 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
889 help="Relative path under default cluster-wide"
890 " file storage dir to store file-based disks",
891 default=None, metavar="<DIR>")
893 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
894 help="Driver to use for image files",
895 default="loop", metavar="<DRIVER>",
896 choices=list(constants.FILE_DRIVER))
898 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
899 help="Select nodes for the instance automatically"
900 " using the <NAME> iallocator plugin",
901 default=None, type="string",
902 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
904 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
906 help="Set the default instance"
908 default=None, type="string",
909 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
911 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
913 completion_suggest=OPT_COMPL_ONE_OS)
915 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
916 type="keyval", default={},
917 help="OS parameters")
919 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
920 action="store_true", default=False,
921 help="Force an unknown variant")
923 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
924 action="store_true", default=False,
925 help="Do not install the OS (will"
928 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
929 dest="allow_runtime_chgs",
930 default=True, action="store_false",
931 help="Don't allow runtime changes")
933 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
934 type="keyval", default={},
935 help="Backend parameters")
937 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
938 default={}, dest="hvparams",
939 help="Hypervisor parameters")
941 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
942 help="Disk template parameters, in the format"
943 " template:option=value,option=value,...",
944 type="identkeyval", action="append", default=[])
946 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
947 type="keyval", default={},
948 help="Memory size specs: list of key=value,"
949 " where key is one of min, max, std"
950 " (in MB or using a unit)")
952 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
953 type="keyval", default={},
954 help="CPU count specs: list of key=value,"
955 " where key is one of min, max, std")
957 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
958 dest="ispecs_disk_count",
959 type="keyval", default={},
960 help="Disk count specs: list of key=value,"
961 " where key is one of min, max, std")
963 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
964 type="keyval", default={},
965 help="Disk size specs: list of key=value,"
966 " where key is one of min, max, std"
967 " (in MB or using a unit)")
969 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
970 type="keyval", default={},
971 help="NIC count specs: list of key=value,"
972 " where key is one of min, max, std")
974 IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
975 IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
976 dest="ipolicy_bounds_specs",
977 type="multilistidentkeyval", default=None,
978 help="Complete instance specs limits")
980 IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
981 IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
982 dest="ipolicy_std_specs",
983 type="keyval", default=None,
984 help="Complte standard instance specs")
986 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
987 dest="ipolicy_disk_templates",
988 type="list", default=None,
989 help="Comma-separated list of"
990 " enabled disk templates")
992 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
993 dest="ipolicy_vcpu_ratio",
994 type="maybefloat", default=None,
995 help="The maximum allowed vcpu-to-cpu ratio")
997 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
998 dest="ipolicy_spindle_ratio",
999 type="maybefloat", default=None,
1000 help=("The maximum allowed instances to"
1003 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
1004 help="Hypervisor and hypervisor options, in the"
1005 " format hypervisor:option=value,option=value,...",
1006 default=None, type="identkeyval")
1008 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1009 help="Hypervisor and hypervisor options, in the"
1010 " format hypervisor:option=value,option=value,...",
1011 default=[], action="append", type="identkeyval")
1013 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1014 action="store_false",
1015 help="Don't check that the instance's IP"
1018 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1019 default=True, action="store_false",
1020 help="Don't check that the instance's name"
1023 NET_OPT = cli_option("--net",
1024 help="NIC parameters", default=[],
1025 dest="nics", action="append", type="identkeyval")
1027 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1028 dest="disks", action="append", type="identkeyval")
1030 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1031 help="Comma-separated list of disks"
1032 " indices to act on (e.g. 0,2) (optional,"
1033 " defaults to all disks)")
1035 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1036 help="Enforces a single-disk configuration using the"
1037 " given disk size, in MiB unless a suffix is used",
1038 default=None, type="unit", metavar="<size>")
1040 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1041 dest="ignore_consistency",
1042 action="store_true", default=False,
1043 help="Ignore the consistency of the disks on"
1046 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1047 dest="allow_failover",
1048 action="store_true", default=False,
1049 help="If migration is not possible fallback to"
1052 NONLIVE_OPT = cli_option("--non-live", dest="live",
1053 default=True, action="store_false",
1054 help="Do a non-live migration (this usually means"
1055 " freeze the instance, save the state, transfer and"
1056 " only then resume running on the secondary node)")
1058 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1060 choices=list(constants.HT_MIGRATION_MODES),
1061 help="Override default migration mode (choose"
1062 " either live or non-live")
1064 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1065 help="Target node and optional secondary node",
1066 metavar="<pnode>[:<snode>]",
1067 completion_suggest=OPT_COMPL_INST_ADD_NODES)
1069 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1070 action="append", metavar="<node>",
1071 help="Use only this node (can be used multiple"
1072 " times, if not given defaults to all nodes)",
1073 completion_suggest=OPT_COMPL_ONE_NODE)
1075 NODEGROUP_OPT_NAME = "--node-group"
1076 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1078 help="Node group (name or uuid)",
1079 metavar="<nodegroup>",
1080 default=None, type="string",
1081 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1083 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1085 completion_suggest=OPT_COMPL_ONE_NODE)
1087 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1088 action="store_false",
1089 help="Don't start the instance after creation")
1091 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1092 action="store_true", default=False,
1093 help="Show command instead of executing it")
1095 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1096 default=False, action="store_true",
1097 help="Instead of performing the migration, try to"
1098 " recover from a failed cleanup. This is safe"
1099 " to run even if the instance is healthy, but it"
1100 " will create extra replication traffic and "
1101 " disrupt briefly the replication (like during the"
1104 STATIC_OPT = cli_option("-s", "--static", dest="static",
1105 action="store_true", default=False,
1106 help="Only show configuration data, not runtime data")
1108 ALL_OPT = cli_option("--all", dest="show_all",
1109 default=False, action="store_true",
1110 help="Show info on all instances on the cluster."
1111 " This can take a long time to run, use wisely")
1113 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1114 action="store_true", default=False,
1115 help="Interactive OS reinstall, lists available"
1116 " OS templates for selection")
1118 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1119 action="store_true", default=False,
1120 help="Remove the instance from the cluster"
1121 " configuration even if there are failures"
1122 " during the removal process")
1124 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1125 dest="ignore_remove_failures",
1126 action="store_true", default=False,
1127 help="Remove the instance from the"
1128 " cluster configuration even if there"
1129 " are failures during the removal"
1132 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1133 action="store_true", default=False,
1134 help="Remove the instance from the cluster")
1136 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1137 help="Specifies the new node for the instance",
1138 metavar="NODE", default=None,
1139 completion_suggest=OPT_COMPL_ONE_NODE)
1141 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1142 help="Specifies the new secondary node",
1143 metavar="NODE", default=None,
1144 completion_suggest=OPT_COMPL_ONE_NODE)
1146 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1147 help="Specifies the new primary node",
1148 metavar="<node>", default=None,
1149 completion_suggest=OPT_COMPL_ONE_NODE)
1151 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1152 default=False, action="store_true",
1153 help="Replace the disk(s) on the primary"
1154 " node (applies only to internally mirrored"
1155 " disk templates, e.g. %s)" %
1156 utils.CommaJoin(constants.DTS_INT_MIRROR))
1158 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1159 default=False, action="store_true",
1160 help="Replace the disk(s) on the secondary"
1161 " node (applies only to internally mirrored"
1162 " disk templates, e.g. %s)" %
1163 utils.CommaJoin(constants.DTS_INT_MIRROR))
1165 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1166 default=False, action="store_true",
1167 help="Lock all nodes and auto-promote as needed"
1170 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1171 default=False, action="store_true",
1172 help="Automatically replace faulty disks"
1173 " (applies only to internally mirrored"
1174 " disk templates, e.g. %s)" %
1175 utils.CommaJoin(constants.DTS_INT_MIRROR))
1177 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1178 default=False, action="store_true",
1179 help="Ignore current recorded size"
1180 " (useful for forcing activation when"
1181 " the recorded size is wrong)")
1183 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1185 completion_suggest=OPT_COMPL_ONE_NODE)
1187 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1190 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1191 help="Specify the secondary ip for the node",
1192 metavar="ADDRESS", default=None)
1194 READD_OPT = cli_option("--readd", dest="readd",
1195 default=False, action="store_true",
1196 help="Readd old node after replacing it")
1198 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1199 default=True, action="store_false",
1200 help="Disable SSH key fingerprint checking")
1202 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1203 default=False, action="store_true",
1204 help="Force the joining of a node")
1206 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1207 type="bool", default=None, metavar=_YORNO,
1208 help="Set the master_candidate flag on the node")
1210 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1211 type="bool", default=None,
1212 help=("Set the offline flag on the node"
1213 " (cluster does not communicate with offline"
1216 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1217 type="bool", default=None,
1218 help=("Set the drained flag on the node"
1219 " (excluded from allocation operations)"))
1221 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1222 type="bool", default=None, metavar=_YORNO,
1223 help="Set the master_capable flag on the node")
1225 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1226 type="bool", default=None, metavar=_YORNO,
1227 help="Set the vm_capable flag on the node")
1229 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1230 type="bool", default=None, metavar=_YORNO,
1231 help="Set the allocatable flag on a volume")
1233 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1234 help="Disable support for lvm based instances"
1236 action="store_false", default=True)
1238 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1239 dest="enabled_hypervisors",
1240 help="Comma-separated list of hypervisors",
1241 type="string", default=None)
1243 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1244 dest="enabled_disk_templates",
1245 help="Comma-separated list of "
1247 type="string", default=None)
1249 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1250 type="keyval", default={},
1251 help="NIC parameters")
1253 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1254 dest="candidate_pool_size", type="int",
1255 help="Set the candidate pool size")
1257 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1258 help=("Enables LVM and specifies the volume group"
1259 " name (cluster-wide) for disk allocation"
1260 " [%s]" % constants.DEFAULT_VG),
1261 metavar="VG", default=None)
1263 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1264 help="Destroy cluster", action="store_true")
1266 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1267 help="Skip node agreement check (dangerous)",
1268 action="store_true", default=False)
1270 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1271 help="Specify the mac prefix for the instance IP"
1272 " addresses, in the format XX:XX:XX",
1276 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1277 help="Specify the node interface (cluster-wide)"
1278 " on which the master IP address will be added"
1279 " (cluster init default: %s)" %
1280 constants.DEFAULT_BRIDGE,
1284 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1285 help="Specify the netmask of the master IP",
1289 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1290 dest="use_external_mip_script",
1291 help="Specify whether to run a"
1292 " user-provided script for the master"
1293 " IP address turnup and"
1294 " turndown operations",
1295 type="bool", metavar=_YORNO, default=None)
1297 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1298 help="Specify the default directory (cluster-"
1299 "wide) for storing the file-based disks [%s]" %
1300 pathutils.DEFAULT_FILE_STORAGE_DIR,
1302 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1304 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1305 "--shared-file-storage-dir",
1306 dest="shared_file_storage_dir",
1307 help="Specify the default directory (cluster-wide) for storing the"
1308 " shared file-based disks [%s]" %
1309 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1310 metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1312 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1313 help="Don't modify %s" % pathutils.ETC_HOSTS,
1314 action="store_false", default=True)
1316 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1317 help="Don't initialize SSH keys",
1318 action="store_false", default=True)
1320 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1321 help="Enable parseable error messages",
1322 action="store_true", default=False)
1324 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1325 help="Skip N+1 memory redundancy tests",
1326 action="store_true", default=False)
1328 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1329 help="Type of reboot: soft/hard/full",
1330 default=constants.INSTANCE_REBOOT_HARD,
1332 choices=list(constants.REBOOT_TYPES))
1334 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1335 dest="ignore_secondaries",
1336 default=False, action="store_true",
1337 help="Ignore errors from secondaries")
1339 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1340 action="store_false", default=True,
1341 help="Don't shutdown the instance (unsafe)")
1343 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1344 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1345 help="Maximum time to wait")
1347 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1348 dest="shutdown_timeout", type="int",
1349 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1350 help="Maximum time to wait for instance"
1353 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1355 help=("Number of seconds between repetions of the"
1358 EARLY_RELEASE_OPT = cli_option("--early-release",
1359 dest="early_release", default=False,
1360 action="store_true",
1361 help="Release the locks on the secondary"
1364 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1365 dest="new_cluster_cert",
1366 default=False, action="store_true",
1367 help="Generate a new cluster certificate")
1369 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1371 help="File containing new RAPI certificate")
1373 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1374 default=None, action="store_true",
1375 help=("Generate a new self-signed RAPI"
1378 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1380 help="File containing new SPICE certificate")
1382 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1384 help="File containing the certificate of the CA"
1385 " which signed the SPICE certificate")
1387 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1388 dest="new_spice_cert", default=None,
1389 action="store_true",
1390 help=("Generate a new self-signed SPICE"
1393 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1394 dest="new_confd_hmac_key",
1395 default=False, action="store_true",
1396 help=("Create a new HMAC key for %s" %
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       # fixed duplicated word ("new new")
                                       help=("Load new cluster domain"
                                             " secret from file"))
1405 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1406 dest="new_cluster_domain_secret",
1407 default=False, action="store_true",
1408 help=("Create a new cluster domain"
1411 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1412 dest="use_replication_network",
1413 help="Whether to use the replication network"
1414 " for talking to the nodes",
1415 action="store_true", default=False)
1417 MAINTAIN_NODE_HEALTH_OPT = \
1418 cli_option("--maintain-node-health", dest="maintain_node_health",
1419 metavar=_YORNO, default=None, type="bool",
1420 help="Configure the cluster to automatically maintain node"
1421 " health, by shutting down unknown instances, shutting down"
1422 " unknown DRBD devices, etc.")
1424 IDENTIFY_DEFAULTS_OPT = \
1425 cli_option("--identify-defaults", dest="identify_defaults",
1426 default=False, action="store_true",
1427 help="Identify which saved instance parameters are equal to"
1428 " the current cluster defaults and set them as such, instead"
1429 " of marking them as overridden")
1431 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1432 action="store", dest="uid_pool",
1433 help=("A list of user-ids or user-id"
1434 " ranges separated by commas"))
1436 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1437 action="store", dest="add_uids",
1438 help=("A list of user-ids or user-id"
1439 " ranges separated by commas, to be"
1440 " added to the user-id pool"))
1442 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1443 action="store", dest="remove_uids",
1444 help=("A list of user-ids or user-id"
1445 " ranges separated by commas, to be"
1446 " removed from the user-id pool"))
1448 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1449 action="store", dest="reserved_lvs",
1450 help=("A comma-separated list of reserved"
1451 " logical volumes names, that will be"
1452 " ignored by cluster verify"))
1454 ROMAN_OPT = cli_option("--roman",
1455 dest="roman_integers", default=False,
1456 action="store_true",
1457 help="Use roman numbers for positive integers")
1459 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1460 action="store", default=None,
1461 help="Specifies usermode helper for DRBD")
1463 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1464 action="store_false", default=True,
1465 help="Disable support for DRBD")
1467 PRIMARY_IP_VERSION_OPT = \
1468 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1469 action="store", dest="primary_ip_version",
1470 metavar="%d|%d" % (constants.IP4_VERSION,
1471 constants.IP6_VERSION),
1472 help="Cluster-wide IP version for primary IP")
1474 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1475 action="store_true",
1476 help="Show machine name for every line in output")
1478 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1479 action="store_true",
1480 help=("Hide successful results and show failures"
1481 " only (determined by the exit code)"))
1483 REASON_OPT = cli_option("--reason", default=None,
1484 help="The reason for executing the command")
1487 def _PriorityOptionCb(option, _, value, parser):
1488 """Callback for processing C{--priority} option.
1491 value = _PRIONAME_TO_VALUE[value]
1493 setattr(parser.values, option.dest, value)
1496 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1497 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1498 choices=_PRIONAME_TO_VALUE.keys(),
1499 action="callback", type="choice",
1500 callback=_PriorityOptionCb,
1501 help="Priority for opcode processing")
1503 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1504 type="bool", default=None, metavar=_YORNO,
1505 help="Sets the hidden flag on the OS")
1507 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1508 type="bool", default=None, metavar=_YORNO,
1509 help="Sets the blacklisted flag on the OS")
1511 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1512 type="bool", metavar=_YORNO,
1513 dest="prealloc_wipe_disks",
1514 help=("Wipe disks prior to instance"
1517 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1518 type="keyval", default=None,
1519 help="Node parameters")
1521 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1522 action="store", metavar="POLICY", default=None,
1523 help="Allocation policy for the node group")
1525 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1526 type="bool", metavar=_YORNO,
1527 dest="node_powered",
1528 help="Specify if the SoR for node is powered")
1530 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1531 default=constants.OOB_TIMEOUT,
1532 help="Maximum time to wait for out-of-band helper")
1534 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1535 default=constants.OOB_POWER_DELAY,
1536 help="Time in seconds to wait between power-ons")
1538 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1539 action="store_true", default=False,
1540 help=("Whether command argument should be treated"
1543 NO_REMEMBER_OPT = cli_option("--no-remember",
1545 action="store_true", default=False,
1546 help="Perform but do not record the change"
1547 " in the configuration")
1549 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1550 default=False, action="store_true",
1551 help="Evacuate primary instances only")
1553 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1554 default=False, action="store_true",
1555 help="Evacuate secondary instances only"
1556 " (applies only to internally mirrored"
1557 " disk templates, e.g. %s)" %
1558 utils.CommaJoin(constants.DTS_INT_MIRROR))
1560 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1561 action="store_true", default=False,
1562 help="Pause instance at startup")
1564 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1565 help="Destination node group (name or uuid)",
1566 default=None, action="append",
1567 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1569 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1570 action="append", dest="ignore_errors",
1571 choices=list(constants.CV_ALL_ECODES_STRINGS),
1572 help="Error code to be ignored")
1574 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1576 help=("Specify disk state information in the"
1578 " storage_type/identifier:option=value,...;"
1579 " note this is unused for now"),
1582 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1584 help=("Specify hypervisor state information in the"
1585 " format hypervisor:option=value,...;"
1586 " note this is unused for now"),
1589 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1590 action="store_true", default=False,
1591 help="Ignore instance policy violations")
1593 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1594 help="Sets the instance's runtime memory,"
1595 " ballooning it up or down to the new value",
1596 default=None, type="unit", metavar="<size>")
1598 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1599 action="store_true", default=False,
1600 help="Marks the grow as absolute instead of the"
1601 " (default) relative mode")
1603 NETWORK_OPT = cli_option("--network",
1604 action="store", default=None, dest="network",
1605 help="IP network in CIDR notation")
1607 GATEWAY_OPT = cli_option("--gateway",
1608 action="store", default=None, dest="gateway",
1609 help="IP address of the router (gateway)")
1611 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1612 action="store", default=None,
1613 dest="add_reserved_ips",
1614 help="Comma-separated list of"
1615 " reserved IPs to add")
1617 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1618 action="store", default=None,
1619 dest="remove_reserved_ips",
1620 help="Comma-delimited list of"
1621 " reserved IPs to remove")
1623 NETWORK6_OPT = cli_option("--network6",
1624 action="store", default=None, dest="network6",
1625 help="IP network in CIDR notation")
1627 GATEWAY6_OPT = cli_option("--gateway6",
1628 action="store", default=None, dest="gateway6",
1629 help="IP6 address of the router (gateway)")
1631 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1632 dest="conflicts_check",
1634 action="store_false",
1635 help="Don't check for conflicting IPs")
1637 INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1638 default=False, action="store_true",
1639 help="Include default values")
1641 #: Options provided by all commands
1642 COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1644 # options related to asynchronous job handling
1651 # common options for creating instances. add and import then add their own
1653 COMMON_CREATE_OPTS = [
1658 FILESTORE_DRIVER_OPT,
1664 NOCONFLICTSCHECK_OPT,
1677 # common instance policy options
1678 INSTANCE_POLICY_OPTS = [
1679 IPOLICY_BOUNDS_SPECS_OPT,
1680 IPOLICY_DISK_TEMPLATES,
1682 IPOLICY_SPINDLE_RATIO,
1685 # instance policy split specs options
1686 SPLIT_ISPECS_OPTS = [
1687 SPECS_CPU_COUNT_OPT,
1688 SPECS_DISK_COUNT_OPT,
1689 SPECS_DISK_SIZE_OPT,
1691 SPECS_NIC_COUNT_OPT,
1695 class _ShowUsage(Exception):
1696 """Exception class for L{_ParseArgs}.
1699 def __init__(self, exit_error):
1700 """Initializes instances of this class.
1702 @type exit_error: bool
1703 @param exit_error: Whether to report failure on exit
1706 Exception.__init__(self)
1707 self.exit_error = exit_error
1710 class _ShowVersion(Exception):
1711 """Exception class for L{_ParseArgs}.
1716 def _ParseArgs(binary, argv, commands, aliases, env_override):
1717 """Parser for the command line arguments.
1719 This function parses the arguments and returns the function which
1720 must be executed together with its (modified) arguments.
1722 @param binary: Script name
1723 @param argv: Command line arguments
1724 @param commands: Dictionary containing command definitions
1725 @param aliases: dictionary with command aliases {"alias": "target", ...}
1726 @param env_override: list of env variables allowed for default args
1727 @raise _ShowUsage: If usage description should be shown
1728 @raise _ShowVersion: If version should be shown
1731 assert not (env_override - set(commands))
1732 assert not (set(aliases.keys()) & set(commands.keys()))
1737 # No option or command given
1738 raise _ShowUsage(exit_error=True)
1740 if cmd == "--version":
1741 raise _ShowVersion()
1742 elif cmd == "--help":
1743 raise _ShowUsage(exit_error=False)
1744 elif not (cmd in commands or cmd in aliases):
1745 raise _ShowUsage(exit_error=True)
1747 # get command, unalias it, and look it up in commands
1749 if aliases[cmd] not in commands:
1750 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1751 " command '%s'" % (cmd, aliases[cmd]))
1755 if cmd in env_override:
1756 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1757 env_args = os.environ.get(args_env_name)
1759 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1761 func, args_def, parser_opts, usage, description = commands[cmd]
1762 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1763 description=description,
1764 formatter=TitledHelpFormatter(),
1765 usage="%%prog %s %s" % (cmd, usage))
1766 parser.disable_interspersed_args()
1767 options, args = parser.parse_args(args=argv[2:])
1769 if not _CheckArguments(cmd, args_def, args):
1770 return None, None, None
1772 return func, options, args
1775 def _FormatUsage(binary, commands):
1776 """Generates a nice description of all commands.
1778 @param binary: Script name
1779 @param commands: Dictionary containing command definitions
1782 # compute the max line length for cmd + usage
1783 mlen = min(60, max(map(len, commands)))
1785 yield "Usage: %s {command} [options...] [argument...]" % binary
1786 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1790 # and format a nice command list
1791 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1792 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1793 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1794 for line in help_lines:
1795 yield " %-*s %s" % (mlen, "", line)
1800 def _CheckArguments(cmd, args_def, args):
1801 """Verifies the arguments using the argument definition.
1805 1. Abort with error if values specified by user but none expected.
1807 1. For each argument in definition
1809 1. Keep running count of minimum number of values (min_count)
1810 1. Keep running count of maximum number of values (max_count)
1811 1. If it has an unlimited number of values
1813 1. Abort with error if it's not the last argument in the definition
1815 1. If last argument has limited number of values
1817 1. Abort with error if number of values doesn't match or is too large
1819 1. Abort with error if user didn't pass enough values (min_count)
1822 if args and not args_def:
1823 ToStderr("Error: Command %s expects no arguments", cmd)
1830 last_idx = len(args_def) - 1
1832 for idx, arg in enumerate(args_def):
1833 if min_count is None:
1835 elif arg.min is not None:
1836 min_count += arg.min
1838 if max_count is None:
1840 elif arg.max is not None:
1841 max_count += arg.max
1844 check_max = (arg.max is not None)
1846 elif arg.max is None:
1847 raise errors.ProgrammerError("Only the last argument can have max=None")
1850 # Command with exact number of arguments
1851 if (min_count is not None and max_count is not None and
1852 min_count == max_count and len(args) != min_count):
1853 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1856 # Command with limited number of arguments
1857 if max_count is not None and len(args) > max_count:
1858 ToStderr("Error: Command %s expects only %d argument(s)",
1862 # Command with some required arguments
1863 if min_count is not None and len(args) < min_count:
1864 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: user-supplied value, possibly of the form "primary:secondary"
  @return: a (primary, secondary) pair; secondary is None when the value
    contains no ":" separator (a list is returned in the split case, a
    tuple otherwise, matching historical behaviour)

  """
  if value and ":" in value:
    # split on the first colon only; the remainder is the secondary node
    return value.split(":", 1)
  else:
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    # each variant yields a "name+variant" alias
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  @rtype: list
  @return: the default fields when C{selected} is None; the defaults
    extended by the user's comma-separated choices when C{selected}
    starts with "+"; otherwise just the user's choices

  """
  if selected is None:
    return default

  # a leading "+" means "append to the defaults" instead of replacing them
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
#: Decorator that runs the wrapped function with an initialized RPC layer
#: (alias for L{rpc.RunWithRPC})
UsesRPC = rpc.RunWithRPC
1919 def AskUser(text, choices=None):
1920 """Ask the user a question.
1922 @param text: the question to ask
1924 @param choices: list with elements tuples (input_char, return_value,
1925 description); if not given, it will default to: [('y', True,
1926 'Perform the operation'), ('n', False, 'Do no do the operation')];
1927 note that the '?' char is reserved for help
1929 @return: one of the return values from the choices list; if input is
1930 not possible (i.e. not running with a tty, we return the last
1935 choices = [("y", True, "Perform the operation"),
1936 ("n", False, "Do not perform the operation")]
1937 if not choices or not isinstance(choices, list):
1938 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1939 for entry in choices:
1940 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1941 raise errors.ProgrammerError("Invalid choices element to AskUser")
1943 answer = choices[-1][1]
1945 for line in text.splitlines():
1946 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1947 text = "\n".join(new_text)
1949 f = file("/dev/tty", "a+")
1953 chars = [entry[0] for entry in choices]
1954 chars[-1] = "[%s]" % chars[-1]
1956 maps = dict([(entry[0], entry[1]) for entry in choices])
1960 f.write("/".join(chars))
1962 line = f.readline(2).strip().lower()
1967 for entry in choices:
1968 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
1987 def SendJob(ops, cl=None):
1988 """Function to submit an opcode without waiting for the results.
1991 @param ops: list of opcodes
1992 @type cl: luxi.Client
1993 @param cl: the luxi client to use for communicating with the master;
1994 if None, a new client will be created
2000 job_id = cl.SubmitJob(ops)
2005 def GenericPollJob(job_id, cbs, report_cbs):
2006 """Generic job-polling function.
2008 @type job_id: number
2009 @param job_id: Job ID
2010 @type cbs: Instance of L{JobPollCbBase}
2011 @param cbs: Data callbacks
2012 @type report_cbs: Instance of L{JobPollReportCbBase}
2013 @param report_cbs: Reporting callbacks
2016 prev_job_info = None
2017 prev_logmsg_serial = None
2022 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2025 # job not found, go away!
2026 raise errors.JobLost("Job with id %s lost" % job_id)
2028 if result == constants.JOB_NOTCHANGED:
2029 report_cbs.ReportNotChanged(job_id, status)
2034 # Split result, a tuple of (field values, log entries)
2035 (job_info, log_entries) = result
2036 (status, ) = job_info
2039 for log_entry in log_entries:
2040 (serial, timestamp, log_type, message) = log_entry
2041 report_cbs.ReportLogMessage(job_id, serial, timestamp,
2043 prev_logmsg_serial = max(prev_logmsg_serial, serial)
2045 # TODO: Handle canceled and archived jobs
2046 elif status in (constants.JOB_STATUS_SUCCESS,
2047 constants.JOB_STATUS_ERROR,
2048 constants.JOB_STATUS_CANCELING,
2049 constants.JOB_STATUS_CANCELED):
2052 prev_job_info = job_info
2054 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2056 raise errors.JobLost("Job with id %s lost" % job_id)
2058 status, opstatus, result = jobs[0]
2060 if status == constants.JOB_STATUS_SUCCESS:
2063 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2064 raise errors.OpExecError("Job was canceled")
2067 for idx, (status, msg) in enumerate(zip(opstatus, result)):
2068 if status == constants.OP_STATUS_SUCCESS:
2070 elif status == constants.OP_STATUS_ERROR:
2071 errors.MaybeRaise(msg)
2074 raise errors.OpExecError("partial failure (opcode %d): %s" %
2077 raise errors.OpExecError(str(msg))
2079 # default failure mode
2080 raise errors.OpExecError(result)
2083 class JobPollCbBase:
2084 """Base class for L{GenericPollJob} callbacks.
2088 """Initializes this class.
2092 def WaitForJobChangeOnce(self, job_id, fields,
2093 prev_job_info, prev_log_serial):
2094 """Waits for changes on a job.
2097 raise NotImplementedError()
2099 def QueryJobs(self, job_ids, fields):
2100 """Returns the selected fields for the selected job IDs.
2102 @type job_ids: list of numbers
2103 @param job_ids: Job IDs
2104 @type fields: list of strings
2105 @param fields: Fields
2108 raise NotImplementedError()
2111 class JobPollReportCbBase:
2112 """Base class for L{GenericPollJob} reporting callbacks.
2116 """Initializes this class.
2120 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2121 """Handles a log message.
2124 raise NotImplementedError()
2126 def ReportNotChanged(self, job_id, status):
2127 """Called for if a job hasn't changed in a while.
2129 @type job_id: number
2130 @param job_id: Job ID
2131 @type status: string or None
2132 @param status: Job status if available
2135 raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    @param cl: the luxi client used for all job queries

    """
    JobPollCbBase.__init__(self)
    # keep the client; every callback below delegates to it
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
2161 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2162 def __init__(self, feedback_fn):
2163 """Initializes this class.
2166 JobPollReportCbBase.__init__(self)
2168 self.feedback_fn = feedback_fn
2170 assert callable(feedback_fn)
2172 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2173 """Handles a log message.
2176 self.feedback_fn((timestamp, log_type, log_msg))
2178 def ReportNotChanged(self, job_id, status):
2179 """Called if a job hasn't changed in a while.
2185 class StdioJobPollReportCb(JobPollReportCbBase):
2187 """Initializes this class.
2190 JobPollReportCbBase.__init__(self)
2192 self.notified_queued = False
2193 self.notified_waitlock = False
2195 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2196 """Handles a log message.
2199 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2200 FormatLogMessage(log_type, log_msg))
2202 def ReportNotChanged(self, job_id, status):
2203 """Called if a job hasn't changed in a while.
2209 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2210 ToStderr("Job %s is waiting in queue", job_id)
2211 self.notified_queued = True
2213 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2214 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2215 self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @type log_type: string
  @param log_type: log entry type, one of C{constants.ELOG_*}
  @param log_msg: the message payload
  @rtype: string
  @return: the message, safely encoded for terminal output

  """
  # non-ELOG_MESSAGE payloads may be arbitrary (serialized) objects,
  # so stringify them before encoding
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
2228 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2229 """Function to poll for the result of a job.
2231 @type job_id: job identified
2232 @param job_id: the job to poll for results
2233 @type cl: luxi.Client
2234 @param cl: the luxi client to use for communicating with the master;
2235 if None, a new client will be created
2241 if reporter is None:
2243 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2245 reporter = StdioJobPollReportCb()
2247 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2249 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2252 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2253 """Legacy function to submit an opcode.
2255 This is just a simple wrapper over the construction of the processor
2256 instance. It should be extended to better handle feedback and
2257 interaction functions.
2263 SetGenericOpcodeOpts([op], opts)
2265 job_id = SendJob([op], cl=cl)
2266 if hasattr(opts, "print_jobid") and opts.print_jobid:
2267 ToStdout("%d" % job_id)
2269 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2272 return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: in submit-only mode, carrying the job ID

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    if opts.print_jobid:
      ToStdout("%d" % job_id)
    # structured exit: the caller's handler prints the job ID
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2298 def _InitReasonTrail(op, opts):
2299 """Builds the first part of the reason trail
2301 Builds the initial part of the reason trail, adding the user provided reason
2302 (if it exists) and the name of the command starting the operation.
2304 @param op: the opcode the reason trail will be added to
2305 @param opts: the command line options selected by the user
2308 assert len(sys.argv) >= 2
2312 trail.append((constants.OPCODE_REASON_SRC_USER,
2316 binary = os.path.basename(sys.argv[0])
2317 source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2318 command = sys.argv[1]
2319 trail.append((source, command, utils.EpochNano()))
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    # only override the opcode priority when one was explicitly given
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)
2345 def GetClient(query=False):
2346 """Connects to the a luxi socket and returns a client.
2348 @type query: boolean
2349 @param query: this signifies that the client will only be
2350 used for queries; if the build-time parameter
2351 enable-split-queries is enabled, then the client will be
2352 connected to the query socket instead of the masterd socket
2355 override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2357 if override_socket == constants.LUXI_OVERRIDE_MASTER:
2358 address = pathutils.MASTER_SOCKET
2359 elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2360 address = pathutils.QUERY_SOCKET
2362 address = override_socket
2363 elif query and constants.ENABLE_SPLIT_QUERY:
2364 address = pathutils.QUERY_SOCKET
2367 # TODO: Cache object?
2369 client = luxi.Client(address=address)
2370 except luxi.NoMasterError:
2371 ss = ssconf.SimpleStore()
2373 # Try to read ssconf file
2376 except errors.ConfigurationError:
2377 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2378 " not part of a cluster",
2381 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2382 if master != myself:
2383 raise errors.OpPrereqError("This is not the master node, please connect"
2384 " to node '%s' and rerun the command" %
2385 master, errors.ECODE_INVAL)
# Central exception-to-message translator for all gnt-* tools: walks an
# isinstance chain from most to least specific error class and writes a
# user-facing description into an output buffer.
# NOTE(review): the initialisation of msg, retcode and obuf happens on
# lines elided from this excerpt -- confirm in the full file.
2390 def FormatError(err):
2391 """Return a formatted error message for a given error.
2393 This function takes an exception instance and returns a tuple
2394 consisting of two values: first, the recommended exit code, and
2395 second, a string describing the error message (not
2396 newline-terminated).
2402 if isinstance(err, errors.ConfigurationError):
2403 txt = "Corrupt configuration file: %s" % msg
2405 obuf.write(txt + "\n")
2406 obuf.write("Aborting.")
2408 elif isinstance(err, errors.HooksAbort):
2409 obuf.write("Failure: hooks execution failed:\n")
2410 for node, script, out in err.args[0]:
2412 obuf.write(" node: %s, script: %s, output: %s\n" %
2413 (node, script, out))
2415 obuf.write(" node: %s, script: %s (no output)\n" %
2417 elif isinstance(err, errors.HooksFailure):
2418 obuf.write("Failure: hooks general failure: %s" % msg)
2419 elif isinstance(err, errors.ResolverError):
# Distinguish "can't resolve my own hostname" from a generic lookup error.
2420 this_host = netutils.Hostname.GetSysName()
2421 if err.args[0] == this_host:
2422 msg = "Failure: can't resolve my own hostname ('%s')"
2424 msg = "Failure: can't resolve hostname '%s'"
2425 obuf.write(msg % err.args[0])
2426 elif isinstance(err, errors.OpPrereqError):
# A two-argument OpPrereqError carries (details, error-code); show both.
2427 if len(err.args) == 2:
2428 obuf.write("Failure: prerequisites not met for this"
2429 " operation:\nerror type: %s, error details:\n%s" %
2430 (err.args[1], err.args[0]))
2432 obuf.write("Failure: prerequisites not met for this"
2433 " operation:\n%s" % msg)
2434 elif isinstance(err, errors.OpExecError):
2435 obuf.write("Failure: command execution error:\n%s" % msg)
2436 elif isinstance(err, errors.TagError):
2437 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2438 elif isinstance(err, errors.JobQueueDrainError):
2439 obuf.write("Failure: the job queue is marked for drain and doesn't"
2440 " accept new requests\n")
2441 elif isinstance(err, errors.JobQueueFull):
2442 obuf.write("Failure: the job queue is full and doesn't accept new"
2443 " job submissions until old jobs are archived\n")
2444 elif isinstance(err, errors.TypeEnforcementError):
2445 obuf.write("Parameter Error: %s" % msg)
2446 elif isinstance(err, errors.ParameterError):
2447 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2448 elif isinstance(err, luxi.NoMasterError):
# Name the daemon that is unreachable based on which socket path failed.
2449 if err.args[0] == pathutils.MASTER_SOCKET:
2450 daemon = "the master daemon"
2451 elif err.args[0] == pathutils.QUERY_SOCKET:
2452 daemon = "the config daemon"
2454 daemon = "socket '%s'" % str(err.args[0])
2455 obuf.write("Cannot communicate with %s.\nIs the process running"
2456 " and listening for connections?" % daemon)
2457 elif isinstance(err, luxi.TimeoutError):
2458 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2459 " been submitted and will continue to run even if the call"
2460 " timed out. Useful commands in this situation are \"gnt-job"
2461 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2463 elif isinstance(err, luxi.PermissionError):
2464 obuf.write("It seems you don't have permissions to connect to the"
2465 " master daemon.\nPlease retry as a different user.")
2466 elif isinstance(err, luxi.ProtocolError):
2467 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2469 elif isinstance(err, errors.JobLost):
2470 obuf.write("Error checking job status: %s" % msg)
2471 elif isinstance(err, errors.QueryFilterParseError):
2472 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2473 obuf.write("\n".join(err.GetDetails()))
# GenericError is the base class, so it must come after all its subclasses.
2474 elif isinstance(err, errors.GenericError):
2475 obuf.write("Unhandled Ganeti error: %s" % msg)
2476 elif isinstance(err, JobSubmittedException):
2477 obuf.write("JobID: %s\n" % err.args[0])
2480 obuf.write("Unhandled exception: %s" % msg)
2481 return retcode, obuf.getvalue().rstrip("\n")
# Shared entry point for every gnt-* binary: parse argv, apply caller
# overrides onto the options object, set up command logging, dispatch to
# the selected command function, and map exceptions to exit codes.
# Uses Python 2 "except X, err" syntax throughout.
2484 def GenericMain(commands, override=None, aliases=None,
2485 env_override=frozenset()):
2486 """Generic main function for all the gnt-* commands.
2488 @param commands: a dictionary with a special structure, see the design doc
2489 for command line handling.
2490 @param override: if not None, we expect a dictionary with keys that will
2491 override command line options; this can be used to pass
2492 options from the scripts to generic functions
2493 @param aliases: dictionary with command aliases {'alias': 'target', ...}
2494 @param env_override: list of environment names which are allowed to submit
2495 default args for commands
2498 # save the program name and the entire command line for later logging
2500 binary = os.path.basename(sys.argv[0])
2502 binary = sys.argv[0]
2504 if len(sys.argv) >= 2:
2505 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2509 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2511 binary = "<unknown program>"
2512 cmdline = "<unknown>"
2518 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
# --version and --help are signalled via private exceptions from _ParseArgs.
2520 except _ShowVersion:
2521 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2522 constants.RELEASE_VERSION)
2523 return constants.EXIT_SUCCESS
2524 except _ShowUsage, err:
2525 for line in _FormatUsage(binary, commands):
2529 return constants.EXIT_FAILURE
2531 return constants.EXIT_SUCCESS
2532 except errors.ParameterError, err:
2533 result, err_msg = FormatError(err)
2537 if func is None: # parse error
# Caller-supplied overrides win over anything given on the command line.
2540 if override is not None:
2541 for key, val in override.iteritems():
2542 setattr(options, key, val)
2544 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2545 stderr_logging=True)
2547 logging.info("Command line: %s", cmdline)
2550 result = func(options, args)
2551 except (errors.GenericError, luxi.ProtocolError,
2552 JobSubmittedException), err:
2553 result, err_msg = FormatError(err)
2554 logging.exception("Error during command processing")
2556 except KeyboardInterrupt:
2557 result = constants.EXIT_FAILURE
2558 ToStderr("Aborted. Note that if the operation created any jobs, they"
2559 " might have been submitted and"
2560 " will continue to run in the background.")
2561 except IOError, err:
2562 if err.errno == errno.EPIPE:
2563 # our terminal went away, we'll exit
2564 sys.exit(constants.EXIT_FAILURE)
# Converts the list of (index, params-dict) pairs from --net into a dense
# list of NIC parameter dicts, validating indices and value types.
2571 def ParseNicOption(optvalue):
2572 """Parses the value of the --net option(s).
2576 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2577 except (TypeError, ValueError), err:
2578 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
# NOTE(review): "[{}] * nic_max" makes every slot reference the SAME dict;
# presumably an elided line assigns nics[nidx] = ndict per entry, which
# replaces the shared placeholders -- confirm against the full file.
2581 nics = [{}] * nic_max
2582 for nidx, ndict in optvalue:
2585 if not isinstance(ndict, dict):
2586 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2587 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2589 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
# Shared implementation of "gnt-instance add" and "gnt-backup import":
# validates node/NIC/disk options, builds an OpInstanceCreate opcode and
# submits (or prints) it via SubmitOrSend.
2596 def GenericInstanceCreate(mode, opts, args):
2597 """Add an instance to the cluster via either creation or import.
2599 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2600 @param opts: the command line options selected by the user
2602 @param args: should contain only one element, the new instance name
2604 @return: the desired exit code
2609 (pnode, snode) = SplitNodeOption(opts.node)
2614 hypervisor, hvparams = opts.hypervisor
2617 nics = ParseNicOption(opts.nics)
2621 elif mode == constants.INSTANCE_CREATE:
2622 # default of one nic, all auto
# Disk option consistency checks: diskless excludes any disk info;
# otherwise exactly one of --disk and -s (sd_size) must be used.
2628 if opts.disk_template == constants.DT_DISKLESS:
2629 if opts.disks or opts.sd_size is not None:
2630 raise errors.OpPrereqError("Diskless instance but disk"
2631 " information passed", errors.ECODE_INVAL)
2634 if (not opts.disks and not opts.sd_size
2635 and mode == constants.INSTANCE_CREATE):
2636 raise errors.OpPrereqError("No disk information specified",
2638 if opts.disks and opts.sd_size is not None:
2639 raise errors.OpPrereqError("Please use either the '--disk' or"
2640 " '-s' option", errors.ECODE_INVAL)
# Legacy -s shorthand becomes a single disk 0 of the given size.
2641 if opts.sd_size is not None:
2642 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2646 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2647 except ValueError, err:
2648 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
# NOTE(review): as with ParseNicOption, "[{}] * disk_max" shares one dict;
# the per-index assignment appears on lines elided from this excerpt.
2650 disks = [{}] * disk_max
2653 for didx, ddict in opts.disks:
2655 if not isinstance(ddict, dict):
2656 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2657 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2658 elif constants.IDISK_SIZE in ddict:
2659 if constants.IDISK_ADOPT in ddict:
2660 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2661 " (disk %d)" % didx, errors.ECODE_INVAL)
2663 ddict[constants.IDISK_SIZE] = \
2664 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2665 except ValueError, err:
2666 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2667 (didx, err), errors.ECODE_INVAL)
2668 elif constants.IDISK_ADOPT in ddict:
2669 if constants.IDISK_SPINDLES in ddict:
2670 raise errors.OpPrereqError("spindles is not a valid option when"
2671 " adopting a disk", errors.ECODE_INVAL)
2672 if mode == constants.INSTANCE_IMPORT:
2673 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2674 " import", errors.ECODE_INVAL)
# Adopted disks keep their existing size; 0 means "take it as-is".
2675 ddict[constants.IDISK_SIZE] = 0
2677 raise errors.OpPrereqError("Missing size or adoption source for"
2678 " disk %d" % didx, errors.ECODE_INVAL)
2681 if opts.tags is not None:
2682 tags = opts.tags.split(",")
2686 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2687 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
# Mode-specific knobs: create honours OS install options, import carries
# the source node/directory and may identify default parameter values.
2689 if mode == constants.INSTANCE_CREATE:
2692 force_variant = opts.force_variant
2695 no_install = opts.no_install
2696 identify_defaults = False
2697 elif mode == constants.INSTANCE_IMPORT:
2700 force_variant = False
2701 src_node = opts.src_node
2702 src_path = opts.src_dir
2704 identify_defaults = opts.identify_defaults
2706 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2708 op = opcodes.OpInstanceCreate(instance_name=instance,
2710 disk_template=opts.disk_template,
2712 conflicts_check=opts.conflicts_check,
2713 pnode=pnode, snode=snode,
2714 ip_check=opts.ip_check,
2715 name_check=opts.name_check,
2716 wait_for_sync=opts.wait_for_sync,
2717 file_storage_dir=opts.file_storage_dir,
2718 file_driver=opts.file_driver,
2719 iallocator=opts.iallocator,
2720 hypervisor=hypervisor,
2722 beparams=opts.beparams,
2723 osparams=opts.osparams,
2727 force_variant=force_variant,
2731 no_install=no_install,
2732 identify_defaults=identify_defaults,
2733 ignore_ipolicy=opts.ignore_ipolicy)
2735 SubmitOrSend(op, opts)
# State holder for RunWhileClusterStopped: knows how to run commands
# locally (master) or over SSH (other nodes), and orchestrates the
# stop-everything / call / restart-everything sequence in Call().
2739 class _RunWhileClusterStoppedHelper:
2740 """Helper class for L{RunWhileClusterStopped} to simplify state management
2743 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2744 """Initializes this class.
2746 @type feedback_fn: callable
2747 @param feedback_fn: Feedback function
2748 @type cluster_name: string
2749 @param cluster_name: Cluster name
2750 @type master_node: string
2751 @param master_node: Master node name
2752 @type online_nodes: list
2753 @param online_nodes: List of names of online nodes
2756 self.feedback_fn = feedback_fn
2757 self.cluster_name = cluster_name
2758 self.master_node = master_node
2759 self.online_nodes = online_nodes
2761 self.ssh = ssh.SshRunner(self.cluster_name)
2763 self.nonmaster_nodes = [name for name in online_nodes
2764 if name != master_node]
2766 assert self.master_node not in self.nonmaster_nodes
2768 def _RunCmd(self, node_name, cmd):
2769 """Runs a command on the local or a remote machine.
2771 @type node_name: string
2772 @param node_name: Machine name
# Run locally when targeting the master (or None); otherwise go via SSH.
2777 if node_name is None or node_name == self.master_node:
2778 # No need to use SSH
2779 result = utils.RunCmd(cmd)
2781 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2782 utils.ShellQuoteArgs(cmd))
2785 errmsg = ["Failed to run command %s" % result.cmd]
2787 errmsg.append("on node %s" % node_name)
2788 errmsg.append(": exitcode %s and error %s" %
2789 (result.exit_code, result.output))
2790 raise errors.OpExecError(" ".join(errmsg))
2792 def Call(self, fn, *args):
2793 """Call function while all daemons are stopped.
2796 @param fn: Function to be called
2799 # Pause watcher by acquiring an exclusive lock on watcher state file
2800 self.feedback_fn("Blocking watcher")
2801 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2803 # TODO: Currently, this just blocks. There's no timeout.
2804 # TODO: Should it be a shared lock?
2805 watcher_block.Exclusive(blocking=True)
2807 # Stop master daemons, so that no new jobs can come in and all running
2809 self.feedback_fn("Stopping master daemons")
2810 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2812 # Stop daemons on all nodes
2813 for node_name in self.online_nodes:
2814 self.feedback_fn("Stopping daemons on %s" % node_name)
2815 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2817 # All daemons are shut down now
2819 return fn(self, *args)
2820 except Exception, err:
# Report but do not re-raise here: daemons are restarted below regardless.
2821 _, errmsg = FormatError(err)
2822 logging.exception("Caught exception")
2823 self.feedback_fn(errmsg)
2826 # Start cluster again, master node last
2827 for node_name in self.nonmaster_nodes + [self.master_node]:
2828 self.feedback_fn("Starting daemons on %s" % node_name)
2829 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2832 watcher_block.Close()
# Public wrapper: gathers cluster name, master node and online-node list,
# then delegates the stop/call/restart dance to the helper class above.
2835 def RunWhileClusterStopped(feedback_fn, fn, *args):
2836 """Calls a function while all cluster daemons are stopped.
2838 @type feedback_fn: callable
2839 @param feedback_fn: Feedback function
2841 @param fn: Function to be called when daemons are stopped
2844 feedback_fn("Gathering cluster information")
2846 # This ensures we're running on the master daemon
2849 (cluster_name, master_node) = \
2850 cl.QueryConfigValues(["cluster_name", "master_node"])
2852 online_nodes = GetOnlineNodes([], cl=cl)
2854 # Don't keep a reference to the client. The master daemon will go away.
2857 assert master_node in online_nodes
2859 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2860 online_nodes).Call(fn, *args)
# Legacy text-table renderer: either separator-joined rows or a "smart"
# layout that measures maximum column widths, right-aligning numeric and
# unit fields.  Returns the formatted lines via a result list.
2863 def GenerateTable(headers, fields, separator, data,
2864 numfields=None, unitfields=None,
2866 """Prints a table with headers and different fields.
2869 @param headers: dictionary mapping field names to headers for
2872 @param fields: the field names corresponding to each row in
2874 @param separator: the separator to be used; if this is None,
2875 the default 'smart' algorithm is used which computes optimal
2876 field width, otherwise just the separator is used between
2879 @param data: a list of lists, each sublist being one row to be output
2880 @type numfields: list
2881 @param numfields: a list with the fields that hold numeric
2882 values and thus should be right-aligned
2883 @type unitfields: list
2884 @param unitfields: a list with the fields that hold numeric
2885 values that should be formatted with the units field
2886 @type units: string or None
2887 @param units: the units we should use for formatting, or None for
2888 automatic choice (human-readable for non-separator usage, otherwise
2889 megabytes); this is a one-letter string
2898 if numfields is None:
2900 if unitfields is None:
2903 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2904 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
# Build per-column printf-style conversion specs; %*s takes the computed
# width as an argument (right-aligned), %-*s left-aligns.
2907 for field in fields:
2908 if headers and field not in headers:
2909 # TODO: handle better unknown fields (either revert to old
2910 # style of raising exception, or deal more intelligently with
2912 headers[field] = field
2913 if separator is not None:
2914 format_fields.append("%s")
2915 elif numfields.Matches(field):
2916 format_fields.append("%*s")
2918 format_fields.append("%-*s")
2920 if separator is None:
2921 mlens = [0 for name in fields]
2922 format_str = " ".join(format_fields)
# Escape literal % in a user-supplied separator before joining specs.
2924 format_str = separator.replace("%", "%%").join(format_fields)
2929 for idx, val in enumerate(row):
2930 if unitfields.Matches(fields[idx]):
2933 except (TypeError, ValueError):
2936 val = row[idx] = utils.FormatUnit(val, units)
2937 val = row[idx] = str(val)
2938 if separator is None:
# Track the widest value seen per column for the smart layout.
2939 mlens[idx] = max(mlens[idx], len(val))
2944 for idx, name in enumerate(fields):
2946 if separator is None:
2947 mlens[idx] = max(mlens[idx], len(hdr))
2948 args.append(mlens[idx])
2950 result.append(format_str % tuple(args))
2952 if separator is None:
2953 assert len(mlens) == len(fields)
2955 if fields and not numfields.Matches(fields[-1]):
2961 line = ["-" for _ in fields]
2962 for idx in range(len(fields)):
2963 if separator is None:
2964 args.append(mlens[idx])
2965 args.append(line[idx])
2966 result.append(format_str % tuple(args))
# Boolean-to-string formatter used by _DEFAULT_FORMAT_QUERY below.
# NOTE(review): the function body is elided from this excerpt.
2971 def _FormatBool(value):
2972 """Formats a boolean value as a string.
2980 #: Default formatting for query results; (callback, align right)
2981 _DEFAULT_FORMAT_QUERY = {
2982 constants.QFT_TEXT: (str, False),
2983 constants.QFT_BOOL: (_FormatBool, False),
# Numbers are right-aligned; everything else is left-aligned.
2984 constants.QFT_NUMBER: (str, True),
2985 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2986 constants.QFT_OTHER: (str, False),
2987 constants.QFT_UNKNOWN: (str, False),
# Resolves a field's formatter: per-field override first, then a special
# case for QFT_UNIT (needs the runtime unit choice), then the static
# defaults table; unknown kinds are a programming error.
2991 def _GetColumnFormatter(fdef, override, unit):
2992 """Returns formatting function for a field.
2994 @type fdef: L{objects.QueryFieldDefinition}
2995 @type override: dict
2996 @param override: Dictionary for overriding field formatting functions,
2997 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2999 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3000 @rtype: tuple; (callable, bool)
3001 @return: Returns the function to format a value (takes one parameter) and a
3002 boolean for aligning the value on the right-hand side
3005 fmt = override.get(fdef.name, None)
3009 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3011 if fdef.kind == constants.QFT_UNIT:
3012 # Can't keep this information in the static dictionary
3013 return (lambda value: utils.FormatUnit(value, unit), True)
3015 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3019 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# Callable wrapper around a field formatter: records each cell's result
# status via status_fn, formats normal values with fn and abnormal ones
# with FormatResultError.
3022 class _QueryColumnFormatter:
3023 """Callable class for formatting fields of a query.
3026 def __init__(self, fn, status_fn, verbose):
3027 """Initializes this class.
3030 @param fn: Formatting function
3031 @type status_fn: callable
3032 @param status_fn: Function to report fields' status
3033 @type verbose: boolean
3034 @param verbose: whether to use verbose field descriptions or not
3038 self._status_fn = status_fn
3039 self._verbose = verbose
3041 def __call__(self, data):
3042 """Returns a field's string representation.
3045 (status, value) = data
# Side effect: feed the status into the caller's statistics collector.
3048 self._status_fn(status)
3050 if status == constants.RS_NORMAL:
3051 return self._fn(value)
3053 assert value is None, \
3054 "Found value %r for abnormal status %s" % (value, status)
3056 return FormatResultError(status, self._verbose)
# Maps an abnormal result status to its (verbose or terse) description
# from constants.RSS_DESCRIPTION; unknown statuses are a programming error.
3059 def FormatResultError(status, verbose):
3060 """Formats result status other than L{constants.RS_NORMAL}.
3062 @param status: The result status
3063 @type verbose: boolean
3064 @param verbose: Whether to return the verbose text
3065 @return: Text of result status
3068 assert status != constants.RS_NORMAL, \
3069 "FormatResultError called with status equal to constants.RS_NORMAL"
3071 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3073 raise NotImplementedError("Unknown status %s" % status)
# Turns a QueryResponse into table lines plus an overall status
# (complete / incomplete / unknown fields), collecting per-cell result
# statuses while formatting.
3080 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3081 header=False, verbose=False):
3082 """Formats data in L{objects.QueryResponse}.
3084 @type result: L{objects.QueryResponse}
3085 @param result: result of query operation
3087 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3088 see L{utils.text.FormatUnit}
3089 @type format_override: dict
3090 @param format_override: Dictionary for overriding field formatting functions,
3091 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3092 @type separator: string or None
3093 @param separator: String used to separate fields
3095 @param header: Whether to output header row
3096 @type verbose: boolean
3097 @param verbose: whether to use verbose field descriptions or not
3106 if format_override is None:
3107 format_override = {}
# One counter per possible result status; _RecordStatus increments them
# as _QueryColumnFormatter formats each cell.
3109 stats = dict.fromkeys(constants.RS_ALL, 0)
3111 def _RecordStatus(status):
3116 for fdef in result.fields:
3117 assert fdef.title and fdef.name
3118 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3119 columns.append(TableColumn(fdef.title,
3120 _QueryColumnFormatter(fn, _RecordStatus,
3124 table = FormatTable(result.data, columns, header, separator)
3126 # Collect statistics
3127 assert len(stats) == len(constants.RS_ALL)
3128 assert compat.all(count >= 0 for count in stats.values())
3130 # Determine overall status. If there was no data, unknown fields must be
3131 # detected via the field definitions.
3132 if (stats[constants.RS_UNKNOWN] or
3133 (not result.data and _GetUnknownFields(result.fields))):
3135 elif compat.any(count > 0 for key, count in stats.items()
3136 if key != constants.RS_NORMAL):
3137 status = QR_INCOMPLETE
3141 return (status, table)
# Filters the field definitions down to those the server flagged unknown.
3144 def _GetUnknownFields(fdefs):
3145 """Returns list of unknown fields included in C{fdefs}.
3147 @type fdefs: list of L{objects.QueryFieldDefinition}
3150 return [fdef for fdef in fdefs
3151 if fdef.kind == constants.QFT_UNKNOWN]
# Emits a stderr warning listing any unknown fields in a query response.
3154 def _WarnUnknownFields(fdefs):
3155 """Prints a warning to stderr if a query included unknown fields.
3157 @type fdefs: list of L{objects.QueryFieldDefinition}
3160 unknown = _GetUnknownFields(fdefs)
3162 ToStderr("Warning: Queried for unknown fields %s",
3163 utils.CommaJoin(fdef.name for fdef in unknown))
# Generic "gnt-* list" implementation: builds a name filter (optionally
# ANDed with a caller filter), runs the LUXI query, formats the result
# and maps unknown fields to EXIT_UNKNOWN_FIELD.
3169 def GenericList(resource, fields, names, unit, separator, header, cl=None,
3170 format_override=None, verbose=False, force_filter=False,
3171 namefield=None, qfilter=None, isnumeric=False):
3172 """Generic implementation for listing all items of a resource.
3174 @param resource: One of L{constants.QR_VIA_LUXI}
3175 @type fields: list of strings
3176 @param fields: List of fields to query for
3177 @type names: list of strings
3178 @param names: Names of items to query for
3179 @type unit: string or None
3180 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3181 None for automatic choice (human-readable for non-separator usage,
3182 otherwise megabytes); this is a one-letter string
3183 @type separator: string or None
3184 @param separator: String used to separate fields
3186 @param header: Whether to show header row
3187 @type force_filter: bool
3188 @param force_filter: Whether to always treat names as filter
3189 @type format_override: dict
3190 @param format_override: Dictionary for overriding field formatting functions,
3191 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3192 @type verbose: boolean
3193 @param verbose: whether to use verbose field descriptions or not
3194 @type namefield: string
3195 @param namefield: Name of field to use for simple filters (see
3196 L{qlang.MakeFilter} for details)
3197 @type qfilter: list or None
3198 @param qfilter: Query filter (in addition to names)
3199 @param isnumeric: bool
3200 @param isnumeric: Whether the namefield's type is numeric, and therefore
3201 any simple filters built by namefield should use integer values to
3208 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3209 isnumeric=isnumeric)
3212 qfilter = namefilter
3213 elif namefilter is not None:
# Combine the name-based filter with the caller-supplied one.
3214 qfilter = [qlang.OP_AND, namefilter, qfilter]
3219 response = cl.Query(resource, fields, qfilter)
3221 found_unknown = _WarnUnknownFields(response.fields)
3223 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3225 format_override=format_override,
# Sanity check: the warning and the formatter must agree on unknown fields.
3231 assert ((found_unknown and status == QR_UNKNOWN) or
3232 (not found_unknown and status != QR_UNKNOWN))
3234 if status == QR_UNKNOWN:
3235 return constants.EXIT_UNKNOWN_FIELD
3237 # TODO: Should the list command fail if not all data could be collected?
3238 return constants.EXIT_SUCCESS
# Produces the row values for one field in "list-fields" output; maps the
# field kind through _QFT_NAMES, falling back to the raw kind value.
3241 def _FieldDescValues(fdef):
3242 """Helper function for L{GenericListFields} to get query field description.
3244 @type fdef: L{objects.QueryFieldDefinition}
3250 _QFT_NAMES.get(fdef.kind, fdef.kind),
# Generic "gnt-* list-fields": queries the field definitions for a
# resource and prints a four-column Name/Type/Title/Description table.
3256 def GenericListFields(resource, fields, separator, header, cl=None):
3257 """Generic implementation for listing fields for a resource.
3259 @param resource: One of L{constants.QR_VIA_LUXI}
3260 @type fields: list of strings
3261 @param fields: List of fields to query for
3262 @type separator: string or None
3263 @param separator: String used to separate fields
3265 @param header: Whether to show header row
3274 response = cl.QueryFields(resource, fields)
3276 found_unknown = _WarnUnknownFields(response.fields)
3279 TableColumn("Name", str, False),
3280 TableColumn("Type", str, False),
3281 TableColumn("Title", str, False),
3282 TableColumn("Description", str, False),
3285 rows = map(_FieldDescValues, response.fields)
3287 for line in FormatTable(rows, columns, header, separator):
3291 return constants.EXIT_UNKNOWN_FIELD
3293 return constants.EXIT_SUCCESS
# NOTE(review): the "class TableColumn" statement itself is elided from
# this excerpt; the lines below are its docstring and constructor.
3297 """Describes a column for L{FormatTable}.
3300 def __init__(self, title, fn, align_right):
3301 """Initializes this class.
3304 @param title: Column title
3306 @param fn: Formatting function
3307 @type align_right: bool
3308 @param align_right: Whether to align values on the right-hand side
3313 self.align_right = align_right
# Builds a %-style conversion spec of the given width; the sign prefix
# (empty or "-") is computed from align_right on lines elided here.
3316 def _GetColFormatString(width, align_right):
3317 """Returns the format string for a field.
3325 return "%%%s%ss" % (sign, width)
# Modern table renderer (companion to GenerateTable): formats each cell
# via its TableColumn, then either joins with a separator or pads to the
# measured column widths.
3328 def FormatTable(rows, columns, header, separator):
3329 """Formats data as a table.
3331 @type rows: list of lists
3332 @param rows: Row data, one list per row
3333 @type columns: list of L{TableColumn}
3334 @param columns: Column descriptions
3336 @param header: Whether to show header row
3337 @type separator: string or None
3338 @param separator: String used to separate columns
3342 data = [[col.title for col in columns]]
3343 colwidth = [len(col.title) for col in columns]
3346 colwidth = [0 for _ in columns]
3350 assert len(row) == len(columns)
3352 formatted = [col.format(value) for value, col in zip(row, columns)]
3354 if separator is None:
3355 # Update column widths
3356 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3357 # Modifying a list's items while iterating is fine
3358 colwidth[idx] = max(oldwidth, len(value))
3360 data.append(formatted)
3362 if separator is not None:
3363 # Return early if a separator is used
3364 return [separator.join(row) for row in data]
3366 if columns and not columns[-1].align_right:
3367 # Avoid unnecessary spaces at end of line
3370 # Build format string
3371 fmt = " ".join([_GetColFormatString(width, col.align_right)
3372 for col, width in zip(columns, colwidth)])
3374 return [fmt % tuple(row) for row in data]
# Renders a (seconds, microseconds) pair through utils.FormatTime;
# non-conforming input is handled on lines elided from this excerpt.
3377 def FormatTimestamp(ts):
3378 """Formats a given timestamp.
3381 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3384 @return: a string with the formatted timestamp
3387 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3391 return utils.FormatTime(sec, usecs=usecs)
# Parses values like "2d" or "30m" into seconds using a suffix-to-
# multiplier map; a bare number is taken as seconds.
3394 def ParseTimespec(value):
3395 """Parse a time specification.
3397 The following suffixes will be recognized:
3405 Without any suffix, the value will be taken to be in seconds.
3410 raise errors.OpPrereqError("Empty time specification passed",
3419 if value[-1] not in suffix_map:
3422 except (TypeError, ValueError):
3423 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3426 multiplier = suffix_map[value[-1]]
# The suffix is stripped off value on a line elided from this excerpt.
3428 if not value: # no data left after stripping the suffix
3429 raise errors.OpPrereqError("Invalid time specification (only"
3430 " suffix passed)", errors.ECODE_INVAL)
3432 value = int(value) * multiplier
3433 except (TypeError, ValueError):
3434 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
# Queries node name/offline/sip, optionally restricted by name list,
# node group or "not master", partitions offline vs online, warns about
# skipped offline nodes, and returns names or secondary IPs.
3439 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3440 filter_master=False, nodegroup=None):
3441 """Returns the names of online nodes.
3443 This function will also log a warning on stderr with the names of
3446 @param nodes: if not empty, use only this subset of nodes (minus the
3448 @param cl: if not None, luxi client to use
3449 @type nowarn: boolean
3450 @param nowarn: by default, this function will output a note with the
3451 offline nodes that are skipped; if this parameter is True the
3452 note is not displayed
3453 @type secondary_ips: boolean
3454 @param secondary_ips: if True, return the secondary IPs instead of the
3455 names, useful for doing network traffic over the replication interface
3457 @type filter_master: boolean
3458 @param filter_master: if True, do not return the master node in the list
3459 (useful in coordination with secondary_ips where we cannot check our
3460 node name against the list)
3461 @type nodegroup: string
3462 @param nodegroup: If set, only return nodes in this node group
3471 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3473 if nodegroup is not None:
# A nodegroup argument may be either a group name or a group UUID.
3474 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3475 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3478 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3481 if len(qfilter) > 1:
3482 final_filter = [qlang.OP_AND] + qfilter
3484 assert len(qfilter) == 1
3485 final_filter = qfilter[0]
3489 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# Each row is [(status, name), (status, offline), (status, sip)];
# the helpers below unpack the value they need.
3491 def _IsOffline(row):
3492 (_, (_, offline), _) = row
3496 ((_, name), _, _) = row
3500 (_, _, (_, sip)) = row
3503 (offline, online) = compat.partition(result.data, _IsOffline)
3505 if offline and not nowarn:
3506 ToStderr("Note: skipping offline node(s): %s" %
3507 utils.CommaJoin(map(_GetName, offline)))
3514 return map(fn, online)
# Low-level writer behind ToStdout/ToStderr: %-interpolates args into
# txt, writes to the stream, and exits quietly on a broken pipe (EPIPE).
3517 def _ToStream(stream, txt, *args):
3518 """Write a message to a stream, bypassing the logging system
3520 @type stream: file object
3521 @param stream: the file to which we should write
3523 @param txt: the message
3529 stream.write(txt % args)
3534 except IOError, err:
3535 if err.errno == errno.EPIPE:
3536 # our terminal went away, we'll exit
3537 sys.exit(constants.EXIT_FAILURE)
# Thin stdout wrapper around _ToStream.
3542 def ToStdout(txt, *args):
3543 """Write a message to stdout only, bypassing the logging system
3545 This is just a wrapper over _ToStream.
3548 @param txt: the message
3551 _ToStream(sys.stdout, txt, *args)
# Thin stderr wrapper around _ToStream.
3554 def ToStderr(txt, *args):
3555 """Write a message to stderr only, bypassing the logging system
3557 This is just a wrapper over _ToStream.
3560 @param txt: the message
3563 _ToStream(sys.stderr, txt, *args)
3566 class JobExecutor(object):
3567 """Class which manages the submission and execution of multiple jobs.
3569 Note that instances of this class should not be reused between
3573 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3578 self.verbose = verbose
3581 self.feedback_fn = feedback_fn
3582 self._counter = itertools.count()
3585 def _IfName(name, fmt):
3586 """Helper function for formatting name.
3594 def QueueJob(self, name, *ops):
3595 """Record a job for later submit.
3598 @param name: a description of the job, will be used in WaitJobSet
3601 SetGenericOpcodeOpts(ops, self.opts)
3602 self.queue.append((self._counter.next(), name, ops))
3604 def AddJobId(self, name, status, job_id):
3605 """Adds a job ID to the internal queue.
3608 self.jobs.append((self._counter.next(), status, job_id, name))
3610 def SubmitPending(self, each=False):
3611 """Submit all pending jobs.
3616 for (_, _, ops) in self.queue:
3617 # SubmitJob will remove the success status, but raise an exception if
3618 # the submission fails, so we'll notice that anyway.
3619 results.append([True, self.cl.SubmitJob(ops)[0]])
3621 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3622 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3623 self.jobs.append((idx, status, data, name))
3625 def _ChooseJob(self):
3626 """Choose a non-waiting/queued job to poll next.
3629 assert self.jobs, "_ChooseJob called with empty job list"
3631 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3635 for job_data, status in zip(self.jobs, result):
3636 if (isinstance(status, list) and status and
3637 status[0] in (constants.JOB_STATUS_QUEUED,
3638 constants.JOB_STATUS_WAITING,
3639 constants.JOB_STATUS_CANCELING)):
3640 # job is still present and waiting
3642 # good candidate found (either running job or lost job)
3643 self.jobs.remove(job_data)
3647 return self.jobs.pop(0)
# Drive all jobs to completion: submit anything still pending, report
# submission failures on stderr, then repeatedly pick a job via _ChooseJob
# and poll it, accumulating (idx, success, result) and finally returning the
# results sorted back into queueing order with the index stripped.
# NOTE(review): this listing is heavily elided (the surrounding while-loop,
# try:/success assignments, the sort call and the final return are among the
# missing original lines) — confirm control flow against the full source.
3649 def GetResults(self):
3650 """Wait for and return the results of all jobs.
3653 @return: list of tuples (success, job results), in the same order
3654 as the submitted jobs; if a job has failed, instead of the result
3655 there will be the error message
3659 self.SubmitPending()
# row[1] is the submission status, row[2] the job ID
3662 ok_jobs = [row[2] for row in self.jobs if row[1]]
3664 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3666 # first, remove any non-submitted jobs
3667 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3668 for idx, _, jid, name in failures:
3669 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3670 results.append((idx, False, jid))
3673 (idx, _, jid, name) = self._ChooseJob()
3674 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3676 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
# Python 2 "except X, err" syntax; an archived job is reported but not fatal
3678 except errors.JobLost, err:
3679 _, job_result = FormatError(err)
3680 ToStderr("Job %s%s has been archived, cannot check its result",
3681 jid, self._IfName(name, " for %s"))
3683 except (errors.GenericError, luxi.ProtocolError), err:
3684 _, job_result = FormatError(err)
3686 # the error message will always be shown, verbose or not
3687 ToStderr("Job %s%s has failed: %s",
3688 jid, self._IfName(name, " for %s"), job_result)
3690 results.append((idx, success, job_result))
3692 # sort based on the index, then drop it
3694 results = [i[1:] for i in results]
# Either block until all jobs finish (delegating to GetResults) or just
# submit the pending jobs and print their IDs / submission failures,
# returning the (status, result-or-jobid) pairs without waiting.
# NOTE(review): the `if wait:` / `else:` lines and the per-row status test
# are elided from this listing — confirm branching against the full source.
3698 def WaitOrShow(self, wait):
3699 """Wait for job results or only print the job IDs.
3702 @param wait: whether to wait or not
3706 return self.GetResults()
3709 self.SubmitPending()
3710 for _, status, result, name in self.jobs:
# on success `result` is the job ID; on failure it is the error message
3712 ToStdout("%s: %s", result, name)
3714 ToStderr("Failure for %s: %s", name, result)
3715 return [row[1:3] for row in self.jobs]
# Recursively render a parameter set for display: iterates the *effective*
# values (actual) and, for each key, shows either the custom value from
# param_dict or "default (<effective value>)" when no override exists.
# Nested non-empty dicts recurse so the output mirrors the input hierarchy.
# NOTE(review): the `ret = {}` initialisation, the `else:` and the final
# `return ret` are elided from this listing.
3718 def FormatParamsDictInfo(param_dict, actual):
3719 """Formats a parameter dictionary.
3721 @type param_dict: dict
3722 @param param_dict: the own parameters
3724 @param actual: the current parameter set (including defaults)
3726 @return: dictionary where the value of each parameter is either a fully
3727 formatted string or a dictionary containing formatted strings
3731 for (key, data) in actual.items():
3732 if isinstance(data, dict) and data:
3733 ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
3735 ret[key] = str(param_dict.get(key, "default (%s)" % data))
# Render a list value for display: comma-join the custom list if one is set,
# otherwise fall back to "default (<comma-joined defaults>)".
# NOTE(review): the `else:` and `return ret` lines are elided here.
3739 def _FormatListInfoDefault(data, def_data):
3740 if data is not None:
3741 ret = utils.CommaJoin(data)
3743 ret = "default (%s)" % utils.CommaJoin(def_data)
# Build the display structure for an instance policy: bounds (min/max) specs,
# std specs (custom minmax/std are shown against themselves so no "default"
# markers appear at cluster level), allowed disk templates, and the scalar
# ipolicy parameters with "default (...)" fallbacks from the effective policy.
# NOTE(review): many structural lines (if/else branches, the list-comprehension
# brackets, ret.append calls and the final return) are elided from this
# listing — the summaries below must be confirmed against the full source.
3747 def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3748 """Formats an instance policy.
3750 @type custom_ipolicy: dict
3751 @param custom_ipolicy: own policy
3752 @type eff_ipolicy: dict
3753 @param eff_ipolicy: effective policy (including defaults); ignored for
3755 @type iscluster: bool
3756 @param iscluster: the policy is at cluster level
3757 @rtype: list of pairs
3758 @return: formatted data, suitable for L{PrintGenericInfo}
# at cluster level the custom policy *is* the effective policy
3762 eff_ipolicy = custom_ipolicy
3765 custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
# each min/max pair is labelled "<key>/<index>"
3767 for (k, minmax) in enumerate(custom_minmax):
3769 ("%s/%s" % (key, k),
3770 FormatParamsDictInfo(minmax[key], minmax[key]))
3771 for key in constants.ISPECS_MINMAX_KEYS
# no custom bounds: render the effective bounds with an empty custom dict,
# so every value shows as "default (...)"
3774 for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3776 ("%s/%s" % (key, k),
3777 FormatParamsDictInfo({}, minmax[key]))
3778 for key in constants.ISPECS_MINMAX_KEYS
3780 ret = [("bounds specs", minmax_out)]
3783 stdspecs = custom_ipolicy[constants.ISPECS_STD]
3785 (constants.ISPECS_STD,
3786 FormatParamsDictInfo(stdspecs, stdspecs))
3790 ("allowed disk templates",
3791 _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3792 eff_ipolicy[constants.IPOLICY_DTS]))
# scalar parameters: custom value or "default (<effective value>)"
3795 (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3796 for key in constants.IPOLICY_PARAMETERS
3801 def _PrintSpecsParameters(buf, specs):
3802 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3803 buf.write(",".join(values))
# Reconstruct the command-line options that would produce the given policy's
# spec sections: writes the std-specs option followed by its parameters, then
# one bounds-specs option per minmax pair (only when both "min" and "max" are
# present).  Non-spec policy fields are deliberately not handled.
# NOTE(review): the guards around the std section, the separator writes for
# the "min:"/"max:" prefixes (original lines 3830-3837 area) and several other
# lines are elided from this listing.
3806 def PrintIPolicyCommand(buf, ipolicy, isgroup):
3807 """Print the command option used to generate the given instance policy.
3809 Currently only the parts dealing with specs are supported.
3812 @param buf: stream to write into
3814 @param ipolicy: instance policy
3816 @param isgroup: whether the policy is at group level
3820 stdspecs = ipolicy.get("std")
3822 buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3823 _PrintSpecsParameters(buf, stdspecs)
3824 minmaxes = ipolicy.get("minmax", [])
3826 for minmax in minmaxes:
3827 minspecs = minmax.get("min")
3828 maxspecs = minmax.get("max")
# emit a bounds option only for complete min/max pairs
3829 if minspecs and maxspecs:
3831 buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3836 _PrintSpecsParameters(buf, minspecs)
3838 _PrintSpecsParameters(buf, maxspecs)
# Interactively confirm an operation on a list of items: builds a summary
# question plus the full "Affected ..." listing, and (when the extra "v"
# choice is inserted) lets the user view the list before answering y/n.
# NOTE(review): the computation of `count`, the condition that inserts the
# "v" choice, the loop re-asking after "v", and the final return are all
# elided from this listing — confirm against the full source.
3841 def ConfirmOperation(names, list_type, text, extra=""):
3842 """Ask the user to confirm an operation on a list of list_type.
3844 This function is used to request confirmation for doing an operation
3845 on a given list of list_type.
3848 @param names: the list of names that we display when
3849 we ask for confirmation
3850 @type list_type: str
3851 @param list_type: Human readable name for elements in the list (e.g. nodes)
3853 @param text: the operation that the user should confirm
3855 @return: True or False depending on user's confirmation.
3859 msg = ("The %s will operate on %d %s.\n%s"
3860 "Do you want to continue?" % (text, count, list_type, extra))
3861 affected = (("\nAffected %s:\n" % list_type) +
3862 "\n".join(["  %s" % name for name in names]))
3864 choices = [("y", True, "Yes, execute the %s" % text),
3865 ("n", False, "No, abort the %s" % text)]
# "v" maps to the sentinel value "v" (not a boolean) so it can be detected
3868 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3871 question = msg + affected
3873 choice = AskUser(question, choices)
# re-ask after the user viewed the list
3876 choice = AskUser(msg + affected, choices)
# Normalise a dict of size values: entries equal to the literal "default"
# (constants.VALUE_DEFAULT) are kept as-is, everything else goes through
# utils.ParseUnit.  May propagate ParseUnit's exceptions to the caller.
# NOTE(review): the `parsed = {}` initialisation, the default-branch
# assignment and the `return parsed` are elided from this listing.
3880 def _MaybeParseUnit(elements):
3881 """Parses and returns an array of potential values with units.
3885 for k, v in elements.items():
3886 if v == constants.VALUE_DEFAULT:
3889 parsed[k] = utils.ParseUnit(v)
# Populate an ipolicy dict from the legacy per-parameter --specs-xxx options:
# parses units for memory/disk sizes, transposes the per-parameter dicts
# ({min:..,max:..,std:..}) into per-key (min/max/std) dicts, type-checks them
# against the group- or cluster-level schemas, and stores the result under
# ISPECS_MINMAX (a one-element list) and ISPECS_STD, filling in defaults when
# fill_all is requested.
# NOTE(review): the surrounding try:, several if/else lines, the ispecs and
# minmax_out initialisers and the fill_all conditions are elided from this
# listing — confirm branching against the full source.
3893 def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3894 ispecs_disk_count, ispecs_disk_size,
3895 ispecs_nic_count, group_ipolicy, fill_all):
3898 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3899 if ispecs_disk_size:
3900 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
# Python 2 except syntax; unit-parsing failures become OpPrereqError
3901 except (TypeError, ValueError, errors.UnitParseError), err:
3902 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3904 (ispecs_disk_size, ispecs_mem_size, err),
3907 # prepare ipolicy dict
3908 ispecs_transposed = {
3909 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3910 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3911 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3912 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3913 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3916 # first, check that the values given are correct
3918 forced_type = TISPECS_GROUP_TYPES
3920 forced_type = TISPECS_CLUSTER_TYPES
3921 for specs in ispecs_transposed.values():
3922 assert type(specs) is dict
3923 utils.ForceDictType(specs, forced_type)
3927 constants.ISPECS_MIN: {},
3928 constants.ISPECS_MAX: {},
3929 constants.ISPECS_STD: {},
# transpose {param: {min/max/std: val}} into {min/max/std: {param: val}}
3931 for (name, specs) in ispecs_transposed.iteritems():
3932 assert name in constants.ISPECS_PARAMETERS
3933 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3934 assert key in ispecs
3935 ispecs[key][name] = val
3937 for key in constants.ISPECS_MINMAX_KEYS:
3940 objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3942 minmax_out[key] = ispecs[key]
# the split options can only describe a single min/max pair
3943 ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3945 ipolicy[constants.ISPECS_STD] = \
3946 objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3947 ispecs[constants.ISPECS_STD])
3949 ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
# Parse the unit-carrying fields (disk size, memory size) of a single spec
# dict; a parse failure is reported as OpPrereqError naming the offending
# parameter and the spec section (keyname) it came from.
# NOTE(review): the copy/initialisation of `ret`, the `if k in ret:` guard,
# the `try:` line and the final `return ret` are elided from this listing.
3952 def _ParseSpecUnit(spec, keyname):
3954 for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3957 ret[k] = utils.ParseUnit(ret[k])
3958 except (TypeError, ValueError, errors.UnitParseError), err:
3959 raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3960 " specs: %s" % (k, ret[k], keyname, err)),
# Parse and validate one instance-spec dict: units are parsed first, then the
# value types are enforced; when `required` is set, every ISPECS parameter
# must be present or an OpPrereqError listing the missing ones is raised.
# NOTE(review): the final `return ret` and the error-code argument of the
# raise are elided from this listing.
3965 def _ParseISpec(spec, keyname, required):
3966 ret = _ParseSpecUnit(spec, keyname)
3967 utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
3968 missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
3969 if required and missing:
3970 raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
3971 (keyname, utils.CommaJoin(missing)),
# Detect the special case where the minmax specs consist of exactly one pair
# with exactly one key whose value is empty and whose key is one of the
# allowed sentinel values (e.g. "default") — used to request a value reset
# rather than provide real bounds.
# NOTE(review): the initialisation and return of the result variable are
# elided; presumably None is returned when the special case does not apply
# and the sentinel key otherwise — verify against the full source.
3976 def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
3978 if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
3979 len(minmax_ispecs[0]) == 1):
3980 for (key, spec) in minmax_ispecs[0].items():
3981 # This loop is executed exactly once
3982 if key in allowed_values and not spec:
# Populate an ipolicy dict from the new-style --ipolicy-xxx-specs options:
# either store the allowed sentinel value detected by
# _GetISpecsInAllowedValues, or parse every min/max pair (rejecting unknown
# keys, requiring all parameters) into ISPECS_MINMAX; std specs are parsed
# separately and are not valid for group policies.
# NOTE(review): the `minmax_out = []` / `mmpair_out = {}` initialisers are
# elided from this listing.
3987 def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
3988 group_ipolicy, allowed_values):
3989 found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
3990 if found_allowed is not None:
3991 ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
3992 elif minmax_ispecs is not None:
3994 for mmpair in minmax_ispecs:
3996 for (key, spec) in mmpair.items():
3997 if key not in constants.ISPECS_MINMAX_KEYS:
3998 msg = "Invalid key in bounds instance specifications: %s" % key
3999 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
# bounds specs must be complete, hence required=True
4000 mmpair_out[key] = _ParseISpec(spec, key, True)
4001 minmax_out.append(mmpair_out)
4002 ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4003 if std_ispecs is not None:
4004 assert not group_ipolicy # This is not an option for gnt-group
4005 ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
# Top-level builder translating the various ipolicy command-line options into
# an ipolicy dict: rejects mixing legacy --specs-xxx with new
# --ipolicy-xxx-specs options, delegates spec parsing to the split/full
# helpers, copies the scalar parameters, and (for non-group policies with
# fill_all) completes the result with IPOLICY_DEFAULTS.
# NOTE(review): signature lines for minmax_ispecs, std_ispecs and fill_all
# (original lines 4013-4014, 4020) plus several if/else/return lines are
# elided from this listing — confirm against the full source.
4008 def CreateIPolicyFromOpts(ispecs_mem_size=None,
4009 ispecs_cpu_count=None,
4010 ispecs_disk_count=None,
4011 ispecs_disk_size=None,
4012 ispecs_nic_count=None,
4015 ipolicy_disk_templates=None,
4016 ipolicy_vcpu_ratio=None,
4017 ipolicy_spindle_ratio=None,
4018 group_ipolicy=False,
4019 allowed_values=None,
4021 """Creation of instance policy based on command line options.
4023 @param fill_all: whether for cluster policies we should ensure that
4024 all values are filled
# filling defaults and restricting to sentinel values are mutually exclusive
4027 assert not (fill_all and allowed_values)
4029 split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4030 ispecs_disk_size or ispecs_nic_count)
4031 if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4032 raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4033 " together with any --ipolicy-xxx-specs option",
4036 ipolicy_out = objects.MakeEmptyIPolicy()
4039 _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4040 ispecs_disk_count, ispecs_disk_size,
4041 ispecs_nic_count, group_ipolicy, fill_all)
4042 elif (minmax_ispecs is not None or std_ispecs is not None):
4043 _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4044 group_ipolicy, allowed_values)
4046 if ipolicy_disk_templates is not None:
# sentinel values (e.g. "default") are stored verbatim, not as a list
4047 if allowed_values and ipolicy_disk_templates in allowed_values:
4048 ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4050 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4051 if ipolicy_vcpu_ratio is not None:
4052 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4053 if ipolicy_spindle_ratio is not None:
4054 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
# sanity check: no unknown keys may have been introduced
4056 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4058 if not group_ipolicy and fill_all:
4059 ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
# Recursive YAML-style serializer behind PrintGenericInfo: dicts are emitted
# with sorted keys, lists of (key, value) tuples preserve their order (an
# "ordered dictionary"), plain lists become "- item" entries, and anything
# else is written via str().  `level` controls indentation; `afterkey` marks
# that the cursor is mid-line after "key:".
# NOTE(review): the `baseind` definition, newline/indent writes, the key
# writes and the per-branch leaf handling are elided from this listing —
# confirm formatting details against the full source.
4064 def _SerializeGenericInfo(buf, data, level, afterkey=False):
4065 """Formatting core of L{PrintGenericInfo}.
4067 @param buf: (string) stream to accumulate the result into
4068 @param data: data to format
4070 @param level: depth in the data hierarchy, used for indenting
4071 @type afterkey: bool
4072 @param afterkey: True when we are in the middle of a line after a key (used
4073 to properly add newlines or indentation)
4077 if isinstance(data, dict):
# sorted for deterministic output
4086 for key in sorted(data):
4088 buf.write(baseind * level)
4093 _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4094 elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4095 # list of tuples (an ordered dictionary)
4101 for (key, val) in data:
4103 buf.write(baseind * level)
4108 _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4109 elif isinstance(data, list):
4120 buf.write(baseind * level)
# baseind[1:] compensates for the "- " list marker already written
4124 buf.write(baseind[1:])
4125 _SerializeGenericInfo(buf, item, level + 1)
4127 # This branch should be only taken for strings, but it's practically
4128 # impossible to guarantee that no other types are produced somewhere
4129 buf.write(str(data))
# Public entry point: serialize a hierarchical structure (dicts, ordered
# key/value tuple lists, plain lists, scalars) into YAML-ish text via
# _SerializeGenericInfo and print it with the trailing newlines stripped.
# NOTE(review): the `buf = StringIO()` line is elided from this listing.
4133 def PrintGenericInfo(data):
4134 """Print information formatted according to the hierarchy.
4136 The output is a valid YAML string.
4138 @param data: the data to print. It's a hierarchical structure whose elements
4140 - dictionaries, where keys are strings and values are of any of the
4142 - lists of pairs (key, value), where key is a string and value is of
4143 any of the types listed here; it's a way to encode ordered
4145 - lists of any of the types listed here
4150 _SerializeGenericInfo(buf, data, 0)
4151 ToStdout(buf.getvalue().rstrip("\n"))