# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA

"""Module dealing with command line parsing"""
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 "ENABLED_DISK_TEMPLATES_OPT",
89 "FILESTORE_DRIVER_OPT",
97 "GLOBAL_SHARED_FILEDIR_OPT",
103 "DEFAULT_IALLOCATOR_OPT",
104 "IDENTIFY_DEFAULTS_OPT",
105 "IGNORE_CONSIST_OPT",
107 "IGNORE_FAILURES_OPT",
108 "IGNORE_OFFLINE_OPT",
109 "IGNORE_REMOVE_FAILURES_OPT",
110 "IGNORE_SECONDARIES_OPT",
112 "INCLUDEDEFAULTS_OPT",
115 "MAINTAIN_NODE_HEALTH_OPT",
117 "MASTER_NETMASK_OPT",
119 "MIGRATION_MODE_OPT",
120 "MODIFY_ETCHOSTS_OPT",
124 "NEW_CLUSTER_CERT_OPT",
125 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
126 "NEW_CONFD_HMAC_KEY_OPT",
130 "NEW_SPICE_CERT_OPT",
132 "NOCONFLICTSCHECK_OPT",
133 "NODE_FORCE_JOIN_OPT",
135 "NODE_PLACEMENT_OPT",
139 "NODRBD_STORAGE_OPT",
145 "NOMODIFY_ETCHOSTS_OPT",
146 "NOMODIFY_SSH_SETUP_OPT",
150 "NORUNTIME_CHGS_OPT",
153 "NOSSH_KEYCHECK_OPT",
167 "PREALLOC_WIPE_DISKS_OPT",
168 "PRIMARY_IP_VERSION_OPT",
175 "REMOVE_INSTANCE_OPT",
176 "REMOVE_RESERVED_IPS_OPT",
182 "SECONDARY_ONLY_OPT",
187 "SHUTDOWN_TIMEOUT_OPT",
189 "SPECS_CPU_COUNT_OPT",
190 "SPECS_DISK_COUNT_OPT",
191 "SPECS_DISK_SIZE_OPT",
192 "SPECS_MEM_SIZE_OPT",
193 "SPECS_NIC_COUNT_OPT",
195 "IPOLICY_STD_SPECS_OPT",
196 "IPOLICY_DISK_TEMPLATES",
197 "IPOLICY_VCPU_RATIO",
203 "STARTUP_PAUSED_OPT",
212 "USE_EXTERNAL_MIP_SCRIPT",
220 "IGNORE_IPOLICY_OPT",
221 "INSTANCE_POLICY_OPTS",
222 # Generic functions for CLI programs
224 "CreateIPolicyFromOpts",
226 "GenericInstanceCreate",
232 "JobSubmittedException",
234 "RunWhileClusterStopped",
238 # Formatting functions
239 "ToStderr", "ToStdout",
242 "FormatParamsDictInfo",
244 "PrintIPolicyCommand",
254 # command line options support infrastructure
255 "ARGS_MANY_INSTANCES",
258 "ARGS_MANY_NETWORKS",
278 "OPT_COMPL_INST_ADD_NODES",
279 "OPT_COMPL_MANY_NODES",
280 "OPT_COMPL_ONE_IALLOCATOR",
281 "OPT_COMPL_ONE_INSTANCE",
282 "OPT_COMPL_ONE_NODE",
283 "OPT_COMPL_ONE_NODEGROUP",
284 "OPT_COMPL_ONE_NETWORK",
286 "OPT_COMPL_ONE_EXTSTORAGE",
291 "COMMON_CREATE_OPTS",
297 #: Priorities (sorted)
299 ("low", constants.OP_PRIO_LOW),
300 ("normal", constants.OP_PRIO_NORMAL),
301 ("high", constants.OP_PRIO_HIGH),
304 #: Priority dictionary for easier lookup
305 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
306 # we migrate to Python 2.6
307 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
309 # Query result status for clients
312 QR_INCOMPLETE) = range(3)
314 #: Maximum batch size for ChooseJob
318 # constants used to create InstancePolicy dictionary
319 TISPECS_GROUP_TYPES = {
320 constants.ISPECS_MIN: constants.VTYPE_INT,
321 constants.ISPECS_MAX: constants.VTYPE_INT,
324 TISPECS_CLUSTER_TYPES = {
325 constants.ISPECS_MIN: constants.VTYPE_INT,
326 constants.ISPECS_MAX: constants.VTYPE_INT,
327 constants.ISPECS_STD: constants.VTYPE_INT,
330 #: User-friendly names for query2 field types
332 constants.QFT_UNKNOWN: "Unknown",
333 constants.QFT_TEXT: "Text",
334 constants.QFT_BOOL: "Boolean",
335 constants.QFT_NUMBER: "Number",
336 constants.QFT_UNIT: "Storage size",
337 constants.QFT_TIMESTAMP: "Timestamp",
338 constants.QFT_OTHER: "Custom",
343 def __init__(self, min=0, max=None): # pylint: disable=W0622
348 return ("<%s min=%s max=%s>" %
349 (self.__class__.__name__, self.min, self.max))
352 class ArgSuggest(_Argument):
353 """Suggesting argument.
355 Value can be any of the ones passed to the constructor.
358 # pylint: disable=W0622
359 def __init__(self, min=0, max=None, choices=None):
360 _Argument.__init__(self, min=min, max=max)
361 self.choices = choices
364 return ("<%s min=%s max=%s choices=%r>" %
365 (self.__class__.__name__, self.min, self.max, self.choices))
368 class ArgChoice(ArgSuggest):
371 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
372 but value must be one of the choices.
377 class ArgUnknown(_Argument):
378 """Unknown argument to program (e.g. determined at runtime).
383 class ArgInstance(_Argument):
384 """Instances argument.
389 class ArgNode(_Argument):
395 class ArgNetwork(_Argument):
401 class ArgGroup(_Argument):
402 """Node group argument.
407 class ArgJobId(_Argument):
413 class ArgFile(_Argument):
414 """File path argument.
419 class ArgCommand(_Argument):
425 class ArgHost(_Argument):
431 class ArgOs(_Argument):
437 class ArgExtStorage(_Argument):
438 """ExtStorage argument.
# Canned argument specifications for commands: "many" variants accept any
# number of names, "one" variants require exactly one.
# (Cleaned up: stray extracted line-number artifacts removed.)
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
# NOTE(review): this span is a garbled extraction (leading line-number
# artifacts, several interior lines missing); code kept byte-identical.
# Purpose (from visible lines): resolve the (kind, name) pair for the tag
# operation from opts.tag_type; raises ProgrammerError when the attribute is
# absent or the kind is unhandled, and OpPrereqError when a per-object kind
# (nodegroup/network/instance) gets no name argument.  Presumably consumes
# the name from `args` -- TODO confirm against the full source.
456 def _ExtractTagsObject(opts, args):
457   """Extract the tag type object.
459   Note that this function will modify its args parameter.
462   if not hasattr(opts, "tag_type"):
463   raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
465   if kind == constants.TAG_CLUSTER:
467   elif kind in (constants.TAG_NODEGROUP,
469   constants.TAG_NETWORK,
470   constants.TAG_INSTANCE):
472   raise errors.OpPrereqError("no arguments passed to the command",
477   raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
# NOTE(review): garbled extraction; interior lines missing; bytes unchanged.
# Purpose: append tags read from opts.tags_source (one per line, stripped)
# to `args`; a filename of '-' presumably means stdin -- TODO confirm.
# The explicit readline loop is deliberate (see the python bug 1633941
# comment below); do not "simplify" it to a list comprehension.
481 def _ExtendTags(opts, args):
482   """Extend the args if a source file has been given.
484   This function will extend the tags with the contents of the file
485   passed in the 'tags_source' attribute of the opts parameter. A file
486   named '-' will be replaced by stdin.
489   fname = opts.tags_source
495   new_fh = open(fname, "r")
498   # we don't use the nice 'new_data = [line.strip() for line in fh]'
499   # because of python bug 1633941
501   line = new_fh.readline()
504   new_data.append(line.strip())
507   args.extend(new_data)
# NOTE(review): garbled extraction; trailing output lines missing; bytes
# unchanged.  Queries the tags of the object identified by
# _ExtractTagsObject via a query client (GetClient(query=True)) and
# materialises the result list; the original presumably sorts and prints
# the tags afterwards -- TODO confirm.
510 def ListTags(opts, args):
511   """List the tags on a given object.
513   This is a generic implementation that knows how to deal with all
514   three cases of tag objects (cluster, node, instance). The opts
515   argument is expected to contain a tag_type field denoting what
516   object type we work on.
519   kind, name = _ExtractTagsObject(opts, args)
520   cl = GetClient(query=True)
521   result = cl.QueryTags(kind, name)
522   result = list(result)
# NOTE(review): garbled extraction; an interior guard line is missing
# before the "No tags to be added" raise; bytes unchanged.
# Adds the tags in `args` (extended from opts.tags_source by _ExtendTags)
# to the target object via an OpTagsSet opcode submitted with SubmitOrSend.
528 def AddTags(opts, args):
529   """Add tags on a given object.
531   This is a generic implementation that knows how to deal with all
532   three cases of tag objects (cluster, node, instance). The opts
533   argument is expected to contain a tag_type field denoting what
534   object type we work on.
537   kind, name = _ExtractTagsObject(opts, args)
538   _ExtendTags(opts, args)
540   raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
541   op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
542   SubmitOrSend(op, opts)
# NOTE(review): garbled extraction; an interior guard line is missing
# before the "No tags to be removed" raise; bytes unchanged.
# Mirror image of AddTags: removes the tags in `args` from the target
# object via an OpTagsDel opcode submitted with SubmitOrSend.
545 def RemoveTags(opts, args):
546   """Remove tags from a given object.
548   This is a generic implementation that knows how to deal with all
549   three cases of tag objects (cluster, node, instance). The opts
550   argument is expected to contain a tag_type field denoting what
551   object type we work on.
554   kind, name = _ExtractTagsObject(opts, args)
555   _ExtendTags(opts, args)
557   raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
558   op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
559   SubmitOrSend(op, opts)
# NOTE(review): garbled extraction; the `try:` line is missing between the
# docstring and the return; bytes unchanged.
# optparse type-checker callback: converts a size string with an optional
# unit suffix into an int via utils.ParseUnit, re-raising parse failures
# as OptionValueError so optparse reports them as option errors.
562 def check_unit(option, opt, value): # pylint: disable=W0613
563   """OptParsers custom converter for units.
567   return utils.ParseUnit(value)
568   except errors.UnitParseError, err:
569   raise OptionValueError("option %s: %s" % (opt, err))
# NOTE(review): garbled extraction; several structural lines (docstring
# close, dict initialisation, the "=" branch conditions, return) are
# missing; bytes unchanged.  Visible contract: splits a "key=val,key=val"
# string (escaped commas honoured via utils.UnescapeAndSplit); when
# parse_prefixes holds, a bare key prefixed NO_PREFIX maps to False,
# UN_PREFIX maps to None, and an unprefixed bare key maps to True.
# Raises ParameterError for a missing value (when prefixes are disabled)
# and for duplicate keys.
572 def _SplitKeyVal(opt, data, parse_prefixes):
573   """Convert a KeyVal string into a dict.
575   This function will convert a key=val[,...] string into a dict. Empty
576   values will be converted specially: keys which have the prefix 'no_'
577   will have the value=False and the prefix stripped, keys with the prefix
578   "-" will have value=None and the prefix stripped, and the others will
582   @param opt: a string holding the option name for which we process the
583   data, used in building error messages
585   @param data: a string of the format key=val,key=val,...
586   @type parse_prefixes: bool
587   @param parse_prefixes: whether to handle prefixes specially
589   @return: {key=val, key=val}
590   @raises errors.ParameterError: if there are duplicate keys
595   for elem in utils.UnescapeAndSplit(data, sep=","):
597   key, val = elem.split("=", 1)
599   if elem.startswith(NO_PREFIX):
600   key, val = elem[len(NO_PREFIX):], False
601   elif elem.startswith(UN_PREFIX):
602   key, val = elem[len(UN_PREFIX):], None
604   key, val = elem, True
606   raise errors.ParameterError("Missing value for key '%s' in option %s" %
609   raise errors.ParameterError("Duplicate key '%s' in option %s" %
# NOTE(review): garbled extraction; the ":"-presence test, several branch
# keywords and the return are missing; bytes unchanged.  Visible contract:
# splits "ident:key=val,..." into (ident, kv_dict) via _SplitKeyVal; with
# parse_prefixes, a NO_PREFIX ident yields (stripped_ident, False) and a
# UN_PREFIX ident (not followed by a digit) yields (stripped_ident, None),
# and in both removal forms passing extra options is a ParameterError.
615 def _SplitIdentKeyVal(opt, value, parse_prefixes):
616   """Helper function to parse "ident:key=val,key=val" options.
619   @param opt: option name, used in error messages
621   @param value: expected to be in the format "ident:key=val,key=val,..."
622   @type parse_prefixes: bool
623   @param parse_prefixes: whether to handle prefixes specially (see
626   @return: (ident, {key=val, key=val})
627   @raises errors.ParameterError: in case of duplicates or other parsing errors
631   ident, rest = value, ""
633   ident, rest = value.split(":", 1)
635   if parse_prefixes and ident.startswith(NO_PREFIX):
637   msg = "Cannot pass options when removing parameter groups: %s" % value
638   raise errors.ParameterError(msg)
639   retval = (ident[len(NO_PREFIX):], False)
640   elif (parse_prefixes and ident.startswith(UN_PREFIX) and
641   (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
643   msg = "Cannot pass options when removing parameter groups: %s" % value
644   raise errors.ParameterError(msg)
645   retval = (ident[len(UN_PREFIX):], None)
647   kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
648   retval = (ident, kv_dict)
# NOTE(review): garbled extraction; docstring close missing; bytes
# unchanged.  optparse type-checker callback: delegates to
# _SplitIdentKeyVal with prefix handling enabled, returning the
# (ident, {key: val}) tuple.
652 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
653   """Custom parser for ident:key=val,key=val options.
655   This will store the parsed values as a tuple (ident, {key: val}). As such,
656   multiple uses of this option via action=append is possible.
659   return _SplitIdentKeyVal(opt, value, True)
# NOTE(review): garbled extraction; docstring close missing; bytes
# unchanged.  optparse type-checker callback: parses "key=val,key=val"
# into a dict via _SplitKeyVal with prefix handling enabled.
662 def check_key_val(option, opt, value): # pylint: disable=W0613
663   """Custom parser class for key=val,key=val options.
665   This will store the parsed values as a dict {key: val}.
668   return _SplitKeyVal(opt, value, True)
# NOTE(review): garbled extraction; accumulator initialisation, the
# emptiness/duplicate conditions and the return are missing; bytes
# unchanged.  Visible contract: splits a "/"-separated list of
# "ident:key=val,..." sections (prefix handling disabled) into an
# {ident: kv_dict} mapping, rejecting empty sections and duplicated idents.
671 def _SplitListKeyVal(opt, value):
673   for elem in value.split("/"):
675   raise errors.ParameterError("Empty section in option '%s'" % opt)
676   (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
678   msg = ("Duplicated parameter '%s' in parsing %s: %s" %
680   raise errors.ParameterError(msg)
681   retval[ident] = valdict
# NOTE(review): garbled extraction; docstring close, list initialisation
# and return are missing; bytes unchanged.  optparse type-checker
# callback: splits on "//" and parses each part with _SplitListKeyVal,
# producing a list of {ident: kv_dict} mappings.
685 def check_multilist_ident_key_val(_, opt, value):
686   """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
688   @rtype: list of dictionary
689   @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
693   for line in value.split("//"):
694   retval.append(_SplitListKeyVal(opt, line))
# NOTE(review): garbled extraction; the return statements of the two
# accepted branches are missing; bytes unchanged.  optparse type-checker
# callback: case-insensitively maps constants.VALUE_FALSE/"no" and
# constants.VALUE_TRUE/"yes" to booleans, rejecting anything else with
# ParameterError.
698 def check_bool(option, opt, value): # pylint: disable=W0613
699   """Custom parser for yes/no options.
701   This will store the parsed value as either True or False.
704   value = value.lower()
705   if value == constants.VALUE_FALSE or value == "no":
707   elif value == constants.VALUE_TRUE or value == "yes":
710   raise errors.ParameterError("Invalid boolean value '%s'" % value)
# NOTE(review): garbled extraction; the empty-string special case branch
# is missing; bytes unchanged.  optparse type-checker callback for
# comma-separated lists; the retained comment explains why "" must be
# special-cased before utils.UnescapeAndSplit is used.
713 def check_list(option, opt, value): # pylint: disable=W0613
714   """Custom parser for comma-separated lists.
717   # we have to make this explicit check since "".split(",") is [""],
718   # not an empty list :(
722   return utils.UnescapeAndSplit(value)
# NOTE(review): garbled extraction; the branch bodies (return of the
# default sentinel / float conversion) are missing; bytes unchanged.
# optparse type-checker callback: accepts constants.VALUE_DEFAULT as a
# special value, presumably returning it unchanged and otherwise
# converting to float -- TODO confirm against the full source.
725 def check_maybefloat(option, opt, value): # pylint: disable=W0613
726   """Custom parser for float numbers which might be also defaults.
729   value = value.lower()
731   if value == constants.VALUE_DEFAULT:
737 # completion_suggestion is normally a list. Using numeric values not evaluating
738 # to False for dynamic completion.
739 (OPT_COMPL_MANY_NODES,
741 OPT_COMPL_ONE_INSTANCE,
743 OPT_COMPL_ONE_EXTSTORAGE,
744 OPT_COMPL_ONE_IALLOCATOR,
745 OPT_COMPL_ONE_NETWORK,
746 OPT_COMPL_INST_ADD_NODES,
747 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
749 OPT_COMPL_ALL = compat.UniqueFrozenset([
750 OPT_COMPL_MANY_NODES,
752 OPT_COMPL_ONE_INSTANCE,
754 OPT_COMPL_ONE_EXTSTORAGE,
755 OPT_COMPL_ONE_IALLOCATOR,
756 OPT_COMPL_ONE_NETWORK,
757 OPT_COMPL_INST_ADD_NODES,
758 OPT_COMPL_ONE_NODEGROUP,
762 class CliOption(Option):
763 """Custom option class for optparse.
766 ATTRS = Option.ATTRS + [
767 "completion_suggest",
769 TYPES = Option.TYPES + (
770 "multilistidentkeyval",
778 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
779 TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
780 TYPE_CHECKER["identkeyval"] = check_ident_key_val
781 TYPE_CHECKER["keyval"] = check_key_val
782 TYPE_CHECKER["unit"] = check_unit
783 TYPE_CHECKER["bool"] = check_bool
784 TYPE_CHECKER["list"] = check_list
785 TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
# Generic output/behaviour options shared by most CLI scripts.
# (Cleaned up: stray extracted line-number artifacts removed, conventional
# continuation alignment restored; option semantics unchanged.)
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
820 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
821 action="store_true", default=False,
822 help=("Ignore offline nodes and do as much"
825 TAG_ADD_OPT = cli_option("--tags", dest="tags",
826 default=None, help="Comma-separated list of instance"
# Source file for tag names, consumed by _ExtendTags ('-' means stdin).
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")
# Job submission/locking behaviour options.
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
842 DRY_RUN_OPT = cli_option("--dry-run", default=False,
844 help=("Do not execute the operation, just run the"
845 " check steps and verify if it could be"
848 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
850 help="Increase the verbosity of the operation")
# Debug/sync/instance-state toggles plus the disk template selector.
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))
879 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
880 help="Do not create any network cards for"
# File-based storage and allocator-plugin selection options.
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
899 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
901 help="Set the default instance"
903 default=None, type="string",
904 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
906 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
908 completion_suggest=OPT_COMPL_ONE_OS)
# OS parameter and variant handling options.
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")
918 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
919 action="store_true", default=False,
920 help="Do not install the OS (will"
# Backend/hypervisor/disk parameter options and the per-resource instance
# spec (min/max/std) options used when building instance policies.
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")
# Standard (default) instance spec values for the instance policy.
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   # FIX: typo "Complte" -> "Complete" in the
                                   # user-visible help text
                                   help="Complete standard instance specs")
# Instance-policy limits: allowed disk templates and vcpu ratio.
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")
992 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
993 dest="ipolicy_spindle_ratio",
994 type="maybefloat", default=None,
995 help=("The maximum allowed instances to"
# Hypervisor selection: single value vs appendable list (both reuse -H).
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
1008 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1009 action="store_false",
1010 help="Don't check that the instance's IP"
1013 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1014 default=True, action="store_false",
1015 help="Don't check that the instance's name"
# NIC/disk specification options for instance creation and modification.
NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")
1035 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1036 dest="ignore_consistency",
1037 action="store_true", default=False,
1038 help="Ignore the consistency of the disks on"
1041 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1042 dest="allow_failover",
1043 action="store_true", default=False,
1044 help="If migration is not possible fallback to"
# Migration mode toggle: store_false on dest="live" so the default is a
# live migration.
NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")
1053 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1055 choices=list(constants.HT_MIGRATION_MODES),
1056 help="Override default migration mode (choose"
1057 " either live or non-live")
# Node targeting options (single pnode[:snode] vs appendable node list).
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
1071 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1073 help="Node group (name or uuid)",
1074 metavar="<nodegroup>",
1075 default=None, type="string",
1076 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1078 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1080 completion_suggest=OPT_COMPL_ONE_NODE)
# Instance lifecycle / display options.
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"
                         " migration/failover")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")
1119 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1120 dest="ignore_remove_failures",
1121 action="store_true", default=False,
1122 help="Remove the instance from the"
1123 " cluster configuration even if there"
1124 " are failures during the removal"
# Export/replace-disks node targeting and disk replacement side selection.
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))
1160 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1161 default=False, action="store_true",
1162 help="Lock all nodes and auto-promote as needed"
# Automatic disk replacement and size-check override options.
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")
1178 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1180 completion_suggest=OPT_COMPL_ONE_NODE)
1182 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
# Node add/join options.
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")
1205 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1206 type="bool", default=None,
1207 help=("Set the offline flag on the node"
1208 " (cluster does not communicate with offline"
# Tri-state (yes/no/unset) node and volume flag options.
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")
1228 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1229 help="Disable support for lvm based instances"
1231 action="store_false", default=True)
# Cluster-wide enabled hypervisor list.
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)
1238 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1239 dest="enabled_disk_templates",
1240 help="Comma-separated list of "
1242 type="string", default=None)
# Cluster-level configuration options (NIC params, candidate pool, VG,
# destroy confirmation, voting bypass).
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)
# Prefix for auto-generated instance MAC addresses.  The statement's
# closing line (metavar/default) was truncated here; restored.
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)
# Network interface for the master IP (cluster-wide).  The statement's
# closing line was truncated here; restored with the conventional
# metavar/default.
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)
# Netmask for the master IP.  Closing line was truncated here; restored —
# TODO confirm metavar/default against the project history.
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)
# Ternary yes/no/unset flag: whether a user-provided script handles
# master-IP turnup/turndown instead of the built-in logic
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)
# Default cluster-wide directory for file-based instance disks.  The
# "metavar" line was truncated here; restored.
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
# Default cluster-wide directory for shared-file-based instance disks
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

# Negative flag used at cluster init: never touch /etc/hosts
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

# Ternary counterpart of the above, for "gnt-cluster modify" (shares the
# same dest, "modify_etc_hosts")
MODIFY_ETCHOSTS_OPT = \
  cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
             default=None, type="bool",
             help="Defines whether the cluster should autonomously modify"
             " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

# Cluster-verify output/behaviour tweaks
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)
# Reboot type for "gnt-instance reboot"; restricted to the constants'
# known reboot types
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

# Negative flag: dest is "shutdown" and stays True unless --noshutdown given
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")
# Like TIMEOUT_OPT but specific to instance shutdown.  The help string's
# final continuation line was truncated here; restored.
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")
# Repetition interval for commands run in a loop.  The default and closing
# lines were truncated here; restored (TODO confirm the default value
# against project history).  Also fixed the "repetions" typo in the help.
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=500,
                          help=("Number of seconds between repetitions of the"
                                " command"))
# Allows replace-disks/evacuation to release secondary-node locks early.
# The help string's closing line was truncated here; restored.
EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")
# Certificate-renewal flag for "gnt-cluster renew-crypto"
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")
# Path to a replacement RAPI certificate file.  The "default" line was
# truncated here; restored.
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")
# Self-signed RAPI certificate regeneration flag.  The help string's
# closing line was truncated here; restored.
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))
# Paths to replacement SPICE certificate / CA-certificate files.  Each
# statement's "default" line was truncated here; restored.
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")
# Self-signed SPICE certificate regeneration flag.  The help string's
# closing line was truncated here; restored.
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))
# Confd HMAC key regeneration flag.  The closing line of the "%"
# expression was truncated here; restored per upstream
# (pathutils.CONFD_HMAC_KEY is the key's on-disk path).
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          pathutils.CONFD_HMAC_KEY))
# Replaces the cluster domain secret with the contents of the given file.
# Fixed the duplicated word in the help text ("new new") and restored the
# truncated "default" line.
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
# Cluster-domain-secret regeneration flag.  The help string's closing
# line was truncated here; restored.
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))
# Use the secondary/replication network instead of the primary one when
# contacting nodes
USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

# Ternary cluster setting: automatic node-health maintenance
MAINTAIN_NODE_HEALTH_OPT = \
  cli_option("--maintain-node-health", dest="maintain_node_health",
             metavar=_YORNO, default=None, type="bool",
             help="Configure the cluster to automatically maintain node"
             " health, by shutting down unknown instances, shutting down"
             " unknown DRBD devices, etc.")

# Import helper: fold saved instance parameters that match the current
# cluster defaults back into "use the default"
IDENTIFY_DEFAULTS_OPT = \
  cli_option("--identify-defaults", dest="identify_defaults",
             default=False, action="store_true",
             help="Identify which saved instance parameters are equal to"
             " the current cluster defaults and set them as such, instead"
             " of marking them as overridden")
# User-id pool management (all take comma-separated uid/uid-range lists,
# kept as raw strings here and parsed later)
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

# Logical volumes that cluster-verify should not complain about
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

# Output formatting: show positive integers as roman numerals
ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

# DRBD settings
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

# Negative flag: dest "drbd_storage" stays True unless this is given
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")
# Cluster-wide IP version (4 or 6) for primary addresses, chosen at init
PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", default=constants.IP4_VERSION,
             action="store", dest="primary_ip_version",
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

# Free-form reason string recorded in the opcode reason trail
REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Maps the symbolic priority name the user typed to its numeric value
  (via C{_PRIONAME_TO_VALUE}) and stores it on the parsed options.  The
  docstring was previously unterminated, which swallowed the function
  body; fixed.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)
# Opcode priority; the user gives a symbolic name which the callback
# converts to a numeric priority
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

# OS-definition flags (ternary), used by "gnt-os modify"
HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")
# Ternary cluster setting: wipe disks before handing them to an instance.
# The help string's closing line was truncated here; restored.
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))
# Node parameters, parsed into a dict by the "keyval" option type
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

# "State of Record" power status for a node (ternary)
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

# Out-of-band command tuning
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")
# Forces the positional argument to be interpreted as a query filter.
# The help string's closing line was truncated here; restored.
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))
# Executes the operation but skips recording it in the configuration.
# The "dest" line was truncated here; restored.
NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")
# Evacuation scope selectors
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

# Target node group(s); "append" so the option may be given multiple times
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

# Cluster-verify error codes to downgrade; may be repeated
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")
# Disk/hypervisor state information options.  Both statements were
# truncated here (missing the "action" and "type" lines); restored per
# upstream — TODO confirm the "identkeyval" type name against the
# project's cli_option implementation.
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

# Live memory ballooning; "unit" type accepts suffixed sizes (e.g. 512M)
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

# IPv4 network options (gnt-network)
NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

# IPv6 counterparts
NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")
# Negative flag: "conflicts_check" stays True unless this option is given.
# The "default" line was truncated here; restored (store_false options
# conventionally default to True, cf. NOLVM_STORAGE_OPT above).
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Try to hotplug device")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1649 # common options for creating instances. add and import then add their own
1651 COMMON_CREATE_OPTS = [
1656 FILESTORE_DRIVER_OPT,
1662 NOCONFLICTSCHECK_OPT,
1674 # common instance policy options
1675 INSTANCE_POLICY_OPTS = [
1676 IPOLICY_BOUNDS_SPECS_OPT,
1677 IPOLICY_DISK_TEMPLATES,
1679 IPOLICY_SPINDLE_RATIO,
1682 # instance policy split specs options
1683 SPLIT_ISPECS_OPTS = [
1684 SPECS_CPU_COUNT_OPT,
1685 SPECS_DISK_COUNT_OPT,
1686 SPECS_DISK_SIZE_OPT,
1688 SPECS_NIC_COUNT_OPT,
1692 class _ShowUsage(Exception):
1693 """Exception class for L{_ParseArgs}.
1696 def __init__(self, exit_error):
1697 """Initializes instances of this class.
1699 @type exit_error: bool
1700 @param exit_error: Whether to report failure on exit
1703 Exception.__init__(self)
1704 self.exit_error = exit_error
1707 class _ShowVersion(Exception):
1708 """Exception class for L{_ParseArgs}.
1713 def _ParseArgs(binary, argv, commands, aliases, env_override):
1714 """Parser for the command line arguments.
1716 This function parses the arguments and returns the function which
1717 must be executed together with its (modified) arguments.
1719 @param binary: Script name
1720 @param argv: Command line arguments
1721 @param commands: Dictionary containing command definitions
1722 @param aliases: dictionary with command aliases {"alias": "target", ...}
1723 @param env_override: list of env variables allowed for default args
1724 @raise _ShowUsage: If usage description should be shown
1725 @raise _ShowVersion: If version should be shown
1728 assert not (env_override - set(commands))
1729 assert not (set(aliases.keys()) & set(commands.keys()))
1734 # No option or command given
1735 raise _ShowUsage(exit_error=True)
1737 if cmd == "--version":
1738 raise _ShowVersion()
1739 elif cmd == "--help":
1740 raise _ShowUsage(exit_error=False)
1741 elif not (cmd in commands or cmd in aliases):
1742 raise _ShowUsage(exit_error=True)
1744 # get command, unalias it, and look it up in commands
1746 if aliases[cmd] not in commands:
1747 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1748 " command '%s'" % (cmd, aliases[cmd]))
1752 if cmd in env_override:
1753 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1754 env_args = os.environ.get(args_env_name)
1756 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1758 func, args_def, parser_opts, usage, description = commands[cmd]
1759 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1760 description=description,
1761 formatter=TitledHelpFormatter(),
1762 usage="%%prog %s %s" % (cmd, usage))
1763 parser.disable_interspersed_args()
1764 options, args = parser.parse_args(args=argv[2:])
1766 if not _CheckArguments(cmd, args_def, args):
1767 return None, None, None
1769 return func, options, args
1772 def _FormatUsage(binary, commands):
1773 """Generates a nice description of all commands.
1775 @param binary: Script name
1776 @param commands: Dictionary containing command definitions
1779 # compute the max line length for cmd + usage
1780 mlen = min(60, max(map(len, commands)))
1782 yield "Usage: %s {command} [options...] [argument...]" % binary
1783 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1787 # and format a nice command list
1788 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1789 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1790 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1791 for line in help_lines:
1792 yield " %-*s %s" % (mlen, "", line)
1797 def _CheckArguments(cmd, args_def, args):
1798 """Verifies the arguments using the argument definition.
1802 1. Abort with error if values specified by user but none expected.
1804 1. For each argument in definition
1806 1. Keep running count of minimum number of values (min_count)
1807 1. Keep running count of maximum number of values (max_count)
1808 1. If it has an unlimited number of values
1810 1. Abort with error if it's not the last argument in the definition
1812 1. If last argument has limited number of values
1814 1. Abort with error if number of values doesn't match or is too large
1816 1. Abort with error if user didn't pass enough values (min_count)
1819 if args and not args_def:
1820 ToStderr("Error: Command %s expects no arguments", cmd)
1827 last_idx = len(args_def) - 1
1829 for idx, arg in enumerate(args_def):
1830 if min_count is None:
1832 elif arg.min is not None:
1833 min_count += arg.min
1835 if max_count is None:
1837 elif arg.max is not None:
1838 max_count += arg.max
1841 check_max = (arg.max is not None)
1843 elif arg.max is None:
1844 raise errors.ProgrammerError("Only the last argument can have max=None")
1847 # Command with exact number of arguments
1848 if (min_count is not None and max_count is not None and
1849 min_count == max_count and len(args) != min_count):
1850 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1853 # Command with limited number of arguments
1854 if max_count is not None and len(args) > max_count:
1855 ToStderr("Error: Command %s expects only %d argument(s)",
1859 # Command with some required arguments
1860 if min_count is not None and len(args) < min_count:
1861 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @param value: a "node" or "node:secondary-node" string (or a false
    value such as None)
  @return: a two-element sequence; note that for the "node:secondary"
    case this is the list returned by C{str.split}, while the
    single-node case returns a tuple C{(value, None)} — both unpack the
    same way.  The docstring terminator and the C{else} branch were
    previously truncated; restored.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # The guard for "no variants" was missing here (the docstring promises
  # os_variants may be None); without it the list comprehension would
  # raise TypeError.  Restored: fall back to the bare OS name.
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected, the defaults
    plus the extra fields when C{selected} starts with "+", otherwise
    the comma-split selection

  """
  # The "return default" line was missing here, leaving the None-check
  # with an empty body; restored.
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias: wraps a function with RPC setup/teardown — see
# rpc.RunWithRPC for the actual semantics
UsesRPC = rpc.RunWithRPC
1916 def AskUser(text, choices=None):
1917 """Ask the user a question.
1919 @param text: the question to ask
1921 @param choices: list with elements tuples (input_char, return_value,
1922 description); if not given, it will default to: [('y', True,
1923 'Perform the operation'), ('n', False, 'Do no do the operation')];
1924 note that the '?' char is reserved for help
1926 @return: one of the return values from the choices list; if input is
1927 not possible (i.e. not running with a tty, we return the last
1932 choices = [("y", True, "Perform the operation"),
1933 ("n", False, "Do not perform the operation")]
1934 if not choices or not isinstance(choices, list):
1935 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1936 for entry in choices:
1937 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1938 raise errors.ProgrammerError("Invalid choices element to AskUser")
1940 answer = choices[-1][1]
1942 for line in text.splitlines():
1943 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1944 text = "\n".join(new_text)
1946 f = file("/dev/tty", "a+")
1950 chars = [entry[0] for entry in choices]
1951 chars[-1] = "[%s]" % chars[-1]
1953 maps = dict([(entry[0], entry[1]) for entry in choices])
1957 f.write("/".join(chars))
1959 line = f.readline(2).strip().lower()
1964 for entry in choices:
1965 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
1984 def SendJob(ops, cl=None):
1985 """Function to submit an opcode without waiting for the results.
1988 @param ops: list of opcodes
1989 @type cl: luxi.Client
1990 @param cl: the luxi client to use for communicating with the master;
1991 if None, a new client will be created
1997 job_id = cl.SubmitJob(ops)
2002 def GenericPollJob(job_id, cbs, report_cbs):
2003 """Generic job-polling function.
2005 @type job_id: number
2006 @param job_id: Job ID
2007 @type cbs: Instance of L{JobPollCbBase}
2008 @param cbs: Data callbacks
2009 @type report_cbs: Instance of L{JobPollReportCbBase}
2010 @param report_cbs: Reporting callbacks
2013 prev_job_info = None
2014 prev_logmsg_serial = None
2019 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2022 # job not found, go away!
2023 raise errors.JobLost("Job with id %s lost" % job_id)
2025 if result == constants.JOB_NOTCHANGED:
2026 report_cbs.ReportNotChanged(job_id, status)
2031 # Split result, a tuple of (field values, log entries)
2032 (job_info, log_entries) = result
2033 (status, ) = job_info
2036 for log_entry in log_entries:
2037 (serial, timestamp, log_type, message) = log_entry
2038 report_cbs.ReportLogMessage(job_id, serial, timestamp,
2040 prev_logmsg_serial = max(prev_logmsg_serial, serial)
2042 # TODO: Handle canceled and archived jobs
2043 elif status in (constants.JOB_STATUS_SUCCESS,
2044 constants.JOB_STATUS_ERROR,
2045 constants.JOB_STATUS_CANCELING,
2046 constants.JOB_STATUS_CANCELED):
2049 prev_job_info = job_info
2051 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2053 raise errors.JobLost("Job with id %s lost" % job_id)
2055 status, opstatus, result = jobs[0]
2057 if status == constants.JOB_STATUS_SUCCESS:
2060 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2061 raise errors.OpExecError("Job was canceled")
2064 for idx, (status, msg) in enumerate(zip(opstatus, result)):
2065 if status == constants.OP_STATUS_SUCCESS:
2067 elif status == constants.OP_STATUS_ERROR:
2068 errors.MaybeRaise(msg)
2071 raise errors.OpExecError("partial failure (opcode %d): %s" %
2074 raise errors.OpExecError(str(msg))
2076 # default failure mode
2077 raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses provide the data-access primitives used while polling a
  job.  The C{__init__} definition line and the docstring terminators
  were truncated in this file; restored.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how poll progress (log messages, "still waiting"
  notifications) is reported to the user.  The C{__init__} definition
  line and docstring terminators were truncated in this file; restored.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """Job-polling callbacks backed by a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all queries

    """
    JobPollCbBase.__init__(self)
    # The "self.cl = cl" assignment was truncated in this file; without
    # it every method below would fail with AttributeError.  Restored.
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with (timestamp, log_type,
      log_msg) tuples for every log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Deliberately ignored: a plain feedback function has no channel for
    # "still waiting" notifications
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print to stdout/stderr.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # One-shot flags so each "waiting" notification is printed only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message payloads (e.g. structured job-set data) are stringified
  before being made safe for terminal output.  The docstring terminator
  was truncated in this file; restored.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: if given, log messages are passed to it instead of
      being printed on stdio
  @param reporter: mutually exclusive with feedback_fn; a ready-made
      L{JobPollReportCbBase} instance

  """
  # Several control-flow lines (client creation, reporter if/else
  # skeleton) were truncated in this file; restored per upstream.
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @return: the result of the first (and only) opcode of the job

  """
  # Client creation and the job-submission line were truncated in this
  # file; restored per upstream.
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: with the job id, when --submit was given

  """
  if opts and opts.submit_only:
    # The "job = [op]" line was truncated in this file; restored.
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  # The trail initialization, the reason guard and the final assignment
  # were truncated in this file; restored per upstream.
  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # The early-return for "no options" was truncated in this file;
  # without it the loop below would crash on options=None.  Restored.
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)
2338 def GetClient(query=False):
2339 """Connects to the a luxi socket and returns a client.
2341 @type query: boolean
2342 @param query: this signifies that the client will only be
2343 used for queries; if the build-time parameter
2344 enable-split-queries is enabled, then the client will be
2345 connected to the query socket instead of the masterd socket
2348 override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2350 if override_socket == constants.LUXI_OVERRIDE_MASTER:
2351 address = pathutils.MASTER_SOCKET
2352 elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2353 address = pathutils.QUERY_SOCKET
2355 address = override_socket
2356 elif query and constants.ENABLE_SPLIT_QUERY:
2357 address = pathutils.QUERY_SOCKET
2360 # TODO: Cache object?
2362 client = luxi.Client(address=address)
2363 except luxi.NoMasterError:
2364 ss = ssconf.SimpleStore()
2366 # Try to read ssconf file
2369 except errors.ConfigurationError:
2370 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2371 " not part of a cluster",
2374 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2375 if master != myself:
2376 raise errors.OpPrereqError("This is not the master node, please connect"
2377 " to node '%s' and rerun the command" %
2378 master, errors.ECODE_INVAL)
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    # Configuration corruption is more severe than a generic failure
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # This is not an error: the job was submitted successfully
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
2477 def GenericMain(commands, override=None, aliases=None,
2478 env_override=frozenset()):
2479 """Generic main function for all the gnt-* commands.
2481 @param commands: a dictionary with a special structure, see the design doc
2482 for command line handling.
2483 @param override: if not None, we expect a dictionary with keys that will
2484 override command line options; this can be used to pass
2485 options from the scripts to generic functions
2486 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2487 @param env_override: list of environment names which are allowed to submit
2488 default args for commands
2491 # save the program name and the entire command line for later logging
2493 binary = os.path.basename(sys.argv[0])
2495 binary = sys.argv[0]
2497 if len(sys.argv) >= 2:
2498 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2502 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2504 binary = "<unknown program>"
2505 cmdline = "<unknown>"
2511 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2513 except _ShowVersion:
2514 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2515 constants.RELEASE_VERSION)
2516 return constants.EXIT_SUCCESS
2517 except _ShowUsage, err:
2518 for line in _FormatUsage(binary, commands):
2522 return constants.EXIT_FAILURE
2524 return constants.EXIT_SUCCESS
2525 except errors.ParameterError, err:
2526 result, err_msg = FormatError(err)
2530 if func is None: # parse error
2533 if override is not None:
2534 for key, val in override.iteritems():
2535 setattr(options, key, val)
2537 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2538 stderr_logging=True)
2540 logging.info("Command line: %s", cmdline)
2543 result = func(options, args)
2544 except (errors.GenericError, luxi.ProtocolError,
2545 JobSubmittedException), err:
2546 result, err_msg = FormatError(err)
2547 logging.exception("Error during command processing")
2549 except KeyboardInterrupt:
2550 result = constants.EXIT_FAILURE
2551 ToStderr("Aborted. Note that if the operation created any jobs, they"
2552 " might have been submitted and"
2553 " will continue to run in the background.")
2554 except IOError, err:
2555 if err.errno == errno.EPIPE:
2556 # our terminal went away, we'll exit
2557 sys.exit(constants.EXIT_FAILURE)
2564 def ParseNicOption(optvalue):
2565 """Parses the value of the --net option(s).
2569 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2570 except (TypeError, ValueError), err:
2571 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2574 nics = [{}] * nic_max
2575 for nidx, ndict in optvalue:
2578 if not isinstance(ndict, dict):
2579 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2580 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2582 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2589 def GenericInstanceCreate(mode, opts, args):
2590 """Add an instance to the cluster via either creation or import.
2592 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2593 @param opts: the command line options selected by the user
2595 @param args: should contain only one element, the new instance name
2597 @return: the desired exit code
2602 (pnode, snode) = SplitNodeOption(opts.node)
2607 hypervisor, hvparams = opts.hypervisor
2610 nics = ParseNicOption(opts.nics)
2614 elif mode == constants.INSTANCE_CREATE:
2615 # default of one nic, all auto
2621 if opts.disk_template == constants.DT_DISKLESS:
2622 if opts.disks or opts.sd_size is not None:
2623 raise errors.OpPrereqError("Diskless instance but disk"
2624 " information passed", errors.ECODE_INVAL)
2627 if (not opts.disks and not opts.sd_size
2628 and mode == constants.INSTANCE_CREATE):
2629 raise errors.OpPrereqError("No disk information specified",
2631 if opts.disks and opts.sd_size is not None:
2632 raise errors.OpPrereqError("Please use either the '--disk' or"
2633 " '-s' option", errors.ECODE_INVAL)
2634 if opts.sd_size is not None:
2635 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2639 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2640 except ValueError, err:
2641 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2643 disks = [{}] * disk_max
2646 for didx, ddict in opts.disks:
2648 if not isinstance(ddict, dict):
2649 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2650 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2651 elif constants.IDISK_SIZE in ddict:
2652 if constants.IDISK_ADOPT in ddict:
2653 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2654 " (disk %d)" % didx, errors.ECODE_INVAL)
2656 ddict[constants.IDISK_SIZE] = \
2657 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2658 except ValueError, err:
2659 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2660 (didx, err), errors.ECODE_INVAL)
2661 elif constants.IDISK_ADOPT in ddict:
2662 if mode == constants.INSTANCE_IMPORT:
2663 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2664 " import", errors.ECODE_INVAL)
2665 ddict[constants.IDISK_SIZE] = 0
2667 raise errors.OpPrereqError("Missing size or adoption source for"
2668 " disk %d" % didx, errors.ECODE_INVAL)
2671 if opts.tags is not None:
2672 tags = opts.tags.split(",")
2676 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2677 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2679 if mode == constants.INSTANCE_CREATE:
2682 force_variant = opts.force_variant
2685 no_install = opts.no_install
2686 identify_defaults = False
2687 elif mode == constants.INSTANCE_IMPORT:
2690 force_variant = False
2691 src_node = opts.src_node
2692 src_path = opts.src_dir
2694 identify_defaults = opts.identify_defaults
2696 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2698 op = opcodes.OpInstanceCreate(instance_name=instance,
2700 disk_template=opts.disk_template,
2702 conflicts_check=opts.conflicts_check,
2703 pnode=pnode, snode=snode,
2704 ip_check=opts.ip_check,
2705 name_check=opts.name_check,
2706 wait_for_sync=opts.wait_for_sync,
2707 file_storage_dir=opts.file_storage_dir,
2708 file_driver=opts.file_driver,
2709 iallocator=opts.iallocator,
2710 hypervisor=hypervisor,
2712 beparams=opts.beparams,
2713 osparams=opts.osparams,
2717 force_variant=force_variant,
2721 no_install=no_install,
2722 identify_defaults=identify_defaults,
2723 ignore_ipolicy=opts.ignore_ipolicy)
2725 SubmitOrSend(op, opts)
2729 class _RunWhileClusterStoppedHelper:
2730 """Helper class for L{RunWhileClusterStopped} to simplify state management
2733 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2734 """Initializes this class.
2736 @type feedback_fn: callable
2737 @param feedback_fn: Feedback function
2738 @type cluster_name: string
2739 @param cluster_name: Cluster name
2740 @type master_node: string
2741 @param master_node Master node name
2742 @type online_nodes: list
2743 @param online_nodes: List of names of online nodes
2746 self.feedback_fn = feedback_fn
2747 self.cluster_name = cluster_name
2748 self.master_node = master_node
2749 self.online_nodes = online_nodes
2751 self.ssh = ssh.SshRunner(self.cluster_name)
2753 self.nonmaster_nodes = [name for name in online_nodes
2754 if name != master_node]
2756 assert self.master_node not in self.nonmaster_nodes
2758 def _RunCmd(self, node_name, cmd):
2759 """Runs a command on the local or a remote machine.
2761 @type node_name: string
2762 @param node_name: Machine name
2767 if node_name is None or node_name == self.master_node:
2768 # No need to use SSH
2769 result = utils.RunCmd(cmd)
2771 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2772 utils.ShellQuoteArgs(cmd))
2775 errmsg = ["Failed to run command %s" % result.cmd]
2777 errmsg.append("on node %s" % node_name)
2778 errmsg.append(": exitcode %s and error %s" %
2779 (result.exit_code, result.output))
2780 raise errors.OpExecError(" ".join(errmsg))
2782 def Call(self, fn, *args):
2783 """Call function while all daemons are stopped.
2786 @param fn: Function to be called
2789 # Pause watcher by acquiring an exclusive lock on watcher state file
2790 self.feedback_fn("Blocking watcher")
2791 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2793 # TODO: Currently, this just blocks. There's no timeout.
2794 # TODO: Should it be a shared lock?
2795 watcher_block.Exclusive(blocking=True)
2797 # Stop master daemons, so that no new jobs can come in and all running
2799 self.feedback_fn("Stopping master daemons")
2800 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2802 # Stop daemons on all nodes
2803 for node_name in self.online_nodes:
2804 self.feedback_fn("Stopping daemons on %s" % node_name)
2805 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2807 # All daemons are shut down now
2809 return fn(self, *args)
2810 except Exception, err:
2811 _, errmsg = FormatError(err)
2812 logging.exception("Caught exception")
2813 self.feedback_fn(errmsg)
2816 # Start cluster again, master node last
2817 for node_name in self.nonmaster_nodes + [self.master_node]:
2818 self.feedback_fn("Starting daemons on %s" % node_name)
2819 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2822 watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # Avoid trailing whitespace on left-aligned last column
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2961 def _FormatBool(value):
2962 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report field status to the caller-provided accumulator
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    else:
      return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: Whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    match the actual field type

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    # Combine the name filter with the caller-supplied one
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list
  @return: Field name, type name, title and description

  """
  return [fdef.name,
          _QFT_NAMES.get(fdef.kind, fdef.kind),
          fdef.title,
          fdef.doc]
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn(object):
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
3306 def _GetColFormatString(width, align_right):
3307 """Returns the format string for a field.
3315 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # Anything that is not a two-element sequence is unparseable
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # Match either the group name or its UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3507 def _ToStream(stream, txt, *args):
3508 """Write a message to a stream, bypassing the logging system
3510 @type stream: file object
3511 @param stream: the file to which we should write
3513 @param txt: the message
3519 stream.write(txt % args)
3524 except IOError, err:
3525 if err.errno == errno.EPIPE:
3526 # our terminal went away, we'll exit
3527 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
3556 class JobExecutor(object):
3557 """Class which manages the submission and execution of multiple jobs.
3559 Note that instances of this class should not be reused between
3563 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3568 self.verbose = verbose
3571 self.feedback_fn = feedback_fn
3572 self._counter = itertools.count()
3575 def _IfName(name, fmt):
3576 """Helper function for formatting name.
3584 def QueueJob(self, name, *ops):
3585 """Record a job for later submit.
3588 @param name: a description of the job, will be used in WaitJobSet
3591 SetGenericOpcodeOpts(ops, self.opts)
3592 self.queue.append((self._counter.next(), name, ops))
3594 def AddJobId(self, name, status, job_id):
3595 """Adds a job ID to the internal queue.
3598 self.jobs.append((self._counter.next(), status, job_id, name))
3600 def SubmitPending(self, each=False):
3601 """Submit all pending jobs.
3606 for (_, _, ops) in self.queue:
3607 # SubmitJob will remove the success status, but raise an exception if
3608 # the submission fails, so we'll notice that anyway.
3609 results.append([True, self.cl.SubmitJob(ops)[0]])
3611 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3612 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3613 self.jobs.append((idx, status, data, name))
3615 def _ChooseJob(self):
3616 """Choose a non-waiting/queued job to poll next.
3619 assert self.jobs, "_ChooseJob called with empty job list"
3621 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3625 for job_data, status in zip(self.jobs, result):
3626 if (isinstance(status, list) and status and
3627 status[0] in (constants.JOB_STATUS_QUEUED,
3628 constants.JOB_STATUS_WAITING,
3629 constants.JOB_STATUS_CANCELING)):
3630 # job is still present and waiting
3632 # good candidate found (either running job or lost job)
3633 self.jobs.remove(job_data)
3637 return self.jobs.pop(0)
3639   def GetResults(self):
3640     """Wait for and return the results of all jobs.
3643     @return: list of tuples (success, job results), in the same order
3644         as the submitted jobs; if a job has failed, instead of the result
3645         there will be the error message
# Flush any jobs still only queued locally.
3649     self.SubmitPending()
3652     ok_jobs = [row[2] for row in self.jobs if row[1]]
3654       ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3656     # first, remove any non-submitted jobs
3657     self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
# For failed submissions the "jid" slot holds the error message, not an ID.
3658     for idx, _, jid, name in failures:
3659       ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3660       results.append((idx, False, jid))
# Poll submitted jobs one at a time, preferring those already out of the queue.
3663       (idx, _, jid, name) = self._ChooseJob()
3664       ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3666         job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
# An archived job's result can no longer be retrieved; report and move on.
3668       except errors.JobLost, err:
3669         _, job_result = FormatError(err)
3670         ToStderr("Job %s%s has been archived, cannot check its result",
3671                  jid, self._IfName(name, " for %s"))
3673       except (errors.GenericError, luxi.ProtocolError), err:
3674         _, job_result = FormatError(err)
3676         # the error message will always be shown, verbose or not
3677         ToStderr("Job %s%s has failed: %s",
3678                  jid, self._IfName(name, " for %s"), job_result)
3680       results.append((idx, success, job_result))
3682     # sort based on the index, then drop it
3684     results = [i[1:] for i in results]
3688   def WaitOrShow(self, wait):
3689     """Wait for job results or only print the job IDs.
3692     @param wait: whether to wait or not
# wait=True: block until all jobs finish and return their full results.
3696       return self.GetResults()
# wait=False: just submit and print/report each job's submission outcome.
3699       self.SubmitPending()
3700       for _, status, result, name in self.jobs:
3702           ToStdout("%s: %s", result, name)
3704           ToStderr("Failure for %s: %s", name, result)
# Returns (status, job-id-or-error-message) pairs.
3705       return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      # Nested parameter group: recurse, defaulting the own side to {} so
      # every effective key is still rendered.
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      # Scalar: show the own value, or mark the effective one as a default.
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
def _FormatListInfoDefault(data, def_data):
  """Formats a list value, falling back to a "default (...)" rendering.

  @param data: the custom list value, or None when not set
  @param def_data: the effective (default) list value, used when C{data}
      is None
  @rtype: str

  """
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    # No custom value: render the effective one, marked as a default.
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret
3737 def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3738   """Formats an instance policy.
3740   @type custom_ipolicy: dict
3741   @param custom_ipolicy: own policy
3742   @type eff_ipolicy: dict
3743   @param eff_ipolicy: effective policy (including defaults); ignored for
3745   @type iscluster: bool
3746   @param iscluster: the policy is at cluster level
3747   @rtype: list of pairs
3748   @return: formatted data, suitable for L{PrintGenericInfo}
# At cluster level the custom policy is already complete, so it doubles as
# the effective one.
3752     eff_ipolicy = custom_ipolicy
# Render each min/max bounds pair, indexed by position ("min/0", "max/0", ...).
3755   custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3757     for (k, minmax) in enumerate(custom_minmax):
3759         ("%s/%s" % (key, k),
3760          FormatParamsDictInfo(minmax[key], minmax[key]))
3761         for key in constants.ISPECS_MINMAX_KEYS
# No custom bounds: render the effective ones, with an empty own side so
# every value shows as "default (...)".
3764     for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3766         ("%s/%s" % (key, k),
3767          FormatParamsDictInfo({}, minmax[key]))
3768         for key in constants.ISPECS_MINMAX_KEYS
3770   ret = [("bounds specs", minmax_out)]
# Cluster policies additionally carry "std" specs.
3773     stdspecs = custom_ipolicy[constants.ISPECS_STD]
3775       (constants.ISPECS_STD,
3776        FormatParamsDictInfo(stdspecs, stdspecs))
3780     ("allowed disk templates",
3781      _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3782                             eff_ipolicy[constants.IPOLICY_DTS]))
# Remaining scalar policy parameters (ratios etc.), defaulted individually.
3785     (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3786     for key in constants.IPOLICY_PARAMETERS
3791 def _PrintSpecsParameters(buf, specs):
3792 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3793 buf.write(",".join(values))
3796 def PrintIPolicyCommand(buf, ipolicy, isgroup):
3797   """Print the command option used to generate the given instance policy.
3799   Currently only the parts dealing with specs are supported.
3802   @param buf: stream to write into
3804   @param ipolicy: instance policy
3806   @param isgroup: whether the policy is at group level
# "std" specs exist only above group level; emitted as one --ipolicy-std-specs
# style option.
3810     stdspecs = ipolicy.get("std")
3812       buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3813       _PrintSpecsParameters(buf, stdspecs)
# Each complete min/max pair becomes one bounds-specs option value.
3814   minmaxes = ipolicy.get("minmax", [])
3816   for minmax in minmaxes:
3817     minspecs = minmax.get("min")
3818     maxspecs = minmax.get("max")
# Only emit when both bounds are present; a half-specified pair is skipped.
3819     if minspecs and maxspecs:
3821       buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3826       _PrintSpecsParameters(buf, minspecs)
3828       _PrintSpecsParameters(buf, maxspecs)
3831 def ConfirmOperation(names, list_type, text, extra=""):
3832   """Ask the user to confirm an operation on a list of list_type.
3834   This function is used to request confirmation for doing an operation
3835   on a given list of list_type.
3838   @param names: the list of names that we display when
3839       we ask for confirmation
3840   @type list_type: str
3841   @param list_type: Human readable name for elements in the list (e.g. nodes)
3843   @param text: the operation that the user should confirm
3845   @return: True or False depending on user's confirmation.
3849   msg = ("The %s will operate on %d %s.\n%s"
3850          "Do you want to continue?" % (text, count, list_type, extra))
3851   affected = (("\nAffected %s:\n" % list_type) +
3852               "\n".join(["  %s" % name for name in names]))
3854   choices = [("y", True, "Yes, execute the %s" % text),
3855              ("n", False, "No, abort the %s" % text)]
# For long lists a "view" choice is offered so the full list is only shown
# on request.
3858     choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
# Short list: the affected names are shown inline in the question.
3861     question = msg + affected
3863   choice = AskUser(question, choices)
# After viewing the list, ask again with the names included.
3866     choice = AskUser(msg + affected, choices)
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  @type elements: dict
  @param elements: mapping from spec name to value string
  @rtype: dict
  @return: copy of the input where every value except the literal
      C{constants.VALUE_DEFAULT} has been parsed with L{utils.ParseUnit}

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      # "default" is a keyword, not a size: keep it unchanged
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
# Builds the min/max/std instance specs of "ipolicy" from the legacy
# per-parameter --specs-xxx options (each a {min,max,std} dict).
3883 def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3884                              ispecs_disk_count, ispecs_disk_size,
3885                              ispecs_nic_count, group_ipolicy, fill_all):
# Memory and disk sizes may carry units ("4G"); parse them up front.
3888     ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3889   if ispecs_disk_size:
3890     ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3891   except (TypeError, ValueError, errors.UnitParseError), err:
3892     raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3894                                (ispecs_disk_size, ispecs_mem_size, err),
3897   # prepare ipolicy dict
3898   ispecs_transposed = {
3899     constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3900     constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3901     constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3902     constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3903     constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3906   # first, check that the values given are correct
# Group-level and cluster-level policies allow different value types
# (groups may use "default").
3908     forced_type = TISPECS_GROUP_TYPES
3910     forced_type = TISPECS_CLUSTER_TYPES
3911   for specs in ispecs_transposed.values():
3912     assert type(specs) is dict
3913     utils.ForceDictType(specs, forced_type)
# Transpose {param: {min/max/std: val}} into {min/max/std: {param: val}}.
3917     constants.ISPECS_MIN: {},
3918     constants.ISPECS_MAX: {},
3919     constants.ISPECS_STD: {},
3921   for (name, specs) in ispecs_transposed.iteritems():
3922     assert name in constants.ISPECS_PARAMETERS
3923     for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3924       assert key in ispecs
3925       ispecs[key][name] = val
# min/max bounds: optionally filled with defaults, stored as a one-element
# minmax list.
3927   for key in constants.ISPECS_MINMAX_KEYS:
3930         objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3932       minmax_out[key] = ispecs[key]
3933   ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
# std specs: filled with cluster defaults only when fill_all is requested.
3935     ipolicy[constants.ISPECS_STD] = \
3936       objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3937                        ispecs[constants.ISPECS_STD])
3939     ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
# Parses the unit-bearing values (disk size, memory size) of one spec dict;
# "keyname" is only used in the error message.
3942 def _ParseSpecUnit(spec, keyname):
3944   for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3947         ret[k] = utils.ParseUnit(ret[k])
3948       except (TypeError, ValueError, errors.UnitParseError), err:
3949         raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3950                                     " specs: %s" % (k, ret[k], keyname, err)),
# Parses and validates one instance spec dict; when "required" is True all
# spec parameters must be present.
3955 def _ParseISpec(spec, keyname, required):
3956   ret = _ParseSpecUnit(spec, keyname)
3957   utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
3958   missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
3959   if required and missing:
3960     raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
3961                                (keyname, utils.CommaJoin(missing)),
# Detects the special case where the minmax specs consist of a single
# allowed keyword (e.g. "default") with no value, as used by gnt-cluster.
3966 def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
3968   if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
3969       len(minmax_ispecs[0]) == 1):
3970     for (key, spec) in minmax_ispecs[0].items():
3971       # This loop is executed exactly once
3972       if key in allowed_values and not spec:
# Builds the specs part of "ipolicy_out" from the --ipolicy-xxx-specs style
# options (full min/max pairs plus optional std specs).
3977 def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
3978                             group_ipolicy, allowed_values):
# An allowed keyword (e.g. "default") replaces the whole minmax list.
3979   found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
3980   if found_allowed is not None:
3981     ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
3982   elif minmax_ispecs is not None:
3984     for mmpair in minmax_ispecs:
3986       for (key, spec) in mmpair.items():
3987         if key not in constants.ISPECS_MINMAX_KEYS:
3988           msg = "Invalid key in bounds instance specifications: %s" % key
3989           raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
# Both bounds of every pair are fully required.
3990         mmpair_out[key] = _ParseISpec(spec, key, True)
3991       minmax_out.append(mmpair_out)
3992     ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
3993   if std_ispecs is not None:
3994     assert not group_ipolicy # This is not an option for gnt-group
# std specs may be partial, hence required=False.
3995     ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
3998 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3999                           ispecs_cpu_count=None,
4000                           ispecs_disk_count=None,
4001                           ispecs_disk_size=None,
4002                           ispecs_nic_count=None,
4005                           ipolicy_disk_templates=None,
4006                           ipolicy_vcpu_ratio=None,
4007                           ipolicy_spindle_ratio=None,
4008                           group_ipolicy=False,
4009                           allowed_values=None,
4011   """Creation of instance policy based on command line options.
4013   @param fill_all: whether for cluster policies we should ensure that
4014     all values are filled
# fill_all and allowed_values are mutually exclusive by construction.
4017   assert not (fill_all and allowed_values)
# The legacy --specs-xxx options and the newer --ipolicy-xxx-specs options
# cannot be mixed.
4019   split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4020                  ispecs_disk_size or ispecs_nic_count)
4021   if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4022     raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4023                                " together with any --ipolicy-xxx-specs option",
4026   ipolicy_out = objects.MakeEmptyIPolicy()
# Populate the specs part from whichever option family was used.
4029     _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4030                              ispecs_disk_count, ispecs_disk_size,
4031                              ispecs_nic_count, group_ipolicy, fill_all)
4032   elif (minmax_ispecs is not None or std_ispecs is not None):
4033     _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4034                             group_ipolicy, allowed_values)
# Scalar policy fields are copied over only when explicitly given; an
# allowed keyword (e.g. "default") is stored verbatim.
4036   if ipolicy_disk_templates is not None:
4037     if allowed_values and ipolicy_disk_templates in allowed_values:
4038       ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4040       ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4041   if ipolicy_vcpu_ratio is not None:
4042     ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4043   if ipolicy_spindle_ratio is not None:
4044     ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4046   assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
# Cluster-level policies may be completed with the built-in defaults.
4048   if not group_ipolicy and fill_all:
4049     ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4054 def _SerializeGenericInfo(buf, data, level, afterkey=False):
4055   """Formatting core of L{PrintGenericInfo}.
4057   @param buf: (string) stream to accumulate the result into
4058   @param data: data to format
4060   @param level: depth in the data hierarchy, used for indenting
4061   @type afterkey: bool
4062   @param afterkey: True when we are in the middle of a line after a key (used
4063       to properly add newlines or indentation)
# Dictionaries: keys are emitted sorted, values recursively one level deeper.
4067   if isinstance(data, dict):
4076       for key in sorted(data):
4078         buf.write(baseind * level)
4083         _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4084   elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4085     # list of tuples (an ordered dictionary)
# Same as the dict branch, but preserving the given key order.
4091       for (key, val) in data:
4093         buf.write(baseind * level)
4098         _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
# Plain lists: rendered as YAML sequence items.
4099   elif isinstance(data, list):
4110         buf.write(baseind * level)
4114         buf.write(baseind[1:])
4115         _SerializeGenericInfo(buf, item, level + 1)
4117     # This branch should be only taken for strings, but it's practically
4118     # impossible to guarantee that no other types are produced somewhere
4119     buf.write(str(data))
4123 def PrintGenericInfo(data):
4124   """Print information formatted according to the hierarchy.
4126   The output is a valid YAML string.
4128   @param data: the data to print. It's a hierarchical structure whose elements
4130       - dictionaries, where keys are strings and values are of any of the
4132       - lists of pairs (key, value), where key is a string and value is of
4133         any of the types listed here; it's a way to encode ordered
4135       - lists of any of the types listed here
# Serialize into a buffer, then emit without the trailing newline(s).
4140   _SerializeGenericInfo(buf, data, 0)
4141   ToStdout(buf.getvalue().rstrip("\n"))