4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 "ENABLED_DISK_TEMPLATES_OPT",
89 "FILESTORE_DRIVER_OPT",
97 "GLOBAL_SHARED_FILEDIR_OPT",
102 "DEFAULT_IALLOCATOR_OPT",
103 "IDENTIFY_DEFAULTS_OPT",
104 "IGNORE_CONSIST_OPT",
106 "IGNORE_FAILURES_OPT",
107 "IGNORE_OFFLINE_OPT",
108 "IGNORE_REMOVE_FAILURES_OPT",
109 "IGNORE_SECONDARIES_OPT",
111 "INCLUDEDEFAULTS_OPT",
114 "MAINTAIN_NODE_HEALTH_OPT",
116 "MASTER_NETMASK_OPT",
118 "MIGRATION_MODE_OPT",
122 "NEW_CLUSTER_CERT_OPT",
123 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
124 "NEW_CONFD_HMAC_KEY_OPT",
128 "NEW_SPICE_CERT_OPT",
130 "NOCONFLICTSCHECK_OPT",
131 "NODE_FORCE_JOIN_OPT",
133 "NODE_PLACEMENT_OPT",
137 "NODRBD_STORAGE_OPT",
143 "NOMODIFY_ETCHOSTS_OPT",
144 "NOMODIFY_SSH_SETUP_OPT",
148 "NORUNTIME_CHGS_OPT",
151 "NOSSH_KEYCHECK_OPT",
165 "PREALLOC_WIPE_DISKS_OPT",
166 "PRIMARY_IP_VERSION_OPT",
173 "REMOVE_INSTANCE_OPT",
174 "REMOVE_RESERVED_IPS_OPT",
180 "SECONDARY_ONLY_OPT",
185 "SHUTDOWN_TIMEOUT_OPT",
187 "SPECS_CPU_COUNT_OPT",
188 "SPECS_DISK_COUNT_OPT",
189 "SPECS_DISK_SIZE_OPT",
190 "SPECS_MEM_SIZE_OPT",
191 "SPECS_NIC_COUNT_OPT",
193 "IPOLICY_STD_SPECS_OPT",
194 "IPOLICY_DISK_TEMPLATES",
195 "IPOLICY_VCPU_RATIO",
201 "STARTUP_PAUSED_OPT",
210 "USE_EXTERNAL_MIP_SCRIPT",
218 "IGNORE_IPOLICY_OPT",
219 "INSTANCE_POLICY_OPTS",
220 # Generic functions for CLI programs
222 "CreateIPolicyFromOpts",
224 "GenericInstanceCreate",
230 "JobSubmittedException",
232 "RunWhileClusterStopped",
236 # Formatting functions
237 "ToStderr", "ToStdout",
240 "FormatParamsDictInfo",
242 "PrintIPolicyCommand",
252 # command line options support infrastructure
253 "ARGS_MANY_INSTANCES",
256 "ARGS_MANY_NETWORKS",
276 "OPT_COMPL_INST_ADD_NODES",
277 "OPT_COMPL_MANY_NODES",
278 "OPT_COMPL_ONE_IALLOCATOR",
279 "OPT_COMPL_ONE_INSTANCE",
280 "OPT_COMPL_ONE_NODE",
281 "OPT_COMPL_ONE_NODEGROUP",
282 "OPT_COMPL_ONE_NETWORK",
284 "OPT_COMPL_ONE_EXTSTORAGE",
289 "COMMON_CREATE_OPTS",
295 #: Priorities (sorted)
297 ("low", constants.OP_PRIO_LOW),
298 ("normal", constants.OP_PRIO_NORMAL),
299 ("high", constants.OP_PRIO_HIGH),
302 #: Priority dictionary for easier lookup
303 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
304 # we migrate to Python 2.6
305 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
307 # Query result status for clients
310 QR_INCOMPLETE) = range(3)
312 #: Maximum batch size for ChooseJob
316 # constants used to create InstancePolicy dictionary
317 TISPECS_GROUP_TYPES = {
318 constants.ISPECS_MIN: constants.VTYPE_INT,
319 constants.ISPECS_MAX: constants.VTYPE_INT,
322 TISPECS_CLUSTER_TYPES = {
323 constants.ISPECS_MIN: constants.VTYPE_INT,
324 constants.ISPECS_MAX: constants.VTYPE_INT,
325 constants.ISPECS_STD: constants.VTYPE_INT,
328 #: User-friendly names for query2 field types
330 constants.QFT_UNKNOWN: "Unknown",
331 constants.QFT_TEXT: "Text",
332 constants.QFT_BOOL: "Boolean",
333 constants.QFT_NUMBER: "Number",
334 constants.QFT_UNIT: "Storage size",
335 constants.QFT_TIMESTAMP: "Timestamp",
336 constants.QFT_OTHER: "Custom",
  # NOTE(review): this dump is elided here -- the __init__ body and the
  # "def __repr__(self):" header between these lines are missing; only
  # the signature and the __repr__ return expression are visible.
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    # min/max bound how many positional values this argument accepts;
    # the names deliberately shadow builtins (hence the W0622 disable).
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # candidate values offered for suggestion/completion
    self.choices = choices

  # NOTE(review): the "def __repr__(self):" header is missing from this
  # dump; only the return expression below is visible.
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
387 class ArgNode(_Argument):
393 class ArgNetwork(_Argument):
class ArgGroup(_Argument):
  """Node group argument.

  """
405 class ArgJobId(_Argument):
class ArgFile(_Argument):
  """File path argument.

  """
417 class ArgCommand(_Argument):
423 class ArgHost(_Argument):
429 class ArgOs(_Argument):
class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """
# Argument-specification shortcuts reused by the per-command argument
# tables; min/max bound how many positional values are accepted.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  # The command dispatcher must have attached a "tag_type" attribute to
  # opts; its absence is a programming error, not a user error.
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  # NOTE(review): this dump is elided here -- the assignment of "kind"
  # (presumably kind = opts.tag_type) is not visible.  TODO confirm.
  if kind == constants.TAG_CLUSTER:
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    # NOTE(review): branch bodies are elided; the visible error below
    # suggests the per-object branches require a name taken from args.
    raise errors.OpPrereqError("no arguments passed to the command",
  # Any other tag kind is unexpected and treated as a programming error.
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  # NOTE(review): elided here -- presumably a guard for fname being unset
  # and the special handling of "-" (stdin) per the docstring.  TODO confirm.
  new_fh = open(fname, "r")
  # we don't use the nice 'new_data = [line.strip() for line in fh]'
  # because of python bug 1633941
  # NOTE(review): the "new_data" initialisation and the read loop header
  # are elided in this dump; only the loop body lines are visible.
  line = new_fh.readline()
  new_data.append(line.strip())
  # Tags from the file are appended to the positional arguments in place.
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # Query-only client: listing tags never modifies cluster state.
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  # NOTE(review): the output step (sorting/printing the collected tags)
  # is elided from this dump.
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # May append further tags read from opts.tags_source (see _ExtendTags).
  _ExtendTags(opts, args)
  # NOTE(review): the emptiness check guarding this raise (presumably
  # "if not args:") is elided from this dump.
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # May append further tags read from opts.tags_source (see _ExtendTags).
  _ExtendTags(opts, args)
  # NOTE(review): the emptiness check guarding this raise (presumably
  # "if not args:") is elided from this dump.
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
560 def check_unit(option, opt, value): # pylint: disable=W0613
561 """OptParsers custom converter for units.
565 return utils.ParseUnit(value)
566 except errors.UnitParseError, err:
567 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  for elem in utils.UnescapeAndSplit(data, sep=","):
    # NOTE(review): this dump is elided -- the "=" membership test and the
    # branch structure selecting among the assignments below are not fully
    # visible; the accumulator dict and final return are also missing.
      key, val = elem.split("=", 1)
      if elem.startswith(NO_PREFIX):
        # "no_key" form: value becomes False, prefix stripped
        key, val = elem[len(NO_PREFIX):], False
      elif elem.startswith(UN_PREFIX):
        # "-key" form: value becomes None, prefix stripped
        key, val = elem[len(UN_PREFIX):], None
        # bare "key" form: value becomes True
        key, val = elem, True
      raise errors.ParameterError("Missing value for key '%s' in option %s" %
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  # NOTE(review): elided -- the ":" membership test selecting between the
  # two assignments below is not visible in this dump.
  ident, rest = value, ""
  ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    # NO_PREFIX on the ident removes the whole parameter group; the guard
    # rejecting trailing options (elided here) raises the error below.
    msg = "Cannot pass options when removing parameter groups: %s" % value
    raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        # UN_PREFIX is only honoured when not immediately followed by a digit
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    msg = "Cannot pass options when removing parameter groups: %s" % value
    raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  # Plain "ident:key=val,..." case: delegate the key/value part.
  kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
  retval = (ident, kv_dict)
  # NOTE(review): the "return retval" line is elided from this dump.
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)
def _SplitListKeyVal(opt, value):
  # Parses "ident:key=val,.../ident2:key=val,..." into a dict keyed by
  # ident.  NOTE(review): the accumulator initialisation (presumably
  # "retval = {}"), the empty-section check guarding the first raise, the
  # duplicate-ident check guarding the second raise, and the final return
  # are all elided from this dump.
  for elem in value.split("/"):
    raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    msg = ("Duplicated parameter '%s' in parsing %s: %s" %
    raise errors.ParameterError(msg)
    retval[ident] = valdict
def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  # Each "//"-separated line becomes one dict, parsed by _SplitListKeyVal.
  # NOTE(review): the "retval" initialisation and the final return are
  # elided from this dump.
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @param opt: the option name, used in error messages
  @param value: the raw string value
  @raises errors.ParameterError: if the value is not a recognised boolean

  """
  value = value.lower()
  # The branch bodies were missing in the reviewed copy; restored the
  # returns pinned by the docstring ("either True or False") and by the
  # fall-through raise for unrecognised values.
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  @param opt: the option name (unused)
  @param value: the raw string value
  @return: list of unescaped, comma-split elements; [] for an empty string

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  # The check announced by the comment above was missing in the reviewed
  # copy, so an empty value would have produced [""]; restored it.
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  # Case-insensitive comparison against the "default" keyword.
  value = value.lower()
  if value == constants.VALUE_DEFAULT:
  # NOTE(review): the branch bodies are elided in this dump; presumably
  # the default keyword is passed through and any other value is converted
  # to float.  TODO confirm against the original source.
735 # completion_suggestion is normally a list. Using numeric values not evaluating
736 # to False for dynamic completion.
737 (OPT_COMPL_MANY_NODES,
739 OPT_COMPL_ONE_INSTANCE,
741 OPT_COMPL_ONE_EXTSTORAGE,
742 OPT_COMPL_ONE_IALLOCATOR,
743 OPT_COMPL_ONE_NETWORK,
744 OPT_COMPL_INST_ADD_NODES,
745 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
747 OPT_COMPL_ALL = compat.UniqueFrozenset([
748 OPT_COMPL_MANY_NODES,
750 OPT_COMPL_ONE_INSTANCE,
752 OPT_COMPL_ONE_EXTSTORAGE,
753 OPT_COMPL_ONE_IALLOCATOR,
754 OPT_COMPL_ONE_NETWORK,
755 OPT_COMPL_INST_ADD_NODES,
756 OPT_COMPL_ONE_NODEGROUP,
class CliOption(Option):
  """Custom option class for optparse.

  Adds the "completion_suggest" attribute and registers the custom value
  types implemented by the check_* converters above.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
  # NOTE(review): the ATTRS list closer and all TYPES entries after the
  # first are elided in this dump; the TYPE_CHECKER registrations below
  # list the full set of custom types.
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
# (all option definitions below go through this alias).
cli_option = CliOption
792 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
793 help="Increase debugging level")
795 NOHDR_OPT = cli_option("--no-headers", default=False,
796 action="store_true", dest="no_headers",
797 help="Don't display column headers")
799 SEP_OPT = cli_option("--separator", default=None,
800 action="store", dest="separator",
801 help=("Separator between output fields"
802 " (defaults to one space)"))
804 USEUNITS_OPT = cli_option("--units", default=None,
805 dest="units", choices=("h", "m", "g", "t"),
806 help="Specify units for output (one of h/m/g/t)")
808 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
809 type="string", metavar="FIELDS",
810 help="Comma separated list of output fields")
812 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
813 default=False, help="Force the operation")
815 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
816 default=False, help="Do not require confirmation")
818 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
819 action="store_true", default=False,
820 help=("Ignore offline nodes and do as much"
823 TAG_ADD_OPT = cli_option("--tags", dest="tags",
824 default=None, help="Comma-separated list of instance"
827 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
828 default=None, help="File with tag names")
830 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
831 default=False, action="store_true",
832 help=("Submit the job and return the job ID, but"
833 " don't wait for the job to finish"))
835 SYNC_OPT = cli_option("--sync", dest="do_locking",
836 default=False, action="store_true",
837 help=("Grab locks while doing the queries"
838 " in order to ensure more consistent results"))
840 DRY_RUN_OPT = cli_option("--dry-run", default=False,
842 help=("Do not execute the operation, just run the"
843 " check steps and verify if it could be"
846 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
848 help="Increase the verbosity of the operation")
850 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
851 action="store_true", dest="simulate_errors",
852 help="Debugging option that makes the operation"
853 " treat most runtime checks as failed")
855 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
856 default=True, action="store_false",
857 help="Don't wait for sync (DANGEROUS!)")
859 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
860 default=False, action="store_true",
861 help="Wait for disks to sync")
863 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
864 action="store_true", default=False,
865 help="Enable offline instance")
867 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
868 action="store_true", default=False,
869 help="Disable down instance")
871 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
872 help=("Custom disk setup (%s)" %
873 utils.CommaJoin(constants.DISK_TEMPLATES)),
874 default=None, metavar="TEMPL",
875 choices=list(constants.DISK_TEMPLATES))
877 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
878 help="Do not create any network cards for"
881 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
882 help="Relative path under default cluster-wide"
883 " file storage dir to store file-based disks",
884 default=None, metavar="<DIR>")
886 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
887 help="Driver to use for image files",
888 default="loop", metavar="<DRIVER>",
889 choices=list(constants.FILE_DRIVER))
891 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
892 help="Select nodes for the instance automatically"
893 " using the <NAME> iallocator plugin",
894 default=None, type="string",
895 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
897 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
899 help="Set the default instance"
901 default=None, type="string",
902 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
904 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
906 completion_suggest=OPT_COMPL_ONE_OS)
908 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
909 type="keyval", default={},
910 help="OS parameters")
912 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
913 action="store_true", default=False,
914 help="Force an unknown variant")
916 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
917 action="store_true", default=False,
918 help="Do not install the OS (will"
921 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
922 dest="allow_runtime_chgs",
923 default=True, action="store_false",
924 help="Don't allow runtime changes")
926 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
927 type="keyval", default={},
928 help="Backend parameters")
930 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
931 default={}, dest="hvparams",
932 help="Hypervisor parameters")
934 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
935 help="Disk template parameters, in the format"
936 " template:option=value,option=value,...",
937 type="identkeyval", action="append", default=[])
939 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
940 type="keyval", default={},
941 help="Memory size specs: list of key=value,"
942 " where key is one of min, max, std"
943 " (in MB or using a unit)")
945 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
946 type="keyval", default={},
947 help="CPU count specs: list of key=value,"
948 " where key is one of min, max, std")
950 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
951 dest="ispecs_disk_count",
952 type="keyval", default={},
953 help="Disk count specs: list of key=value,"
954 " where key is one of min, max, std")
956 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
957 type="keyval", default={},
958 help="Disk size specs: list of key=value,"
959 " where key is one of min, max, std"
960 " (in MB or using a unit)")
962 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
963 type="keyval", default={},
964 help="NIC count specs: list of key=value,"
965 " where key is one of min, max, std")
967 IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
968 IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
969 dest="ipolicy_bounds_specs",
970 type="multilistidentkeyval", default=None,
971 help="Complete instance specs limits")
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   # typo fix: was "Complte"
                                   help="Complete standard instance specs")
979 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
980 dest="ipolicy_disk_templates",
981 type="list", default=None,
982 help="Comma-separated list of"
983 " enabled disk templates")
985 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
986 dest="ipolicy_vcpu_ratio",
987 type="maybefloat", default=None,
988 help="The maximum allowed vcpu-to-cpu ratio")
990 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
991 dest="ipolicy_spindle_ratio",
992 type="maybefloat", default=None,
993 help=("The maximum allowed instances to"
996 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
997 help="Hypervisor and hypervisor options, in the"
998 " format hypervisor:option=value,option=value,...",
999 default=None, type="identkeyval")
1001 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1002 help="Hypervisor and hypervisor options, in the"
1003 " format hypervisor:option=value,option=value,...",
1004 default=[], action="append", type="identkeyval")
1006 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1007 action="store_false",
1008 help="Don't check that the instance's IP"
1011 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1012 default=True, action="store_false",
1013 help="Don't check that the instance's name"
1016 NET_OPT = cli_option("--net",
1017 help="NIC parameters", default=[],
1018 dest="nics", action="append", type="identkeyval")
1020 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1021 dest="disks", action="append", type="identkeyval")
1023 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1024 help="Comma-separated list of disks"
1025 " indices to act on (e.g. 0,2) (optional,"
1026 " defaults to all disks)")
1028 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1029 help="Enforces a single-disk configuration using the"
1030 " given disk size, in MiB unless a suffix is used",
1031 default=None, type="unit", metavar="<size>")
1033 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1034 dest="ignore_consistency",
1035 action="store_true", default=False,
1036 help="Ignore the consistency of the disks on"
1039 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1040 dest="allow_failover",
1041 action="store_true", default=False,
1042 help="If migration is not possible fallback to"
1045 NONLIVE_OPT = cli_option("--non-live", dest="live",
1046 default=True, action="store_false",
1047 help="Do a non-live migration (this usually means"
1048 " freeze the instance, save the state, transfer and"
1049 " only then resume running on the secondary node)")
1051 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1053 choices=list(constants.HT_MIGRATION_MODES),
1054 help="Override default migration mode (choose"
1055 " either live or non-live")
1057 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1058 help="Target node and optional secondary node",
1059 metavar="<pnode>[:<snode>]",
1060 completion_suggest=OPT_COMPL_INST_ADD_NODES)
1062 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1063 action="append", metavar="<node>",
1064 help="Use only this node (can be used multiple"
1065 " times, if not given defaults to all nodes)",
1066 completion_suggest=OPT_COMPL_ONE_NODE)
1068 NODEGROUP_OPT_NAME = "--node-group"
1069 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1071 help="Node group (name or uuid)",
1072 metavar="<nodegroup>",
1073 default=None, type="string",
1074 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1076 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1078 completion_suggest=OPT_COMPL_ONE_NODE)
1080 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1081 action="store_false",
1082 help="Don't start the instance after creation")
1084 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1085 action="store_true", default=False,
1086 help="Show command instead of executing it")
1088 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1089 default=False, action="store_true",
1090 help="Instead of performing the migration, try to"
1091 " recover from a failed cleanup. This is safe"
1092 " to run even if the instance is healthy, but it"
1093 " will create extra replication traffic and "
1094 " disrupt briefly the replication (like during the"
1097 STATIC_OPT = cli_option("-s", "--static", dest="static",
1098 action="store_true", default=False,
1099 help="Only show configuration data, not runtime data")
1101 ALL_OPT = cli_option("--all", dest="show_all",
1102 default=False, action="store_true",
1103 help="Show info on all instances on the cluster."
1104 " This can take a long time to run, use wisely")
1106 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1107 action="store_true", default=False,
1108 help="Interactive OS reinstall, lists available"
1109 " OS templates for selection")
1111 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1112 action="store_true", default=False,
1113 help="Remove the instance from the cluster"
1114 " configuration even if there are failures"
1115 " during the removal process")
1117 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1118 dest="ignore_remove_failures",
1119 action="store_true", default=False,
1120 help="Remove the instance from the"
1121 " cluster configuration even if there"
1122 " are failures during the removal"
1125 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1126 action="store_true", default=False,
1127 help="Remove the instance from the cluster")
1129 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1130 help="Specifies the new node for the instance",
1131 metavar="NODE", default=None,
1132 completion_suggest=OPT_COMPL_ONE_NODE)
1134 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1135 help="Specifies the new secondary node",
1136 metavar="NODE", default=None,
1137 completion_suggest=OPT_COMPL_ONE_NODE)
1139 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1140 help="Specifies the new primary node",
1141 metavar="<node>", default=None,
1142 completion_suggest=OPT_COMPL_ONE_NODE)
1144 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1145 default=False, action="store_true",
1146 help="Replace the disk(s) on the primary"
1147 " node (applies only to internally mirrored"
1148 " disk templates, e.g. %s)" %
1149 utils.CommaJoin(constants.DTS_INT_MIRROR))
1151 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1152 default=False, action="store_true",
1153 help="Replace the disk(s) on the secondary"
1154 " node (applies only to internally mirrored"
1155 " disk templates, e.g. %s)" %
1156 utils.CommaJoin(constants.DTS_INT_MIRROR))
1158 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1159 default=False, action="store_true",
1160 help="Lock all nodes and auto-promote as needed"
1163 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1164 default=False, action="store_true",
1165 help="Automatically replace faulty disks"
1166 " (applies only to internally mirrored"
1167 " disk templates, e.g. %s)" %
1168 utils.CommaJoin(constants.DTS_INT_MIRROR))
1170 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1171 default=False, action="store_true",
1172 help="Ignore current recorded size"
1173 " (useful for forcing activation when"
1174 " the recorded size is wrong)")
1176 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1178 completion_suggest=OPT_COMPL_ONE_NODE)
1180 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1183 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1184 help="Specify the secondary ip for the node",
1185 metavar="ADDRESS", default=None)
1187 READD_OPT = cli_option("--readd", dest="readd",
1188 default=False, action="store_true",
1189 help="Readd old node after replacing it")
1191 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1192 default=True, action="store_false",
1193 help="Disable SSH key fingerprint checking")
1195 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1196 default=False, action="store_true",
1197 help="Force the joining of a node")
1199 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1200 type="bool", default=None, metavar=_YORNO,
1201 help="Set the master_candidate flag on the node")
1203 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1204 type="bool", default=None,
1205 help=("Set the offline flag on the node"
1206 " (cluster does not communicate with offline"
# Command-line option definitions: per-node flags (drained/master-capable/
# vm-capable), volume and storage toggles, and cluster-wide settings.
# NOTE(review): this is a numbered dump with source lines missing; several
# cli_option(...) calls below are visibly truncated (e.g. NOLVM_STORAGE_OPT
# lacks the comma after its help string, ENABLED_DISK_TEMPLATES_OPT's help
# text is cut off, MAC_PREFIX_OPT and MASTER_NETDEV_OPT never close their
# parentheses). Statements kept byte-identical.
1209 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1210 type="bool", default=None,
1211 help=("Set the drained flag on the node"
1212 " (excluded from allocation operations)"))
1214 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1215 type="bool", default=None, metavar=_YORNO,
1216 help="Set the master_capable flag on the node")
1218 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1219 type="bool", default=None, metavar=_YORNO,
1220 help="Set the vm_capable flag on the node")
1222 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1223 type="bool", default=None, metavar=_YORNO,
1224 help="Set the allocatable flag on a volume")
1226 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1227 help="Disable support for lvm based instances"
1229 action="store_false", default=True)
1231 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1232 dest="enabled_hypervisors",
1233 help="Comma-separated list of hypervisors",
1234 type="string", default=None)
1236 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1237 dest="enabled_disk_templates",
1238 help="Comma-separated list of "
1240 type="string", default=None)
1242 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1243 type="keyval", default={},
1244 help="NIC parameters")
1246 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1247 dest="candidate_pool_size", type="int",
1248 help="Set the candidate pool size")
1250 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1251 help=("Enables LVM and specifies the volume group"
1252 " name (cluster-wide) for disk allocation"
1253 " [%s]" % constants.DEFAULT_VG),
1254 metavar="VG", default=None)
1256 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1257 help="Destroy cluster", action="store_true")
1259 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1260 help="Skip node agreement check (dangerous)",
1261 action="store_true", default=False)
1263 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1264 help="Specify the mac prefix for the instance IP"
1265 " addresses, in the format XX:XX:XX",
1269 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1270 help="Specify the node interface (cluster-wide)"
1271 " on which the master IP address will be added"
1272 " (cluster init default: %s)" %
1273 constants.DEFAULT_BRIDGE,
1277 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1278 help="Specify the netmask of the master IP",
1282 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1283 dest="use_external_mip_script",
1284 help="Specify whether to run a"
1285 " user-provided script for the master"
1286 " IP address turnup and"
1287 " turndown operations",
1288 type="bool", metavar=_YORNO, default=None)
1290 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1291 help="Specify the default directory (cluster-"
1292 "wide) for storing the file-based disks [%s]" %
1293 pathutils.DEFAULT_FILE_STORAGE_DIR,
1295 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1297 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1298 "--shared-file-storage-dir",
1299 dest="shared_file_storage_dir",
1300 help="Specify the default directory (cluster-wide) for storing the"
1301 " shared file-based disks [%s]" %
1302 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1303 metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
# Option definitions for cluster init/verify, reboot/shutdown timeouts and
# certificate renewal. NOTE(review): missing dump lines leave several calls
# truncated (SHUTDOWN_TIMEOUT_OPT, INTERVAL_OPT, EARLY_RELEASE_OPT,
# NEW_RAPI_CERT_OPT, NEW_SPICE_CERT_OPT, NEW_CONFD_HMAC_KEY_OPT never close
# their parentheses here). Statements kept byte-identical.
1305 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1306 help="Don't modify %s" % pathutils.ETC_HOSTS,
1307 action="store_false", default=True)
1309 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1310 help="Don't initialize SSH keys",
1311 action="store_false", default=True)
1313 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1314 help="Enable parseable error messages",
1315 action="store_true", default=False)
1317 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1318 help="Skip N+1 memory redundancy tests",
1319 action="store_true", default=False)
1321 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1322 help="Type of reboot: soft/hard/full",
1323 default=constants.INSTANCE_REBOOT_HARD,
1325 choices=list(constants.REBOOT_TYPES))
1327 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1328 dest="ignore_secondaries",
1329 default=False, action="store_true",
1330 help="Ignore errors from secondaries")
1332 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1333 action="store_false", default=True,
1334 help="Don't shutdown the instance (unsafe)")
1336 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1337 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1338 help="Maximum time to wait")
1340 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1341 dest="shutdown_timeout", type="int",
1342 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1343 help="Maximum time to wait for instance"
1346 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1348 help=("Number of seconds between repetions of the"
1351 EARLY_RELEASE_OPT = cli_option("--early-release",
1352 dest="early_release", default=False,
1353 action="store_true",
1354 help="Release the locks on the secondary"
1357 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1358 dest="new_cluster_cert",
1359 default=False, action="store_true",
1360 help="Generate a new cluster certificate")
1362 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1364 help="File containing new RAPI certificate")
1366 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1367 default=None, action="store_true",
1368 help=("Generate a new self-signed RAPI"
1371 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1373 help="File containing new SPICE certificate")
1375 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1377 help="File containing the certificate of the CA"
1378 " which signed the SPICE certificate")
1380 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1381 dest="new_spice_cert", default=None,
1382 action="store_true",
1383 help=("Generate a new self-signed SPICE"
1386 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1387 dest="new_confd_hmac_key",
1388 default=False, action="store_true",
1389 help=("Create a new HMAC key for %s" %
# Option for loading the cluster domain secret from a file.
# Fixes the doubled word ("new new") in the user-visible help text and
# restores the statement to valid syntax (the dump dropped the default line;
# default=None matches the other file-path options here).
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
# Boolean option requesting generation of a fresh cluster domain secret.
# NOTE(review): the closing lines of this call are missing from the dump.
1398 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1399 dest="new_cluster_domain_secret",
1400 default=False, action="store_true",
1401 help=("Create a new cluster domain"
# Option definitions for node health, UID pools, DRBD, IP versions and
# output formatting. Statements kept byte-identical (numbered dump).
1404 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1405 dest="use_replication_network",
1406 help="Whether to use the replication network"
1407 " for talking to the nodes",
1408 action="store_true", default=False)
1410 MAINTAIN_NODE_HEALTH_OPT = \
1411 cli_option("--maintain-node-health", dest="maintain_node_health",
1412 metavar=_YORNO, default=None, type="bool",
1413 help="Configure the cluster to automatically maintain node"
1414 " health, by shutting down unknown instances, shutting down"
1415 " unknown DRBD devices, etc.")
1417 IDENTIFY_DEFAULTS_OPT = \
1418 cli_option("--identify-defaults", dest="identify_defaults",
1419 default=False, action="store_true",
1420 help="Identify which saved instance parameters are equal to"
1421 " the current cluster defaults and set them as such, instead"
1422 " of marking them as overridden")
1424 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1425 action="store", dest="uid_pool",
1426 help=("A list of user-ids or user-id"
1427 " ranges separated by commas"))
1429 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1430 action="store", dest="add_uids",
1431 help=("A list of user-ids or user-id"
1432 " ranges separated by commas, to be"
1433 " added to the user-id pool"))
1435 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1436 action="store", dest="remove_uids",
1437 help=("A list of user-ids or user-id"
1438 " ranges separated by commas, to be"
1439 " removed from the user-id pool"))
1441 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1442 action="store", dest="reserved_lvs",
1443 help=("A comma-separated list of reserved"
1444 " logical volumes names, that will be"
1445 " ignored by cluster verify"))
1447 ROMAN_OPT = cli_option("--roman",
1448 dest="roman_integers", default=False,
1449 action="store_true",
1450 help="Use roman numbers for positive integers")
1452 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1453 action="store", default=None,
1454 help="Specifies usermode helper for DRBD")
1456 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1457 action="store_false", default=True,
1458 help="Disable support for DRBD")
1460 PRIMARY_IP_VERSION_OPT = \
1461 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1462 action="store", dest="primary_ip_version",
1463 metavar="%d|%d" % (constants.IP4_VERSION,
1464 constants.IP6_VERSION),
1465 help="Cluster-wide IP version for primary IP")
1467 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1468 action="store_true",
1469 help="Show machine name for every line in output")
1471 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1472 action="store_true",
1473 help=("Hide successful results and show failures"
1474 " only (determined by the exit code)"))
1476 REASON_OPT = cli_option("--reason", default=None,
1477 help="The reason for executing the command")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name chosen on the command line into
  its numeric value (via C{_PRIONAME_TO_VALUE}) and stores the result on
  the parsed options object.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
# "--priority" option; values are validated against the known priority
# names and converted to numbers by _PriorityOptionCb above.
1489 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1490 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1491 choices=_PRIONAME_TO_VALUE.keys(),
1492 action="callback", type="choice",
1493 callback=_PriorityOptionCb,
1494 help="Priority for opcode processing")
# Option definitions for OS flags, node groups, out-of-band handling,
# instance memory/disk operations and network management.
# NOTE(review): numbered dump with missing lines; PREALLOC_WIPE_DISKS_OPT,
# FORCE_FILTER_OPT, DISK_STATE_OPT and HV_STATE_OPT are visibly truncated.
1496 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1497 type="bool", default=None, metavar=_YORNO,
1498 help="Sets the hidden flag on the OS")
1500 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1501 type="bool", default=None, metavar=_YORNO,
1502 help="Sets the blacklisted flag on the OS")
1504 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1505 type="bool", metavar=_YORNO,
1506 dest="prealloc_wipe_disks",
1507 help=("Wipe disks prior to instance"
1510 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1511 type="keyval", default=None,
1512 help="Node parameters")
1514 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1515 action="store", metavar="POLICY", default=None,
1516 help="Allocation policy for the node group")
1518 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1519 type="bool", metavar=_YORNO,
1520 dest="node_powered",
1521 help="Specify if the SoR for node is powered")
1523 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1524 default=constants.OOB_TIMEOUT,
1525 help="Maximum time to wait for out-of-band helper")
1527 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1528 default=constants.OOB_POWER_DELAY,
1529 help="Time in seconds to wait between power-ons")
1531 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1532 action="store_true", default=False,
1533 help=("Whether command argument should be treated"
1536 NO_REMEMBER_OPT = cli_option("--no-remember",
1538 action="store_true", default=False,
1539 help="Perform but do not record the change"
1540 " in the configuration")
1542 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1543 default=False, action="store_true",
1544 help="Evacuate primary instances only")
1546 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1547 default=False, action="store_true",
1548 help="Evacuate secondary instances only"
1549 " (applies only to internally mirrored"
1550 " disk templates, e.g. %s)" %
1551 utils.CommaJoin(constants.DTS_INT_MIRROR))
1553 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1554 action="store_true", default=False,
1555 help="Pause instance at startup")
1557 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1558 help="Destination node group (name or uuid)",
1559 default=None, action="append",
1560 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
# NOTE(review): default=[] below is a single shared list; optparse does not
# copy defaults, so "append" mutates it across parser instances -- confirm
# whether cli_option copies defaults before relying on this.
1562 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1563 action="append", dest="ignore_errors",
1564 choices=list(constants.CV_ALL_ECODES_STRINGS),
1565 help="Error code to be ignored")
1567 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1569 help=("Specify disk state information in the"
1571 " storage_type/identifier:option=value,...;"
1572 " note this is unused for now"),
1575 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1577 help=("Specify hypervisor state information in the"
1578 " format hypervisor:option=value,...;"
1579 " note this is unused for now"),
1582 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1583 action="store_true", default=False,
1584 help="Ignore instance policy violations")
1586 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1587 help="Sets the instance's runtime memory,"
1588 " ballooning it up or down to the new value",
1589 default=None, type="unit", metavar="<size>")
1591 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1592 action="store_true", default=False,
1593 help="Marks the grow as absolute instead of the"
1594 " (default) relative mode")
1596 NETWORK_OPT = cli_option("--network",
1597 action="store", default=None, dest="network",
1598 help="IP network in CIDR notation")
1600 GATEWAY_OPT = cli_option("--gateway",
1601 action="store", default=None, dest="gateway",
1602 help="IP address of the router (gateway)")
1604 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1605 action="store", default=None,
1606 dest="add_reserved_ips",
1607 help="Comma-separated list of"
1608 " reserved IPs to add")
1610 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1611 action="store", default=None,
1612 dest="remove_reserved_ips",
1613 help="Comma-delimited list of"
1614 " reserved IPs to remove")
1616 NETWORK6_OPT = cli_option("--network6",
1617 action="store", default=None, dest="network6",
1618 help="IP network in CIDR notation")
1620 GATEWAY6_OPT = cli_option("--gateway6",
1621 action="store", default=None, dest="gateway6",
1622 help="IP6 address of the router (gateway)")
1624 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1625 dest="conflicts_check",
1627 action="store_false",
1628 help="Don't check for conflicting IPs")
1630 INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1631 default=False, action="store_true",
1632 help="Include default values")
# Aggregated option lists shared by multiple commands.
# NOTE(review): the three list literals below are truncated in this dump
# (their closing brackets and most members are missing). Kept byte-identical.
1634 #: Options provided by all commands
1635 COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1637 # common options for creating instances. add and import then add their own
1639 COMMON_CREATE_OPTS = [
1644 FILESTORE_DRIVER_OPT,
1650 NOCONFLICTSCHECK_OPT,
1662 # common instance policy options
1663 INSTANCE_POLICY_OPTS = [
1664 IPOLICY_BOUNDS_SPECS_OPT,
1665 IPOLICY_DISK_TEMPLATES,
1667 IPOLICY_SPINDLE_RATIO,
1670 # instance policy split specs options
1671 SPLIT_ISPECS_OPTS = [
1672 SPECS_CPU_COUNT_OPT,
1673 SPECS_DISK_COUNT_OPT,
1674 SPECS_DISK_SIZE_OPT,
1676 SPECS_NIC_COUNT_OPT,
1680 class _ShowUsage(Exception):
1681 """Exception class for L{_ParseArgs}.
1684 def __init__(self, exit_error):
1685 """Initializes instances of this class.
1687 @type exit_error: bool
1688 @param exit_error: Whether to report failure on exit
1691 Exception.__init__(self)
1692 self.exit_error = exit_error
1695 class _ShowVersion(Exception):
1696 """Exception class for L{_ParseArgs}.
# NOTE(review): several source lines are missing from this dump (the
# extraction of "cmd" from argv, the unaliasing assignment and the
# env_args guard among them); the visible statements are kept byte-identical.
1701 def _ParseArgs(binary, argv, commands, aliases, env_override):
1702 """Parser for the command line arguments.
1704 This function parses the arguments and returns the function which
1705 must be executed together with its (modified) arguments.
1707 @param binary: Script name
1708 @param argv: Command line arguments
1709 @param commands: Dictionary containing command definitions
1710 @param aliases: dictionary with command aliases {"alias": "target", ...}
1711 @param env_override: list of env variables allowed for default args
1712 @raise _ShowUsage: If usage description should be shown
1713 @raise _ShowVersion: If version should be shown
1716 assert not (env_override - set(commands))
1717 assert not (set(aliases.keys()) & set(commands.keys()))
1722 # No option or command given
1723 raise _ShowUsage(exit_error=True)
1725 if cmd == "--version":
1726 raise _ShowVersion()
1727 elif cmd == "--help":
1728 raise _ShowUsage(exit_error=False)
1729 elif not (cmd in commands or cmd in aliases):
1730 raise _ShowUsage(exit_error=True)
1732 # get command, unalias it, and look it up in commands
1734 if aliases[cmd] not in commands:
1735 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1736 " command '%s'" % (cmd, aliases[cmd]))
# Default arguments may be injected from an environment variable named
# <BINARY>_<CMD> (dashes mapped to underscores, upper-cased).
1740 if cmd in env_override:
1741 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1742 env_args = os.environ.get(args_env_name)
1744 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1746 func, args_def, parser_opts, usage, description = commands[cmd]
1747 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1748 description=description,
1749 formatter=TitledHelpFormatter(),
1750 usage="%%prog %s %s" % (cmd, usage))
1751 parser.disable_interspersed_args()
1752 options, args = parser.parse_args(args=argv[2:])
1754 if not _CheckArguments(cmd, args_def, args):
1755 return None, None, None
1757 return func, options, args
# Generator yielding the usage/command listing one line at a time.
# NOTE(review): blank "yield" separator lines from the original are missing
# in this dump; statements kept byte-identical.
1760 def _FormatUsage(binary, commands):
1761 """Generates a nice description of all commands.
1763 @param binary: Script name
1764 @param commands: Dictionary containing command definitions
1767 # compute the max line length for cmd + usage
1768 mlen = min(60, max(map(len, commands)))
1770 yield "Usage: %s {command} [options...] [argument...]" % binary
1771 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1775 # and format a nice command list
1776 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1777 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1778 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1779 for line in help_lines:
1780 yield " %-*s %s" % (mlen, "", line)
# NOTE(review): the min_count/max_count initializations, several "return"
# statements and loop-body lines are missing from this dump; the visible
# statements are kept byte-identical.
1785 def _CheckArguments(cmd, args_def, args):
1786 """Verifies the arguments using the argument definition.
1790 1. Abort with error if values specified by user but none expected.
1792 1. For each argument in definition
1794 1. Keep running count of minimum number of values (min_count)
1795 1. Keep running count of maximum number of values (max_count)
1796 1. If it has an unlimited number of values
1798 1. Abort with error if it's not the last argument in the definition
1800 1. If last argument has limited number of values
1802 1. Abort with error if number of values doesn't match or is too large
1804 1. Abort with error if user didn't pass enough values (min_count)
1807 if args and not args_def:
1808 ToStderr("Error: Command %s expects no arguments", cmd)
1815 last_idx = len(args_def) - 1
1817 for idx, arg in enumerate(args_def):
1818 if min_count is None:
1820 elif arg.min is not None:
1821 min_count += arg.min
1823 if max_count is None:
1825 elif arg.max is not None:
1826 max_count += arg.max
# Only the final argument definition may be unbounded (max=None).
1829 check_max = (arg.max is not None)
1831 elif arg.max is None:
1832 raise errors.ProgrammerError("Only the last argument can have max=None")
1835 # Command with exact number of arguments
1836 if (min_count is not None and max_count is not None and
1837 min_count == max_count and len(args) != min_count):
1838 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1841 # Command with limited number of arguments
1842 if max_count is not None and len(args) > max_count:
1843 ToStderr("Error: Command %s expects only %d argument(s)",
1847 # Command with some required arguments
1848 if min_count is not None and len(args) < min_count:
1849 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: the option value, optionally of the form
    "<node>:<secondary>"
  @rtype: tuple
  @return: a (node, secondary) pair; the second element is None when no
    colon separator is present

  """
  if value and ":" in value:
    # split only on the first colon so the remainder may contain colons;
    # fixed to return a tuple here (previously a list), making the return
    # type consistent with the no-separator branch below
    (node, secondary) = value.split(":", 1)
    return (node, secondary)
  return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # The docstring allows os_variants to be None; guard against iterating
  # it directly (which would raise TypeError) and treat an empty list the
  # same way: the base name is then the only valid name.
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the list of fields to use

  """
  if selected is None:
    # Nothing chosen by the user: fall back to the defaults. Without this
    # return, the startswith() call below would raise AttributeError.
    return default

  if selected.startswith("+"):
    # "+a,b" means "the default fields plus a and b"
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias; presumably rpc.RunWithRPC initializes/tears down the RPC
# layer around the wrapped call -- TODO confirm against ganeti.rpc.
1901 UsesRPC = rpc.RunWithRPC
# NOTE(review): this dump is missing lines (the try/finally around the tty
# handle, the answer loop, new_text initialization); visible statements are
# kept byte-identical. Also note the Python-2-only file() builtin below.
1904 def AskUser(text, choices=None):
1905 """Ask the user a question.
1907 @param text: the question to ask
1909 @param choices: list with elements tuples (input_char, return_value,
1910 description); if not given, it will default to: [('y', True,
1911 'Perform the operation'), ('n', False, 'Do not do the operation')];
1912 note that the '?' char is reserved for help
1914 @return: one of the return values from the choices list; if input is
1915 not possible (i.e. not running with a tty, we return the last
1920 choices = [("y", True, "Perform the operation"),
1921 ("n", False, "Do not perform the operation")]
1922 if not choices or not isinstance(choices, list):
1923 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1924 for entry in choices:
1925 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1926 raise errors.ProgrammerError("Invalid choices element to AskUser")
# The last choice's return value doubles as the no-tty fallback answer.
1928 answer = choices[-1][1]
1930 for line in text.splitlines():
1931 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1932 text = "\n".join(new_text)
1934 f = file("/dev/tty", "a+")
1938 chars = [entry[0] for entry in choices]
1939 chars[-1] = "[%s]" % chars[-1]
1941 maps = dict([(entry[0], entry[1]) for entry in choices])
1945 f.write("/".join(chars))
1947 line = f.readline(2).strip().lower()
1952 for entry in choices:
1953 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  Carries a single argument: the ID of the job that was submitted. The
  handler is expected to print that ID. This is not an error, merely a
  structured way for clients to exit after a successful submission.

  """
# NOTE(review): the client-creation fallback and the return statement are
# missing from this dump; visible statements kept byte-identical.
1972 def SendJob(ops, cl=None):
1973 """Function to submit an opcode without waiting for the results.
1976 @param ops: list of opcodes
1977 @type cl: luxi.Client
1978 @param cl: the luxi client to use for communicating with the master;
1979 if None, a new client will be created
1985 job_id = cl.SubmitJob(ops)
# Polls a job until it reaches a final state, forwarding log entries and
# "not changed" notifications to the reporting callbacks, then raises on
# any failed opcode. NOTE(review): the surrounding "while True" loop, some
# conditionals and "continue"/"break" lines are missing from this dump;
# visible statements kept byte-identical.
1990 def GenericPollJob(job_id, cbs, report_cbs):
1991 """Generic job-polling function.
1993 @type job_id: number
1994 @param job_id: Job ID
1995 @type cbs: Instance of L{JobPollCbBase}
1996 @param cbs: Data callbacks
1997 @type report_cbs: Instance of L{JobPollReportCbBase}
1998 @param report_cbs: Reporting callbacks
2001 prev_job_info = None
2002 prev_logmsg_serial = None
2007 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2010 # job not found, go away!
2011 raise errors.JobLost("Job with id %s lost" % job_id)
2013 if result == constants.JOB_NOTCHANGED:
2014 report_cbs.ReportNotChanged(job_id, status)
2019 # Split result, a tuple of (field values, log entries)
2020 (job_info, log_entries) = result
2021 (status, ) = job_info
2024 for log_entry in log_entries:
2025 (serial, timestamp, log_type, message) = log_entry
2026 report_cbs.ReportLogMessage(job_id, serial, timestamp,
# Track the highest serial seen so already-reported entries are skipped.
2028 prev_logmsg_serial = max(prev_logmsg_serial, serial)
2030 # TODO: Handle canceled and archived jobs
2031 elif status in (constants.JOB_STATUS_SUCCESS,
2032 constants.JOB_STATUS_ERROR,
2033 constants.JOB_STATUS_CANCELING,
2034 constants.JOB_STATUS_CANCELED):
2037 prev_job_info = job_info
2039 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2041 raise errors.JobLost("Job with id %s lost" % job_id)
2043 status, opstatus, result = jobs[0]
2045 if status == constants.JOB_STATUS_SUCCESS:
2048 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2049 raise errors.OpExecError("Job was canceled")
2052 for idx, (status, msg) in enumerate(zip(opstatus, result)):
2053 if status == constants.OP_STATUS_SUCCESS:
2055 elif status == constants.OP_STATUS_ERROR:
2056 errors.MaybeRaise(msg)
2059 raise errors.OpExecError("partial failure (opcode %d): %s" %
2062 raise errors.OpExecError(str(msg))
2064 # default failure mode
2065 raise errors.OpExecError(result)
# Abstract data-callback interface consumed by GenericPollJob.
# NOTE(review): the "def __init__" line is missing from this dump (only its
# docstring line survives); visible statements kept byte-identical.
2068 class JobPollCbBase:
2069 """Base class for L{GenericPollJob} callbacks.
2073 """Initializes this class.
2077 def WaitForJobChangeOnce(self, job_id, fields,
2078 prev_job_info, prev_log_serial):
2079 """Waits for changes on a job.
2082 raise NotImplementedError()
2084 def QueryJobs(self, job_ids, fields):
2085 """Returns the selected fields for the selected job IDs.
2087 @type job_ids: list of numbers
2088 @param job_ids: Job IDs
2089 @type fields: list of strings
2090 @param fields: Fields
2093 raise NotImplementedError()
# Abstract reporting-callback interface consumed by GenericPollJob.
# NOTE(review): the "def __init__" line is missing from this dump.
2096 class JobPollReportCbBase:
2097 """Base class for L{GenericPollJob} reporting callbacks.
2101 """Initializes this class.
2105 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2106 """Handles a log message.
2109 raise NotImplementedError()
2111 def ReportNotChanged(self, job_id, status):
2112 """Called for if a job hasn't changed in a while.
2114 @type job_id: number
2115 @param job_id: Job ID
2116 @type status: string or None
2117 @param status: Job status if available
2120 raise NotImplementedError()
# Concrete JobPollCbBase delegating to a luxi client.
# NOTE(review): the "self.cl = cl" assignment is missing from this dump but
# WaitForJobChangeOnce/QueryJobs read self.cl; kept byte-identical.
2123 class _LuxiJobPollCb(JobPollCbBase):
2124 def __init__(self, cl):
2125 """Initializes this class.
2128 JobPollCbBase.__init__(self)
2131 def WaitForJobChangeOnce(self, job_id, fields,
2132 prev_job_info, prev_log_serial):
2133 """Waits for changes on a job.
2136 return self.cl.WaitForJobChangeOnce(job_id, fields,
2137 prev_job_info, prev_log_serial)
2139 def QueryJobs(self, job_ids, fields):
2140 """Returns the selected fields for the selected job IDs.
2143 return self.cl.QueryJobs(job_ids, fields)
# Reporting callback that forwards log messages to a user-supplied
# feedback function; "not changed" notifications are ignored.
2146 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2147 def __init__(self, feedback_fn):
2148 """Initializes this class.
2151 JobPollReportCbBase.__init__(self)
2153 self.feedback_fn = feedback_fn
2155 assert callable(feedback_fn)
2157 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2158 """Handles a log message.
2161 self.feedback_fn((timestamp, log_type, log_msg))
2163 def ReportNotChanged(self, job_id, status):
2164 """Called if a job hasn't changed in a while.
# Reporting callback that prints log messages to stdout and one-time
# queue/lock-wait notices to stderr. NOTE(review): the "def __init__" line
# is missing from this dump (only its docstring survives).
2170 class StdioJobPollReportCb(JobPollReportCbBase):
2172 """Initializes this class.
2175 JobPollReportCbBase.__init__(self)
# Flags ensuring each state notification is emitted at most once.
2177 self.notified_queued = False
2178 self.notified_waitlock = False
2180 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2181 """Handles a log message.
2184 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2185 FormatLogMessage(log_type, log_msg))
2187 def ReportNotChanged(self, job_id, status):
2188 """Called if a job hasn't changed in a while.
2194 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2195 ToStderr("Job %s is waiting in queue", job_id)
2196 self.notified_queued = True
2198 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2199 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2200 self.notified_waitlock = True
# Stringifies non-message log entries, then safe-encodes for display.
# NOTE(review): lines may be missing between the dump's 2208 and 2210.
2203 def FormatLogMessage(log_type, log_msg):
2204 """Formats a job message according to its type.
2207 if log_type != constants.ELOG_MESSAGE:
2208 log_msg = str(log_msg)
2210 return utils.SafeEncode(log_msg)
# NOTE(review): the conditionals choosing between feedback_fn/reporter are
# missing from this dump; visible statements kept byte-identical.
2213 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2214 """Function to poll for the result of a job.
2216 @type job_id: job identified
2217 @param job_id: the job to poll for results
2218 @type cl: luxi.Client
2219 @param cl: the luxi client to use for communicating with the master;
2220 if None, a new client will be created
2226 if reporter is None:
2228 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2230 reporter = StdioJobPollReportCb()
# Passing both a reporter and a feedback function is a programming error.
2232 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2234 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
# Submits a single opcode and waits for its result (submit + poll).
# NOTE(review): the client-creation fallback is missing from this dump.
2237 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2238 """Legacy function to submit an opcode.
2240 This is just a simple wrapper over the construction of the processor
2241 instance. It should be extended to better handle feedback and
2242 interaction functions.
2248 SetGenericOpcodeOpts([op], opts)
2250 job_id = SendJob([op], cl=cl)
2252 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
# A single opcode was submitted, so return its (single) result.
2255 return op_results[0]
# NOTE(review): the line building "job" from "op" is missing from this
# dump; visible statements kept byte-identical.
2258 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2259 """Wrapper around SubmitOpCode or SendJob.
2261 This function will decide, based on the 'opts' parameter, whether to
2262 submit and wait for the result of the opcode (and return it), or
2263 whether to just send the job and print its identifier. It is used in
2264 order to simplify the implementation of the '--submit' option.
2266 It will also process the opcodes if we're sending the via SendJob
2267 (otherwise SubmitOpCode does it).
2270 if opts and opts.submit_only:
2272 SetGenericOpcodeOpts(job, opts)
2273 job_id = SendJob(job, cl=cl)
# Not an error: signals the caller to print the job ID and exit.
2274 raise JobSubmittedException(job_id)
2276 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
# NOTE(review): the trail initialization, the opts.reason guard and the
# final assignment onto the opcode are missing from this dump; visible
# statements kept byte-identical.
2279 def _InitReasonTrail(op, opts):
2280 """Builds the first part of the reason trail
2282 Builds the initial part of the reason trail, adding the user provided reason
2283 (if it exists) and the name of the command starting the operation.
2285 @param op: the opcode the reason trail will be added to
2286 @param opts: the command line options selected by the user
2289 assert len(sys.argv) >= 2
2293 trail.append((constants.OPCODE_REASON_SRC_USER,
2297 binary = os.path.basename(sys.argv[0])
2298 source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2299 command = sys.argv[1]
2300 trail.append((source, command, utils.EpochNano()))
# Copies generic command-line settings (debug level, dry-run, priority,
# reason trail) onto each opcode in place.
2304 def SetGenericOpcodeOpts(opcode_list, options):
2305 """Processor for generic options.
2307 This function updates the given opcodes based on generic command
2308 line options (like debug, dry-run, etc.).
2310 @param opcode_list: list of opcodes
2311 @param options: command line options or None
2312 @return: None (in-place modification)
2317 for op in opcode_list:
2318 op.debug_level = options.debug
2319 if hasattr(options, "dry_run"):
2320 op.dry_run = options.dry_run
2321 if getattr(options, "priority", None) is not None:
2322 op.priority = options.priority
2323 _InitReasonTrail(op, options)
# NOTE(review): the "else" fallback choosing the master socket, the "try:"
# line around luxi.Client and the final "return client" are missing from
# this dump; visible statements kept byte-identical.
2326 def GetClient(query=False):
2327 """Connects to a luxi socket and returns a client.
2329 @type query: boolean
2330 @param query: this signifies that the client will only be
2331 used for queries; if the build-time parameter
2332 enable-split-queries is enabled, then the client will be
2333 connected to the query socket instead of the masterd socket
# The LUXI_OVERRIDE environment variable takes precedence over the
# query/split-query selection below.
2336 override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2338 if override_socket == constants.LUXI_OVERRIDE_MASTER:
2339 address = pathutils.MASTER_SOCKET
2340 elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2341 address = pathutils.QUERY_SOCKET
2343 address = override_socket
2344 elif query and constants.ENABLE_SPLIT_QUERY:
2345 address = pathutils.QUERY_SOCKET
2348 # TODO: Cache object?
2350 client = luxi.Client(address=address)
2351 except luxi.NoMasterError:
2352 ss = ssconf.SimpleStore()
2354 # Try to read ssconf file
2357 except errors.ConfigurationError:
2358 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2359 " not part of a cluster",
2362 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2363 if master != myself:
2364 raise errors.OpPrereqError("This is not the master node, please connect"
2365 " to node '%s' and rerun the command" %
2366 master, errors.ECODE_INVAL)
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @type err: Exception
  @param err: the exception to format
  @rtype: tuple of (int, string)
  @return: recommended exit code and the error description

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not really an error: the job was submitted, report its ID and succeed
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands
  @rtype: int
  @return: the desired exit code

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"
    # ensure logname is always defined, SetupLogging needs it below
    logname = binary

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage as err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError as err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException) as err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError as err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @type optvalue: list of tuples
  @param optvalue: (NIC index, parameter dict) pairs from the command line
  @rtype: list of dicts
  @return: one parameter dictionary per NIC, placed at its index
  @raise errors.OpPrereqError: if an index or parameter dict is invalid

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError) as err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  # NOTE(review): this copy of the file elides many lines in this function
  # (variable initialisations such as "instance = args[0]", else-branches,
  # "try:" statements and several keyword arguments of the OpInstanceCreate
  # call); the surviving statements are kept verbatim below and must be
  # completed from upstream before this block can run.
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  # NIC definitions from the --net option(s)
  nics = ParseNicOption(opts.nics)
  # (elided: the "if opts.no_nics:" branch preceding this elif)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  if opts.disk_template == constants.DT_DISKLESS:
    # diskless templates must not receive any disk specification
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
    # --disk and -s are mutually exclusive ways of describing disks
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
    disks = [{}] * disk_max
    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        # "size" and "adopt" are mutually exclusive per disk
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        # adopted disks bring their own size, but adoption only makes sense
        # when creating, not when importing
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)

  if opts.tags is not None:
    tags = opts.tags.split(",")

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-specific source/installation parameters
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    # (elided: the "else:" branch header before this raise)
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command fails

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception as err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher by releasing the lock
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: the return value of C{fn}

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # NOTE(review): this copy of the file elides many lines in this function,
  # including the end of the signature (presumably "units=None):"), the
  # defaults assigned to numfields/unitfields/units, "format_fields = []",
  # several else-branches, the "for row in data:" loop headers, the
  # "result = []" initialisation and the final "return result"; the
  # surviving statements are kept verbatim below.
  if numfields is None:
  if unitfields is None:

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    # pick a per-field printf format: plain for separator mode, width-aware
    # ("%*s"/"%-*s") for the smart-width mode
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    # escape "%" in the separator so the final "%"-formatting stays valid
    format_str = separator.replace("%", "%%").join(format_fields)

  # first pass over the data: stringify values (formatting unit fields) and
  # track the maximum width per column for the smart-width mode
  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))

  # header row: widths must also account for the header text
  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
  result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    # (elided: zeroing the last column's width when it is left-aligned, to
    # avoid trailing spaces)
    if fields and not numfields.Matches(fields[-1]):

  # data rows; a None row is rendered as dashes
  line = ["-" for _ in fields]
  for idx in range(len(fields)):
    if separator is None:
      args.append(mlens[idx])
    args.append(line[idx])
  result.append(format_str % tuple(args))
2949 def _FormatBool(value):
2950 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  # NOTE: constants.QFT_UNIT is deliberately absent; it depends on the
  # user-selected unit and is handled dynamically in _GetColumnFormatter
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
      "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple
  @return: overall status (one of the QR_* constants) and the formatted table

  """
  if unit is None:
    # default unit: exact megabytes with a separator, human-readable otherwise
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  # per-status counters, updated as a side effect of formatting each field
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: the subset of C{fdefs} whose kind is L{constants.QFT_UNKNOWN}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    match the actual type
  @rtype: int
  @return: the desired exit code

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    # combine the name filter with the caller-supplied one
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list
  @return: name, human-readable kind, title and description of the field

  """
  return [fdef.name,
          _QFT_NAMES.get(fdef.kind, fdef.kind),
          fdef.title,
          fdef.doc]
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: the desired exit code

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
3275 """Describes a column for L{FormatTable}.
3278 def __init__(self, title, fn, align_right):
3279 """Initializes this class.
3282 @param title: Column title
3284 @param fn: Formatting function
3285 @type align_right: bool
3286 @param align_right: Whether to align values on the right-hand side
3291 self.align_right = align_right
3294 def _GetColFormatString(width, align_right):
3295 """Returns the format string for a field.
3303 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted table, one string per line

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" if the input is
    not a two-element tuple/list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds described by the specification
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # match the group either by name or by UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3495 def _ToStream(stream, txt, *args):
3496 """Write a message to a stream, bypassing the logging system
3498 @type stream: file object
3499 @param stream: the file to which we should write
3501 @param txt: the message
3507 stream.write(txt % args)
3512 except IOError, err:
3513 if err.errno == errno.EPIPE:
3514 # our terminal went away, we'll exit
3515 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional "%"-formatting arguments for C{txt}

  """
  out_stream = sys.stdout
  _ToStream(out_stream, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional "%"-formatting arguments for C{txt}

  """
  err_stream = sys.stderr
  _ToStream(err_stream, txt, *args)
3544 class JobExecutor(object):
3545 """Class which manages the submission and execution of multiple jobs.
3547 Note that instances of this class should not be reused between
3551 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3556 self.verbose = verbose
3559 self.feedback_fn = feedback_fn
3560 self._counter = itertools.count()
3563 def _IfName(name, fmt):
3564 """Helper function for formatting name.
3572 def QueueJob(self, name, *ops):
3573 """Record a job for later submit.
3576 @param name: a description of the job, will be used in WaitJobSet
3579 SetGenericOpcodeOpts(ops, self.opts)
3580 self.queue.append((self._counter.next(), name, ops))
3582 def AddJobId(self, name, status, job_id):
3583 """Adds a job ID to the internal queue.
3586 self.jobs.append((self._counter.next(), status, job_id, name))
3588 def SubmitPending(self, each=False):
3589 """Submit all pending jobs.
3594 for (_, _, ops) in self.queue:
3595 # SubmitJob will remove the success status, but raise an exception if
3596 # the submission fails, so we'll notice that anyway.
3597 results.append([True, self.cl.SubmitJob(ops)[0]])
3599 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3600 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3601 self.jobs.append((idx, status, data, name))
3603 def _ChooseJob(self):
3604 """Choose a non-waiting/queued job to poll next.
3607 assert self.jobs, "_ChooseJob called with empty job list"
3609 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3613 for job_data, status in zip(self.jobs, result):
3614 if (isinstance(status, list) and status and
3615 status[0] in (constants.JOB_STATUS_QUEUED,
3616 constants.JOB_STATUS_WAITING,
3617 constants.JOB_STATUS_CANCELING)):
3618 # job is still present and waiting
3620 # good candidate found (either running job or lost job)
3621 self.jobs.remove(job_data)
3625 return self.jobs.pop(0)
  def GetResults(self):
    """Wait for and return the results of all jobs.

    Any still-pending jobs are submitted first; submission failures are
    reported on stderr, then every successfully submitted job is polled
    until completion.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    # NOTE(review): several lines are missing from this view of the file:
    # the `results` initialization, the pending-queue/verbosity checks, the
    # polling-loop header with its `try:`, the `success` flag assignments,
    # and the final sort/return. Confirm against the full source.
    self.SubmitPending()
    # job ids of the successfully submitted jobs (row = (idx, status, id, name))
    ok_jobs = [row[2] for row in self.jobs if row[1]]
    ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
      # an archived job can no longer be queried; report and keep the
      # formatted error as the job's "result"
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)
      results.append((idx, success, job_result))
    # sort based on the index, then drop it
    results = [i[1:] for i in results]
  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: bool
    @param wait: whether to wait or not

    """
    # NOTE(review): the `if wait:`/`else:` dispatch and the per-job status
    # check selecting between the two output calls below are missing from
    # this view of the file -- confirm against the full source.
      return self.GetResults()
      self.SubmitPending()
      # without waiting, just report each job id (or submission error)
      for _, status, result, name in self.jobs:
          ToStdout("%s: %s", result, name)
          ToStderr("Failure for %s: %s", name, result)
      # return (status, job_id/error) pairs for the caller
      return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  Every key of C{actual} appears in the output; where C{param_dict} has no
  own value, a "default (<effective value>)" string is produced instead.
  Non-empty dict values are formatted recursively.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict of dicts
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings
  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      # nested parameter group: recurse with the matching own sub-dict
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
def _FormatListInfoDefault(data, def_data):
  """Format a list value, falling back to a "default (...)" rendering.

  @param data: the explicitly configured list, or None if not set
  @param def_data: the default (effective) list, used when data is None
  @rtype: str
  @return: comma-joined C{data}, or "default (<joined def_data>)"
  """
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  # NOTE(review): several structural lines are missing from this view of
  # the file: the `if iscluster:` guards, the `minmax_out = []`
  # initialization, the `minmax_out.append([` / `])` wrappers around the
  # two comprehensions, the `ret.append(`/`ret.extend(` scaffolding and
  # the final return. Confirm against the full source.
  #
  # at cluster level the custom policy is already the effective one:
    eff_ipolicy = custom_ipolicy
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  # explicitly configured bounds: show them as own values
    for (k, minmax) in enumerate(custom_minmax):
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
  # otherwise fall back to the effective policy's bounds (no own values)
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
  ret = [("bounds specs", minmax_out)]
  # std specs are only meaningful at cluster level
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
  # remaining scalar policy parameters, with "default (...)" fallbacks
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
def _PrintSpecsParameters(buf, specs):
  """Write the given spec dictionary to buf as "par=val" pairs.

  The pairs are sorted by parameter name and separated by commas; nothing
  is written for an empty dictionary.
  """
  pairs = sorted(specs.items())
  buf.write(",".join(["%s=%s" % pair for pair in pairs]))
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @param buf: stream to write into
  @param ipolicy: instance policy
  @param isgroup: whether the policy is at group level

  """
  # NOTE(review): the `if not isgroup:` / `if stdspecs:` guards around the
  # std-specs part and the separator / "min:" / "max:" writes inside the
  # bounds loop are missing from this view of the file -- confirm against
  # the full source.
    stdspecs = ipolicy.get("std")
      # std specs exist only above group level
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    # only complete bounds (both min and max present) are printed
    if minspecs and maxspecs:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        _PrintSpecsParameters(buf, minspecs)
        _PrintSpecsParameters(buf, maxspecs)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  # NOTE(review): the `count = len(names)` assignment used by `msg`, the
  # size check deciding whether the affected list is shown inline or via
  # the "v" choice, the re-ask loop after viewing, and the final return
  # are missing from this view of the file -- confirm against the full
  # source.
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join([" %s" % name for name in names]))
  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]
  # for long lists, offer a "view" choice instead of inlining the items
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg + affected
    choice = AskUser(question, choices)
    # after viewing, ask again with the list included
    choice = AskUser(msg + affected, choices)
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Values equal to the "default" placeholder are passed through unchanged;
  everything else is run through L{utils.ParseUnit}.

  @type elements: dict
  @param elements: mapping of key to (possibly unit-suffixed) value
  @rtype: dict
  @return: mapping of key to parsed value (or the "default" placeholder)
  @raise errors.UnitParseError: via L{utils.ParseUnit} on bad values
  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      # keep the "default" marker as-is; it is resolved later
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
3871 def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3872 ispecs_disk_count, ispecs_disk_size,
3873 ispecs_nic_count, group_ipolicy, fill_all):
3876 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3877 if ispecs_disk_size:
3878 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3879 except (TypeError, ValueError, errors.UnitParseError), err:
3880 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3882 (ispecs_disk_size, ispecs_mem_size, err),
3885 # prepare ipolicy dict
3886 ispecs_transposed = {
3887 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3888 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3889 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3890 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3891 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3894 # first, check that the values given are correct
3896 forced_type = TISPECS_GROUP_TYPES
3898 forced_type = TISPECS_CLUSTER_TYPES
3899 for specs in ispecs_transposed.values():
3900 assert type(specs) is dict
3901 utils.ForceDictType(specs, forced_type)
3905 constants.ISPECS_MIN: {},
3906 constants.ISPECS_MAX: {},
3907 constants.ISPECS_STD: {},
3909 for (name, specs) in ispecs_transposed.iteritems():
3910 assert name in constants.ISPECS_PARAMETERS
3911 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3912 assert key in ispecs
3913 ispecs[key][name] = val
3915 for key in constants.ISPECS_MINMAX_KEYS:
3918 objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3920 minmax_out[key] = ispecs[key]
3921 ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3923 ipolicy[constants.ISPECS_STD] = \
3924 objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3925 ispecs[constants.ISPECS_STD])
3927 ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3930 def _ParseSpecUnit(spec, keyname):
3932 for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3935 ret[k] = utils.ParseUnit(ret[k])
3936 except (TypeError, ValueError, errors.UnitParseError), err:
3937 raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3938 " specs: %s" % (k, ret[k], keyname, err)),
def _ParseISpec(spec, keyname, required):
  """Parse and validate a single instance spec dictionary.

  @type spec: dict
  @param spec: the raw spec (values possibly with units)
  @type keyname: string
  @param keyname: spec name, used in error messages
  @type required: bool
  @param required: whether all ISPECS_PARAMETERS must be present
  @rtype: dict
  @return: the parsed and type-checked spec
  @raise errors.OpPrereqError: if required parameters are missing or a
      value is invalid
  """
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  """Check whether the bounds specs consist of a single allowed placeholder.

  @param minmax_ispecs: list of {key: spec} dictionaries (may be None)
  @param allowed_values: iterable of special placeholder values (may be None)
  @return: the placeholder key if C{minmax_ispecs} is exactly one
      single-entry dict whose key is allowed and whose spec is empty;
      None otherwise
  """
  ret = None
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
      len(minmax_ispecs[0]) == 1):
    for (key, spec) in minmax_ispecs[0].items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fill an ipolicy dict from the --ipolicy-*-specs style options.

  @param ipolicy_out: dictionary updated in place with the parsed specs
  @param minmax_ispecs: list of {min: ..., max: ...} spec dicts, or None
  @param std_ispecs: standard spec dict, or None
  @param group_ipolicy: whether this is a group-level policy (std specs
      are not allowed there)
  @param allowed_values: special placeholder values (e.g. "default")
      accepted instead of real specs
  @raise errors.OpPrereqError: on invalid bounds keys or spec values
  """
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # a special placeholder value was given instead of real bounds
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  The ispecs_* arguments come from the legacy --specs-* options, while
  minmax_ispecs/std_ispecs come from the newer --ipolicy-*-specs options;
  the two families are mutually exclusive.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled
  @raise errors.OpPrereqError: if both option families are used together,
      or on invalid spec values

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()

  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      # special placeholder (e.g. "default") is stored verbatim
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  # NOTE(review): the `baseind` indent-string definition, the key and
  # newline bookkeeping inside each branch, the empty-container special
  # cases, the plain-list loop header, and the `else:` introducing the
  # final string fallback are missing from this view of the file --
  # confirm against the full source.
  if isinstance(data, dict):
    # plain dictionary: one "key: value" entry per key, sorted for
    # deterministic output
    for key in sorted(data):
      buf.write(baseind * level)
      _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    for (key, val) in data:
      buf.write(baseind * level)
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    # plain list: YAML-style "- item" entries
      buf.write(baseind * level)
      buf.write(baseind[1:])
      _SerializeGenericInfo(buf, item, level + 1)
  # This branch should be only taken for strings, but it's practically
  # impossible to guarantee that no other types are produced somewhere
  buf.write(str(data))
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  # strip the trailing newline: ToStdout adds its own
  ToStdout(buf.getvalue().rstrip("\n"))