4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
import sys

from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 "ENABLED_DISK_TEMPLATES_OPT",
89 "FILESTORE_DRIVER_OPT",
97 "GLOBAL_SHARED_FILEDIR_OPT",
102 "DEFAULT_IALLOCATOR_OPT",
103 "IDENTIFY_DEFAULTS_OPT",
104 "IGNORE_CONSIST_OPT",
106 "IGNORE_FAILURES_OPT",
107 "IGNORE_OFFLINE_OPT",
108 "IGNORE_REMOVE_FAILURES_OPT",
109 "IGNORE_SECONDARIES_OPT",
111 "INCLUDEDEFAULTS_OPT",
114 "MAINTAIN_NODE_HEALTH_OPT",
116 "MASTER_NETMASK_OPT",
118 "MIGRATION_MODE_OPT",
122 "NEW_CLUSTER_CERT_OPT",
123 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
124 "NEW_CONFD_HMAC_KEY_OPT",
128 "NEW_SPICE_CERT_OPT",
130 "NOCONFLICTSCHECK_OPT",
131 "NODE_FORCE_JOIN_OPT",
133 "NODE_PLACEMENT_OPT",
137 "NODRBD_STORAGE_OPT",
143 "NOMODIFY_ETCHOSTS_OPT",
144 "NOMODIFY_SSH_SETUP_OPT",
148 "NORUNTIME_CHGS_OPT",
151 "NOSSH_KEYCHECK_OPT",
165 "PREALLOC_WIPE_DISKS_OPT",
166 "PRIMARY_IP_VERSION_OPT",
173 "REMOVE_INSTANCE_OPT",
174 "REMOVE_RESERVED_IPS_OPT",
180 "SECONDARY_ONLY_OPT",
185 "SHUTDOWN_TIMEOUT_OPT",
187 "SPECS_CPU_COUNT_OPT",
188 "SPECS_DISK_COUNT_OPT",
189 "SPECS_DISK_SIZE_OPT",
190 "SPECS_MEM_SIZE_OPT",
191 "SPECS_NIC_COUNT_OPT",
193 "IPOLICY_STD_SPECS_OPT",
194 "IPOLICY_DISK_TEMPLATES",
195 "IPOLICY_VCPU_RATIO",
201 "STARTUP_PAUSED_OPT",
210 "USE_EXTERNAL_MIP_SCRIPT",
218 "IGNORE_IPOLICY_OPT",
219 "INSTANCE_POLICY_OPTS",
220 # Generic functions for CLI programs
222 "CreateIPolicyFromOpts",
224 "GenericInstanceCreate",
230 "JobSubmittedException",
232 "RunWhileClusterStopped",
236 # Formatting functions
237 "ToStderr", "ToStdout",
240 "FormatParamsDictInfo",
242 "PrintIPolicyCommand",
252 # command line options support infrastructure
253 "ARGS_MANY_INSTANCES",
256 "ARGS_MANY_NETWORKS",
276 "OPT_COMPL_INST_ADD_NODES",
277 "OPT_COMPL_MANY_NODES",
278 "OPT_COMPL_ONE_IALLOCATOR",
279 "OPT_COMPL_ONE_INSTANCE",
280 "OPT_COMPL_ONE_NODE",
281 "OPT_COMPL_ONE_NODEGROUP",
282 "OPT_COMPL_ONE_NETWORK",
284 "OPT_COMPL_ONE_EXTSTORAGE",
289 "COMMON_CREATE_OPTS",
#: Priorities (sorted from lowest to highest operation priority)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25

# constants used to create InstancePolicy dictionary
# (group-level policies have no "std" values, only the cluster does)
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
341 def __init__(self, min=0, max=None): # pylint: disable=W0622
346 return ("<%s min=%s max=%s>" %
347 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices are only suggestions (e.g. for shell completion), the
    # value is not restricted to them
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """
# Common argument-list shorthands used by the per-command definitions
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command-line options; must carry a C{tag_type}
      attribute identifying the object kind we operate on
  @param args: remaining positional arguments; for non-cluster tag
      kinds the first element (the object name) is popped off
  @rtype: tuple
  @return: (kind, name) of the tag target
  @raises errors.OpPrereqError: if a name argument is required but missing

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name needed
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # NOTE(review): closes stdin as well when fname == "-"; presumably
    # acceptable here since tag commands exit right after -- confirm
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  # sort for stable, user-friendly output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # optionally pull additional tags from opts.tags_source (file or stdin)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # optionally pull additional tags from opts.tags_source (file or stdin)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
560 def check_unit(option, opt, value): # pylint: disable=W0613
561 """OptParsers custom converter for units.
565 return utils.ParseUnit(value)
566 except errors.UnitParseError, err:
567 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        # no explicit value: interpret the "no_"/"-" prefixes as
        # False/None, everything else as a boolean True flag
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    # "no_ident" means removal of the whole group; extra options make
    # no sense in that case
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    # a leading "-" resets the group to defaults, unless it is followed
    # by a digit (then it is a negative index, not the prefix)
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)
def _SplitListKeyVal(opt, value):
  """Parse a "/"-separated list of ident:key=val groups into a dict.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: "ident:key=val,key=val/ident:key=val" style string
  @rtype: dict
  @return: {ident: {key: val}} mapping
  @raises errors.ParameterError: on empty sections or duplicate idents

  """
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    # prefixes are not parsed specially inside list elements
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval
def check_list_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val" options.

  @rtype: dict of dicts
  @return: {ident: {key: val, key: val}, ident: {key: val}}

  """
  return _SplitListKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  Returns the literal "default" keyword unchanged, otherwise the
  value converted to float.

  """
  value = value.lower()
  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Adds the C{completion_suggest} attribute and registers the custom
  value types/checkers defined above.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "listidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["listidentkeyval"] = check_list_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
789 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
790 help="Increase debugging level")
792 NOHDR_OPT = cli_option("--no-headers", default=False,
793 action="store_true", dest="no_headers",
794 help="Don't display column headers")
796 SEP_OPT = cli_option("--separator", default=None,
797 action="store", dest="separator",
798 help=("Separator between output fields"
799 " (defaults to one space)"))
801 USEUNITS_OPT = cli_option("--units", default=None,
802 dest="units", choices=("h", "m", "g", "t"),
803 help="Specify units for output (one of h/m/g/t)")
805 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
806 type="string", metavar="FIELDS",
807 help="Comma separated list of output fields")
809 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
810 default=False, help="Force the operation")
812 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
813 default=False, help="Do not require confirmation")
815 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
816 action="store_true", default=False,
817 help=("Ignore offline nodes and do as much"
820 TAG_ADD_OPT = cli_option("--tags", dest="tags",
821 default=None, help="Comma-separated list of instance"
824 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
825 default=None, help="File with tag names")
827 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
828 default=False, action="store_true",
829 help=("Submit the job and return the job ID, but"
830 " don't wait for the job to finish"))
832 SYNC_OPT = cli_option("--sync", dest="do_locking",
833 default=False, action="store_true",
834 help=("Grab locks while doing the queries"
835 " in order to ensure more consistent results"))
837 DRY_RUN_OPT = cli_option("--dry-run", default=False,
839 help=("Do not execute the operation, just run the"
840 " check steps and verify if it could be"
843 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
845 help="Increase the verbosity of the operation")
847 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
848 action="store_true", dest="simulate_errors",
849 help="Debugging option that makes the operation"
850 " treat most runtime checks as failed")
852 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
853 default=True, action="store_false",
854 help="Don't wait for sync (DANGEROUS!)")
856 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
857 default=False, action="store_true",
858 help="Wait for disks to sync")
860 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
861 action="store_true", default=False,
862 help="Enable offline instance")
864 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
865 action="store_true", default=False,
866 help="Disable down instance")
868 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
869 help=("Custom disk setup (%s)" %
870 utils.CommaJoin(constants.DISK_TEMPLATES)),
871 default=None, metavar="TEMPL",
872 choices=list(constants.DISK_TEMPLATES))
874 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
875 help="Do not create any network cards for"
878 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
879 help="Relative path under default cluster-wide"
880 " file storage dir to store file-based disks",
881 default=None, metavar="<DIR>")
883 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
884 help="Driver to use for image files",
885 default="loop", metavar="<DRIVER>",
886 choices=list(constants.FILE_DRIVER))
888 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
889 help="Select nodes for the instance automatically"
890 " using the <NAME> iallocator plugin",
891 default=None, type="string",
892 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
894 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
896 help="Set the default instance"
898 default=None, type="string",
899 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
901 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
903 completion_suggest=OPT_COMPL_ONE_OS)
905 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
906 type="keyval", default={},
907 help="OS parameters")
909 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
910 action="store_true", default=False,
911 help="Force an unknown variant")
913 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
914 action="store_true", default=False,
915 help="Do not install the OS (will"
918 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
919 dest="allow_runtime_chgs",
920 default=True, action="store_false",
921 help="Don't allow runtime changes")
923 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
924 type="keyval", default={},
925 help="Backend parameters")
927 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
928 default={}, dest="hvparams",
929 help="Hypervisor parameters")
931 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
932 help="Disk template parameters, in the format"
933 " template:option=value,option=value,...",
934 type="identkeyval", action="append", default=[])
936 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
937 type="keyval", default={},
938 help="Memory size specs: list of key=value,"
939 " where key is one of min, max, std"
940 " (in MB or using a unit)")
942 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
943 type="keyval", default={},
944 help="CPU count specs: list of key=value,"
945 " where key is one of min, max, std")
947 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
948 dest="ispecs_disk_count",
949 type="keyval", default={},
950 help="Disk count specs: list of key=value,"
951 " where key is one of min, max, std")
953 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
954 type="keyval", default={},
955 help="Disk size specs: list of key=value,"
956 " where key is one of min, max, std"
957 " (in MB or using a unit)")
959 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
960 type="keyval", default={},
961 help="NIC count specs: list of key=value,"
962 " where key is one of min, max, std")
964 IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
965 IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
966 dest="ipolicy_bounds_specs",
967 type="listidentkeyval", default=None,
968 help="Complete instance specs limits")
970 IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
971 IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
972 dest="ipolicy_std_specs",
973 type="keyval", default=None,
974 help="Complte standard instance specs")
976 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
977 dest="ipolicy_disk_templates",
978 type="list", default=None,
979 help="Comma-separated list of"
980 " enabled disk templates")
982 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
983 dest="ipolicy_vcpu_ratio",
984 type="maybefloat", default=None,
985 help="The maximum allowed vcpu-to-cpu ratio")
987 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
988 dest="ipolicy_spindle_ratio",
989 type="maybefloat", default=None,
990 help=("The maximum allowed instances to"
993 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
994 help="Hypervisor and hypervisor options, in the"
995 " format hypervisor:option=value,option=value,...",
996 default=None, type="identkeyval")
998 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
999 help="Hypervisor and hypervisor options, in the"
1000 " format hypervisor:option=value,option=value,...",
1001 default=[], action="append", type="identkeyval")
1003 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1004 action="store_false",
1005 help="Don't check that the instance's IP"
1008 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1009 default=True, action="store_false",
1010 help="Don't check that the instance's name"
1013 NET_OPT = cli_option("--net",
1014 help="NIC parameters", default=[],
1015 dest="nics", action="append", type="identkeyval")
1017 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1018 dest="disks", action="append", type="identkeyval")
1020 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1021 help="Comma-separated list of disks"
1022 " indices to act on (e.g. 0,2) (optional,"
1023 " defaults to all disks)")
1025 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1026 help="Enforces a single-disk configuration using the"
1027 " given disk size, in MiB unless a suffix is used",
1028 default=None, type="unit", metavar="<size>")
1030 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1031 dest="ignore_consistency",
1032 action="store_true", default=False,
1033 help="Ignore the consistency of the disks on"
1036 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1037 dest="allow_failover",
1038 action="store_true", default=False,
1039 help="If migration is not possible fallback to"
1042 NONLIVE_OPT = cli_option("--non-live", dest="live",
1043 default=True, action="store_false",
1044 help="Do a non-live migration (this usually means"
1045 " freeze the instance, save the state, transfer and"
1046 " only then resume running on the secondary node)")
1048 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1050 choices=list(constants.HT_MIGRATION_MODES),
1051 help="Override default migration mode (choose"
1052 " either live or non-live")
1054 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1055 help="Target node and optional secondary node",
1056 metavar="<pnode>[:<snode>]",
1057 completion_suggest=OPT_COMPL_INST_ADD_NODES)
1059 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1060 action="append", metavar="<node>",
1061 help="Use only this node (can be used multiple"
1062 " times, if not given defaults to all nodes)",
1063 completion_suggest=OPT_COMPL_ONE_NODE)
1065 NODEGROUP_OPT_NAME = "--node-group"
1066 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1068 help="Node group (name or uuid)",
1069 metavar="<nodegroup>",
1070 default=None, type="string",
1071 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1073 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1075 completion_suggest=OPT_COMPL_ONE_NODE)
1077 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1078 action="store_false",
1079 help="Don't start the instance after creation")
1081 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1082 action="store_true", default=False,
1083 help="Show command instead of executing it")
1085 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1086 default=False, action="store_true",
1087 help="Instead of performing the migration, try to"
1088 " recover from a failed cleanup. This is safe"
1089 " to run even if the instance is healthy, but it"
1090 " will create extra replication traffic and "
1091 " disrupt briefly the replication (like during the"
1094 STATIC_OPT = cli_option("-s", "--static", dest="static",
1095 action="store_true", default=False,
1096 help="Only show configuration data, not runtime data")
1098 ALL_OPT = cli_option("--all", dest="show_all",
1099 default=False, action="store_true",
1100 help="Show info on all instances on the cluster."
1101 " This can take a long time to run, use wisely")
1103 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1104 action="store_true", default=False,
1105 help="Interactive OS reinstall, lists available"
1106 " OS templates for selection")
1108 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1109 action="store_true", default=False,
1110 help="Remove the instance from the cluster"
1111 " configuration even if there are failures"
1112 " during the removal process")
1114 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1115 dest="ignore_remove_failures",
1116 action="store_true", default=False,
1117 help="Remove the instance from the"
1118 " cluster configuration even if there"
1119 " are failures during the removal"
1122 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1123 action="store_true", default=False,
1124 help="Remove the instance from the cluster")
1126 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1127 help="Specifies the new node for the instance",
1128 metavar="NODE", default=None,
1129 completion_suggest=OPT_COMPL_ONE_NODE)
1131 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1132 help="Specifies the new secondary node",
1133 metavar="NODE", default=None,
1134 completion_suggest=OPT_COMPL_ONE_NODE)
1136 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1137 help="Specifies the new primary node",
1138 metavar="<node>", default=None,
1139 completion_suggest=OPT_COMPL_ONE_NODE)
1141 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1142 default=False, action="store_true",
1143 help="Replace the disk(s) on the primary"
1144 " node (applies only to internally mirrored"
1145 " disk templates, e.g. %s)" %
1146 utils.CommaJoin(constants.DTS_INT_MIRROR))
1148 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1149 default=False, action="store_true",
1150 help="Replace the disk(s) on the secondary"
1151 " node (applies only to internally mirrored"
1152 " disk templates, e.g. %s)" %
1153 utils.CommaJoin(constants.DTS_INT_MIRROR))
1155 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1156 default=False, action="store_true",
1157 help="Lock all nodes and auto-promote as needed"
1160 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1161 default=False, action="store_true",
1162 help="Automatically replace faulty disks"
1163 " (applies only to internally mirrored"
1164 " disk templates, e.g. %s)" %
1165 utils.CommaJoin(constants.DTS_INT_MIRROR))
1167 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1168 default=False, action="store_true",
1169 help="Ignore current recorded size"
1170 " (useful for forcing activation when"
1171 " the recorded size is wrong)")
1173 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1175 completion_suggest=OPT_COMPL_ONE_NODE)
1177 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1180 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1181 help="Specify the secondary ip for the node",
1182 metavar="ADDRESS", default=None)
1184 READD_OPT = cli_option("--readd", dest="readd",
1185 default=False, action="store_true",
1186 help="Readd old node after replacing it")
1188 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1189 default=True, action="store_false",
1190 help="Disable SSH key fingerprint checking")
1192 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1193 default=False, action="store_true",
1194 help="Force the joining of a node")
1196 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1197 type="bool", default=None, metavar=_YORNO,
1198 help="Set the master_candidate flag on the node")
1200 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1201 type="bool", default=None,
1202 help=("Set the offline flag on the node"
1203 " (cluster does not communicate with offline"
# Reusable command-line option objects. Each assignment builds one option
# (via the project's cli_option factory, defined earlier in this file) that
# individual gnt-* commands include in their optparse parsers. Custom
# "type" strings such as "bool"/"keyval"/"unit" are presumably handled by
# cli_option's Option subclass — defined outside this chunk; verify there.
1206 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1207 type="bool", default=None,
1208 help=("Set the drained flag on the node"
1209 " (excluded from allocation operations)"))
1211 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1212 type="bool", default=None, metavar=_YORNO,
1213 help="Set the master_capable flag on the node")
1215 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1216 type="bool", default=None, metavar=_YORNO,
1217 help="Set the vm_capable flag on the node")
1219 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1220 type="bool", default=None, metavar=_YORNO,
1221 help="Set the allocatable flag on a volume")
# Note: "--no-lvm-storage" stores False into dest "lvm_storage" (negative
# option, positive destination); same pattern for other --no-* options below.
1223 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1224 help="Disable support for lvm based instances"
1226 action="store_false", default=True)
1228 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1229 dest="enabled_hypervisors",
1230 help="Comma-separated list of hypervisors",
1231 type="string", default=None)
1233 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1234 dest="enabled_disk_templates",
1235 help="Comma-separated list of "
1237 type="string", default=None)
1239 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1240 type="keyval", default={},
1241 help="NIC parameters")
1243 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1244 dest="candidate_pool_size", type="int",
1245 help="Set the candidate pool size")
1247 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1248 help=("Enables LVM and specifies the volume group"
1249 " name (cluster-wide) for disk allocation"
1250 " [%s]" % constants.DEFAULT_VG),
1251 metavar="VG", default=None)
1253 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1254 help="Destroy cluster", action="store_true")
1256 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1257 help="Skip node agreement check (dangerous)",
1258 action="store_true", default=False)
1260 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1261 help="Specify the mac prefix for the instance IP"
1262 " addresses, in the format XX:XX:XX",
1266 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1267 help="Specify the node interface (cluster-wide)"
1268 " on which the master IP address will be added"
1269 " (cluster init default: %s)" %
1270 constants.DEFAULT_BRIDGE,
1274 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1275 help="Specify the netmask of the master IP",
1279 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1280 dest="use_external_mip_script",
1281 help="Specify whether to run a"
1282 " user-provided script for the master"
1283 " IP address turnup and"
1284 " turndown operations",
1285 type="bool", metavar=_YORNO, default=None)
1287 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1288 help="Specify the default directory (cluster-"
1289 "wide) for storing the file-based disks [%s]" %
1290 pathutils.DEFAULT_FILE_STORAGE_DIR,
1292 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1294 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1295 "--shared-file-storage-dir",
1296 dest="shared_file_storage_dir",
1297 help="Specify the default directory (cluster-wide) for storing the"
1298 " shared file-based disks [%s]" %
1299 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1300 metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1302 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1303 help="Don't modify %s" % pathutils.ETC_HOSTS,
1304 action="store_false", default=True)
1306 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1307 help="Don't initialize SSH keys",
1308 action="store_false", default=True)
1310 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1311 help="Enable parseable error messages",
1312 action="store_true", default=False)
1314 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1315 help="Skip N+1 memory redundancy tests",
1316 action="store_true", default=False)
1318 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1319 help="Type of reboot: soft/hard/full",
1320 default=constants.INSTANCE_REBOOT_HARD,
1322 choices=list(constants.REBOOT_TYPES))
1324 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1325 dest="ignore_secondaries",
1326 default=False, action="store_true",
1327 help="Ignore errors from secondaries")
1329 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1330 action="store_false", default=True,
1331 help="Don't shutdown the instance (unsafe)")
1333 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1334 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1335 help="Maximum time to wait")
1337 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1338 dest="shutdown_timeout", type="int",
1339 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1340 help="Maximum time to wait for instance"
1343 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1345 help=("Number of seconds between repetions of the"
1348 EARLY_RELEASE_OPT = cli_option("--early-release",
1349 dest="early_release", default=False,
1350 action="store_true",
1351 help="Release the locks on the secondary"
# Options for generating/loading new cluster certificates and secrets.
1354 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1355 dest="new_cluster_cert",
1356 default=False, action="store_true",
1357 help="Generate a new cluster certificate")
1359 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1361 help="File containing new RAPI certificate")
1363 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1364 default=None, action="store_true",
1365 help=("Generate a new self-signed RAPI"
1368 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1370 help="File containing new SPICE certificate")
1372 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1374 help="File containing the certificate of the CA"
1375 " which signed the SPICE certificate")
1377 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1378 dest="new_spice_cert", default=None,
1379 action="store_true",
1380 help=("Generate a new self-signed SPICE"
1383 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1384 dest="new_confd_hmac_key",
1385 default=False, action="store_true",
1386 help=("Create a new HMAC key for %s" %
1389 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1390 dest="cluster_domain_secret",
1392 help=("Load new new cluster domain"
1393 " secret from file"))
1395 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1396 dest="new_cluster_domain_secret",
1397 default=False, action="store_true",
1398 help=("Create a new cluster domain"
1401 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1402 dest="use_replication_network",
1403 help="Whether to use the replication network"
1404 " for talking to the nodes",
1405 action="store_true", default=False)
1407 MAINTAIN_NODE_HEALTH_OPT = \
1408 cli_option("--maintain-node-health", dest="maintain_node_health",
1409 metavar=_YORNO, default=None, type="bool",
1410 help="Configure the cluster to automatically maintain node"
1411 " health, by shutting down unknown instances, shutting down"
1412 " unknown DRBD devices, etc.")
1414 IDENTIFY_DEFAULTS_OPT = \
1415 cli_option("--identify-defaults", dest="identify_defaults",
1416 default=False, action="store_true",
1417 help="Identify which saved instance parameters are equal to"
1418 " the current cluster defaults and set them as such, instead"
1419 " of marking them as overridden")
1421 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1422 action="store", dest="uid_pool",
1423 help=("A list of user-ids or user-id"
1424 " ranges separated by commas"))
1426 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1427 action="store", dest="add_uids",
1428 help=("A list of user-ids or user-id"
1429 " ranges separated by commas, to be"
1430 " added to the user-id pool"))
1432 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1433 action="store", dest="remove_uids",
1434 help=("A list of user-ids or user-id"
1435 " ranges separated by commas, to be"
1436 " removed from the user-id pool"))
1438 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1439 action="store", dest="reserved_lvs",
1440 help=("A comma-separated list of reserved"
1441 " logical volumes names, that will be"
1442 " ignored by cluster verify"))
1444 ROMAN_OPT = cli_option("--roman",
1445 dest="roman_integers", default=False,
1446 action="store_true",
1447 help="Use roman numbers for positive integers")
1449 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1450 action="store", default=None,
1451 help="Specifies usermode helper for DRBD")
1453 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1454 action="store_false", default=True,
1455 help="Disable support for DRBD")
1457 PRIMARY_IP_VERSION_OPT = \
1458 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1459 action="store", dest="primary_ip_version",
1460 metavar="%d|%d" % (constants.IP4_VERSION,
1461 constants.IP6_VERSION),
1462 help="Cluster-wide IP version for primary IP")
1464 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1465 action="store_true",
1466 help="Show machine name for every line in output")
1468 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1469 action="store_true",
1470 help=("Hide successful results and show failures"
1471 " only (determined by the exit code)"))
# --reason feeds the opcode reason trail; see _InitReasonTrail below.
1473 REASON_OPT = cli_option("--reason", default=None,
1474 help="The reason for executing the command")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name chosen on the command line into
  its numeric value and stores it on the parsed options object.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
# --priority uses an optparse callback: the symbolic name typed by the user
# is mapped to its numeric value by _PriorityOptionCb (defined just above).
1486 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1487 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1488 choices=_PRIONAME_TO_VALUE.keys(),
1489 action="callback", type="choice",
1490 callback=_PriorityOptionCb,
1491 help="Priority for opcode processing")
1493 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1494 type="bool", default=None, metavar=_YORNO,
1495 help="Sets the hidden flag on the OS")
1497 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1498 type="bool", default=None, metavar=_YORNO,
1499 help="Sets the blacklisted flag on the OS")
1501 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1502 type="bool", metavar=_YORNO,
1503 dest="prealloc_wipe_disks",
1504 help=("Wipe disks prior to instance"
1507 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1508 type="keyval", default=None,
1509 help="Node parameters")
1511 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1512 action="store", metavar="POLICY", default=None,
1513 help="Allocation policy for the node group")
1515 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1516 type="bool", metavar=_YORNO,
1517 dest="node_powered",
1518 help="Specify if the SoR for node is powered")
1520 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1521 default=constants.OOB_TIMEOUT,
1522 help="Maximum time to wait for out-of-band helper")
1524 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1525 default=constants.OOB_POWER_DELAY,
1526 help="Time in seconds to wait between power-ons")
1528 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1529 action="store_true", default=False,
1530 help=("Whether command argument should be treated"
1533 NO_REMEMBER_OPT = cli_option("--no-remember",
1535 action="store_true", default=False,
1536 help="Perform but do not record the change"
1537 " in the configuration")
1539 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1540 default=False, action="store_true",
1541 help="Evacuate primary instances only")
1543 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1544 default=False, action="store_true",
1545 help="Evacuate secondary instances only"
1546 " (applies only to internally mirrored"
1547 " disk templates, e.g. %s)" %
1548 utils.CommaJoin(constants.DTS_INT_MIRROR))
1550 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1551 action="store_true", default=False,
1552 help="Pause instance at startup")
1554 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1555 help="Destination node group (name or uuid)",
1556 default=None, action="append",
1557 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1559 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1560 action="append", dest="ignore_errors",
1561 choices=list(constants.CV_ALL_ECODES_STRINGS),
1562 help="Error code to be ignored")
1564 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1566 help=("Specify disk state information in the"
1568 " storage_type/identifier:option=value,...;"
1569 " note this is unused for now"),
1572 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1574 help=("Specify hypervisor state information in the"
1575 " format hypervisor:option=value,...;"
1576 " note this is unused for now"),
1579 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1580 action="store_true", default=False,
1581 help="Ignore instance policy violations")
1583 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1584 help="Sets the instance's runtime memory,"
1585 " ballooning it up or down to the new value",
1586 default=None, type="unit", metavar="<size>")
1588 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1589 action="store_true", default=False,
1590 help="Marks the grow as absolute instead of the"
1591 " (default) relative mode")
# Network-related options (networks, gateways, reserved IP lists).
1593 NETWORK_OPT = cli_option("--network",
1594 action="store", default=None, dest="network",
1595 help="IP network in CIDR notation")
1597 GATEWAY_OPT = cli_option("--gateway",
1598 action="store", default=None, dest="gateway",
1599 help="IP address of the router (gateway)")
1601 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1602 action="store", default=None,
1603 dest="add_reserved_ips",
1604 help="Comma-separated list of"
1605 " reserved IPs to add")
1607 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1608 action="store", default=None,
1609 dest="remove_reserved_ips",
1610 help="Comma-delimited list of"
1611 " reserved IPs to remove")
1613 NETWORK6_OPT = cli_option("--network6",
1614 action="store", default=None, dest="network6",
1615 help="IP network in CIDR notation")
1617 GATEWAY6_OPT = cli_option("--gateway6",
1618 action="store", default=None, dest="gateway6",
1619 help="IP6 address of the router (gateway)")
1621 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1622 dest="conflicts_check",
1624 action="store_false",
1625 help="Don't check for conflicting IPs")
1627 INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1628 default=False, action="store_true",
1629 help="Include default values")
1631 #: Options provided by all commands
1632 COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1634 # common options for creating instances. add and import then add their own
1636 COMMON_CREATE_OPTS = [
1641 FILESTORE_DRIVER_OPT,
1647 NOCONFLICTSCHECK_OPT,
1659 # common instance policy options
1660 INSTANCE_POLICY_OPTS = [
1661 IPOLICY_BOUNDS_SPECS_OPT,
1662 IPOLICY_DISK_TEMPLATES,
1664 IPOLICY_SPINDLE_RATIO,
1667 # instance policy split specs options
1668 SPLIT_ISPECS_OPTS = [
1669 SPECS_CPU_COUNT_OPT,
1670 SPECS_DISK_COUNT_OPT,
1671 SPECS_DISK_SIZE_OPT,
1673 SPECS_NIC_COUNT_OPT,
1677 class _ShowUsage(Exception):
1678 """Exception class for L{_ParseArgs}.
1681 def __init__(self, exit_error):
1682 """Initializes instances of this class.
1684 @type exit_error: bool
1685 @param exit_error: Whether to report failure on exit
1688 Exception.__init__(self)
1689 self.exit_error = exit_error
1692 class _ShowVersion(Exception):
1693 """Exception class for L{_ParseArgs}.
1698 def _ParseArgs(binary, argv, commands, aliases, env_override):
1699 """Parser for the command line arguments.
1701 This function parses the arguments and returns the function which
1702 must be executed together with its (modified) arguments.
1704 @param binary: Script name
1705 @param argv: Command line arguments
1706 @param commands: Dictionary containing command definitions
1707 @param aliases: dictionary with command aliases {"alias": "target", ...}
1708 @param env_override: list of env variables allowed for default args
1709 @raise _ShowUsage: If usage description should be shown
1710 @raise _ShowVersion: If version should be shown
# Sanity checks: every env-override entry must name a real command, and an
# alias must never shadow an actual command name.
1713 assert not (env_override - set(commands))
1714 assert not (set(aliases.keys()) & set(commands.keys()))
1719 # No option or command given
1720 raise _ShowUsage(exit_error=True)
# Global pseudo-commands are handled via exceptions so callers can print
# usage/version and pick the right exit code.
1722 if cmd == "--version":
1723 raise _ShowVersion()
1724 elif cmd == "--help":
1725 raise _ShowUsage(exit_error=False)
1726 elif not (cmd in commands or cmd in aliases):
1727 raise _ShowUsage(exit_error=True)
1729 # get command, unalias it, and look it up in commands
1731 if aliases[cmd] not in commands:
1732 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1733 " command '%s'" % (cmd, aliases[cmd]))
# Commands in env_override may take default arguments from an environment
# variable named <BINARY>_<COMMAND> (uppercased, "-" replaced by "_");
# those are spliced into argv right after the command word.
1737 if cmd in env_override:
1738 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1739 env_args = os.environ.get(args_env_name)
1741 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
# Build a per-command parser; COMMON_OPTS (debug/reason) are always added.
1743 func, args_def, parser_opts, usage, description = commands[cmd]
1744 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1745 description=description,
1746 formatter=TitledHelpFormatter(),
1747 usage="%%prog %s %s" % (cmd, usage))
1748 parser.disable_interspersed_args()
1749 options, args = parser.parse_args(args=argv[2:])
# Argument-count validation failed: signal the caller with a None triple.
1751 if not _CheckArguments(cmd, args_def, args):
1752 return None, None, None
1754 return func, options, args
1757 def _FormatUsage(binary, commands):
1758 """Generates a nice description of all commands.
1760 @param binary: Script name
1761 @param commands: Dictionary containing command definitions
1764 # compute the max line length for cmd + usage
1765 mlen = min(60, max(map(len, commands)))
1767 yield "Usage: %s {command} [options...] [argument...]" % binary
1768 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1772 # and format a nice command list
1773 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1774 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1775 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1776 for line in help_lines:
1777 yield " %-*s %s" % (mlen, "", line)
1782 def _CheckArguments(cmd, args_def, args):
1783 """Verifies the arguments using the argument definition.
1787 1. Abort with error if values specified by user but none expected.
1789 1. For each argument in definition
1791 1. Keep running count of minimum number of values (min_count)
1792 1. Keep running count of maximum number of values (max_count)
1793 1. If it has an unlimited number of values
1795 1. Abort with error if it's not the last argument in the definition
1797 1. If last argument has limited number of values
1799 1. Abort with error if number of values doesn't match or is too large
1801 1. Abort with error if user didn't pass enough values (min_count)
1804 if args and not args_def:
1805 ToStderr("Error: Command %s expects no arguments", cmd)
1812 last_idx = len(args_def) - 1
# Accumulate min/max value counts over all argument definitions; a counter
# stays None until the first definition that provides a bound.
1814 for idx, arg in enumerate(args_def):
1815 if min_count is None:
1817 elif arg.min is not None:
1818 min_count += arg.min
1820 if max_count is None:
1822 elif arg.max is not None:
1823 max_count += arg.max
# Only the last argument may be unbounded (max=None); an unbounded argument
# anywhere else is a programming error in the command definition.
1826 check_max = (arg.max is not None)
1828 elif arg.max is None:
1829 raise errors.ProgrammerError("Only the last argument can have max=None")
1832 # Command with exact number of arguments
1833 if (min_count is not None and max_count is not None and
1834 min_count == max_count and len(args) != min_count):
1835 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1838 # Command with limited number of arguments
1839 if max_count is not None and len(args) > max_count:
1840 ToStderr("Error: Command %s expects only %d argument(s)",
1844 # Command with some required arguments
1845 if min_count is not None and len(args) < min_count:
1846 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "node:secondary" is split at the first colon;
  anything else (including None or an empty string) is returned with a
  None second element.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # No variants: the bare OS name is the only valid name
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @return: the default list when nothing was selected; the defaults
      extended by the listed fields when the selection starts with "+";
      otherwise only the explicitly listed fields

  """
  if selected is None:
    return default
  if selected.startswith("+"):
    return default + selected[1:].split(",")
  return selected.split(",")
# Alias, presumably used as a decorator by command functions; the actual
# behavior lives in ganeti.rpc.RunWithRPC — verify there.
1898 UsesRPC = rpc.RunWithRPC
1901 def AskUser(text, choices=None):
1902 """Ask the user a question.
1904 @param text: the question to ask
1906 @param choices: list with elements tuples (input_char, return_value,
1907 description); if not given, it will default to: [('y', True,
1908 'Perform the operation'), ('n', False, 'Do no do the operation')];
1909 note that the '?' char is reserved for help
1911 @return: one of the return values from the choices list; if input is
1912 not possible (i.e. not running with a tty, we return the last
1917 choices = [("y", True, "Perform the operation"),
1918 ("n", False, "Do not perform the operation")]
# Validate the choices structure up front; a malformed list is a caller bug.
1919 if not choices or not isinstance(choices, list):
1920 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1921 for entry in choices:
1922 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1923 raise errors.ProgrammerError("Invalid choices element to AskUser")
# Default answer is the return value of the LAST choice (per the docstring,
# this is what gets returned when no tty is available).
1925 answer = choices[-1][1]
# Re-wrap the question text to 70 columns, preserving explicit newlines.
1927 for line in text.splitlines():
1928 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1929 text = "\n".join(new_text)
# Python 2 builtin file(): prompt and read directly on the controlling tty,
# so the question works even when stdin/stdout are redirected.
1931 f = file("/dev/tty", "a+")
1935 chars = [entry[0] for entry in choices]
1936 chars[-1] = "[%s]" % chars[-1]
1938 maps = dict([(entry[0], entry[1]) for entry in choices])
1942 f.write("/".join(chars))
# Read at most 2 characters: the answer character plus the newline.
1944 line = f.readline(2).strip().lower()
# Help: list every choice character with its description.
1949 for entry in choices:
1950 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  Carries a single argument: the ID of the submitted job, which the
  handler is expected to print.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  @return: the job ID assigned by the master

  """
  if cl is None:
    cl = GetClient()
  return cl.SubmitJob(ops)
1987 def GenericPollJob(job_id, cbs, report_cbs):
1988 """Generic job-polling function.
1990 @type job_id: number
1991 @param job_id: Job ID
1992 @type cbs: Instance of L{JobPollCbBase}
1993 @param cbs: Data callbacks
1994 @type report_cbs: Instance of L{JobPollReportCbBase}
1995 @param report_cbs: Reporting callbacks
# Track the last seen job info and log-message serial so the wait call can
# return only new information on each iteration.
1998 prev_job_info = None
1999 prev_logmsg_serial = None
2004 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2007 # job not found, go away!
2008 raise errors.JobLost("Job with id %s lost" % job_id)
2010 if result == constants.JOB_NOTCHANGED:
2011 report_cbs.ReportNotChanged(job_id, status)
2016 # Split result, a tuple of (field values, log entries)
2017 (job_info, log_entries) = result
2018 (status, ) = job_info
# Forward every new log entry to the reporting callbacks and remember the
# highest serial so entries are not reported twice.
2021 for log_entry in log_entries:
2022 (serial, timestamp, log_type, message) = log_entry
2023 report_cbs.ReportLogMessage(job_id, serial, timestamp,
2025 prev_logmsg_serial = max(prev_logmsg_serial, serial)
2027 # TODO: Handle canceled and archived jobs
# Once the job reaches a final state, stop waiting and query its results.
2028 elif status in (constants.JOB_STATUS_SUCCESS,
2029 constants.JOB_STATUS_ERROR,
2030 constants.JOB_STATUS_CANCELING,
2031 constants.JOB_STATUS_CANCELED):
2034 prev_job_info = job_info
2036 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2038 raise errors.JobLost("Job with id %s lost" % job_id)
2040 status, opstatus, result = jobs[0]
2042 if status == constants.JOB_STATUS_SUCCESS:
2045 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2046 raise errors.OpExecError("Job was canceled")
# On error, find the first failed opcode and raise the most specific error
# available for it.
2049 for idx, (status, msg) in enumerate(zip(opstatus, result)):
2050 if status == constants.OP_STATUS_SUCCESS:
2052 elif status == constants.OP_STATUS_ERROR:
2053 errors.MaybeRaise(msg)
2056 raise errors.OpExecError("partial failure (opcode %d): %s" %
2059 raise errors.OpExecError(str(msg))
2061 # default failure mode
2062 raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses provide the data-retrieval side of job polling.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how job progress is presented to the user.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """Job-polling callbacks that delegate to a luxi client."""

  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function."""

  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable receiving (timestamp, log_type, log_msg)

    """
    JobPollReportCbBase.__init__(self)
    self.feedback_fn = feedback_fn
    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Deliberately ignored; the feedback function only cares about messages
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print job progress to stdout/stderr."""

  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Remember which one-off notifications were already printed
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message log entries are stringified before being safely encoded.

  """
  text = log_msg if log_type == constants.ELOG_MESSAGE else str(log_msg)
  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: optional callable receiving log messages; mutually
      exclusive with C{reporter}
  @param reporter: optional L{JobPollReportCbBase} instance

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    # Pick a reporter: feedback function if given, otherwise stdio output
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @return: the result of the single submitted opcode

  """
  if cl is None:
    cl = GetClient()

  # Apply generic options (debug, priority, reason trail) to the opcode
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                    reporter=reporter)
  return results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    # Fire-and-forget: submit the job and signal its ID via an exception
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    raise JobSubmittedException(SendJob(job, cl=cl))

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided
  reason (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  trail = []

  # User-supplied --reason comes first, when present
  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  # Then the client binary and the command it is executing
  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)
2323 def GetClient(query=False):
2324 """Connects to the a luxi socket and returns a client.
2326 @type query: boolean
2327 @param query: this signifies that the client will only be
2328 used for queries; if the build-time parameter
2329 enable-split-queries is enabled, then the client will be
2330 connected to the query socket instead of the masterd socket
# The LUXI_OVERRIDE environment variable can force a specific socket:
# the master socket, the query socket, or an arbitrary path.
2333 override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2335 if override_socket == constants.LUXI_OVERRIDE_MASTER:
2336 address = pathutils.MASTER_SOCKET
2337 elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2338 address = pathutils.QUERY_SOCKET
2340 address = override_socket
2341 elif query and constants.ENABLE_SPLIT_QUERY:
2342 address = pathutils.QUERY_SOCKET
2345 # TODO: Cache object?
2347 client = luxi.Client(address=address)
# When no master responds, distinguish "cluster not initialized" from
# "this node is simply not the master" for a more helpful error message.
2348 except luxi.NoMasterError:
2349 ss = ssconf.SimpleStore()
2351 # Try to read ssconf file
2354 except errors.ConfigurationError:
2355 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2356 " not part of a cluster",
2359 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2360 if master != myself:
2361 raise errors.OpPrereqError("This is not the master node, please connect"
2362 " to node '%s' and rerun the command" %
2363 master, errors.ECODE_INVAL)
# NOTE(review): this listing embeds original line numbers and has elided
# lines (gaps in the numbering, e.g. the StringIO buffer and msg setup and
# several else: branches). Code is kept byte-identical to the listing.
# Maps a caught exception to (exit code, human-readable message): each
# isinstance() branch formats one Ganeti/luxi error family into obuf.
2368 def FormatError(err):
2369 """Return a formatted error message for a given error.
2371 This function takes an exception instance and returns a tuple
2372 consisting of two values: first, the recommended exit code, and
2373 second, a string describing the error message (not
2374 newline-terminated).
2380 if isinstance(err, errors.ConfigurationError):
2381 txt = "Corrupt configuration file: %s" % msg
2383 obuf.write(txt + "\n")
2384 obuf.write("Aborting.")
2386 elif isinstance(err, errors.HooksAbort):
2387 obuf.write("Failure: hooks execution failed:\n")
2388 for node, script, out in err.args[0]:
2390 obuf.write(" node: %s, script: %s, output: %s\n" %
2391 (node, script, out))
2393 obuf.write(" node: %s, script: %s (no output)\n" %
2395 elif isinstance(err, errors.HooksFailure):
2396 obuf.write("Failure: hooks general failure: %s" % msg)
2397 elif isinstance(err, errors.ResolverError):
2398 this_host = netutils.Hostname.GetSysName()
# Distinguish "can't resolve myself" from "can't resolve another host".
2399 if err.args[0] == this_host:
2400 msg = "Failure: can't resolve my own hostname ('%s')"
2402 msg = "Failure: can't resolve hostname '%s'"
2403 obuf.write(msg % err.args[0])
2404 elif isinstance(err, errors.OpPrereqError):
# Two-arg OpPrereqError carries (details, error-code) — show both.
2405 if len(err.args) == 2:
2406 obuf.write("Failure: prerequisites not met for this"
2407 " operation:\nerror type: %s, error details:\n%s" %
2408 (err.args[1], err.args[0]))
2410 obuf.write("Failure: prerequisites not met for this"
2411 " operation:\n%s" % msg)
2412 elif isinstance(err, errors.OpExecError):
2413 obuf.write("Failure: command execution error:\n%s" % msg)
2414 elif isinstance(err, errors.TagError):
2415 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2416 elif isinstance(err, errors.JobQueueDrainError):
2417 obuf.write("Failure: the job queue is marked for drain and doesn't"
2418 " accept new requests\n")
2419 elif isinstance(err, errors.JobQueueFull):
2420 obuf.write("Failure: the job queue is full and doesn't accept new"
2421 " job submissions until old jobs are archived\n")
2422 elif isinstance(err, errors.TypeEnforcementError):
2423 obuf.write("Parameter Error: %s" % msg)
2424 elif isinstance(err, errors.ParameterError):
2425 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2426 elif isinstance(err, luxi.NoMasterError):
# Name the daemon by which socket the connection attempt targeted.
2427 if err.args[0] == pathutils.MASTER_SOCKET:
2428 daemon = "the master daemon"
2429 elif err.args[0] == pathutils.QUERY_SOCKET:
2430 daemon = "the config daemon"
2432 daemon = "socket '%s'" % str(err.args[0])
2433 obuf.write("Cannot communicate with %s.\nIs the process running"
2434 " and listening for connections?" % daemon)
2435 elif isinstance(err, luxi.TimeoutError):
2436 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2437 " been submitted and will continue to run even if the call"
2438 " timed out. Useful commands in this situation are \"gnt-job"
2439 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2441 elif isinstance(err, luxi.PermissionError):
2442 obuf.write("It seems you don't have permissions to connect to the"
2443 " master daemon.\nPlease retry as a different user.")
2444 elif isinstance(err, luxi.ProtocolError):
2445 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2447 elif isinstance(err, errors.JobLost):
2448 obuf.write("Error checking job status: %s" % msg)
2449 elif isinstance(err, errors.QueryFilterParseError):
2450 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2451 obuf.write("\n".join(err.GetDetails()))
2452 elif isinstance(err, errors.GenericError):
2453 obuf.write("Unhandled Ganeti error: %s" % msg)
2454 elif isinstance(err, JobSubmittedException):
# Not an error: opcode was submitted asynchronously; report the job ID.
2455 obuf.write("JobID: %s\n" % err.args[0])
2458 obuf.write("Unhandled exception: %s" % msg)
# retcode is set in elided lines above — presumably per-branch; TODO confirm.
2459 return retcode, obuf.getvalue().rstrip("\n")
# NOTE(review): elided lines (numbering gaps) include the try: statements
# matching the except clauses below and the final return; code kept
# byte-identical. Shared entry point for all gnt-* scripts: parse argv,
# apply overrides, set up logging, dispatch to the command function and
# translate exceptions into exit codes via FormatError.
2462 def GenericMain(commands, override=None, aliases=None,
2463 env_override=frozenset()):
2464 """Generic main function for all the gnt-* commands.
2466 @param commands: a dictionary with a special structure, see the design doc
2467 for command line handling.
2468 @param override: if not None, we expect a dictionary with keys that will
2469 override command line options; this can be used to pass
2470 options from the scripts to generic functions
2471 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2472 @param env_override: list of environment names which are allowed to submit
2473 default args for commands
2476 # save the program name and the entire command line for later logging
2478 binary = os.path.basename(sys.argv[0])
2480 binary = sys.argv[0]
2482 if len(sys.argv) >= 2:
# Log name includes the sub-command (first argument) when present.
2483 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2487 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2489 binary = "<unknown program>"
2490 cmdline = "<unknown>"
2496 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
# --version / --help are signalled via private exceptions from _ParseArgs.
2498 except _ShowVersion:
2499 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2500 constants.RELEASE_VERSION)
2501 return constants.EXIT_SUCCESS
2502 except _ShowUsage, err:
2503 for line in _FormatUsage(binary, commands):
2507 return constants.EXIT_FAILURE
2509 return constants.EXIT_SUCCESS
2510 except errors.ParameterError, err:
2511 result, err_msg = FormatError(err)
2515 if func is None: # parse error
2518 if override is not None:
2519 for key, val in override.iteritems():
2520 setattr(options, key, val)
2522 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2523 stderr_logging=True)
2525 logging.info("Command line: %s", cmdline)
2528 result = func(options, args)
2529 except (errors.GenericError, luxi.ProtocolError,
2530 JobSubmittedException), err:
2531 result, err_msg = FormatError(err)
2532 logging.exception("Error during command processing")
2534 except KeyboardInterrupt:
2535 result = constants.EXIT_FAILURE
2536 ToStderr("Aborted. Note that if the operation created any jobs, they"
2537 " might have been submitted and"
2538 " will continue to run in the background.")
2539 except IOError, err:
2540 if err.errno == errno.EPIPE:
2541 # our terminal went away, we'll exit
2542 sys.exit(constants.EXIT_FAILURE)
# NOTE(review): elided lines include the surrounding try: and the final
# return of the nics list; code kept byte-identical. Converts the
# (index, dict) pairs from --net into a dense list of NIC parameter dicts.
2549 def ParseNicOption(optvalue):
2550 """Parses the value of the --net option(s).
# Highest index + 1 determines the size of the resulting NIC list.
2554 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2555 except (TypeError, ValueError), err:
2556 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2559 nics = [{}] * nic_max
2560 for nidx, ndict in optvalue:
2563 if not isinstance(ndict, dict):
2564 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2565 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
# Enforce per-parameter types for each NIC dict.
2567 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
# NOTE(review): elided lines (numbering gaps) include several try:/else:
# statements and some OpInstanceCreate keyword arguments; code kept
# byte-identical. Validates NIC/disk/tag options and builds and submits a
# single OpInstanceCreate opcode for either create or import mode.
2574 def GenericInstanceCreate(mode, opts, args):
2575 """Add an instance to the cluster via either creation or import.
2577 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2578 @param opts: the command line options selected by the user
2580 @param args: should contain only one element, the new instance name
2582 @return: the desired exit code
2587 (pnode, snode) = SplitNodeOption(opts.node)
2592 hypervisor, hvparams = opts.hypervisor
2595 nics = ParseNicOption(opts.nics)
2599 elif mode == constants.INSTANCE_CREATE:
2600 # default of one nic, all auto
# Disk validation: diskless templates must not get any disk information.
2606 if opts.disk_template == constants.DT_DISKLESS:
2607 if opts.disks or opts.sd_size is not None:
2608 raise errors.OpPrereqError("Diskless instance but disk"
2609 " information passed", errors.ECODE_INVAL)
2612 if (not opts.disks and not opts.sd_size
2613 and mode == constants.INSTANCE_CREATE):
2614 raise errors.OpPrereqError("No disk information specified",
# --disk and -s (single disk size) are mutually exclusive.
2616 if opts.disks and opts.sd_size is not None:
2617 raise errors.OpPrereqError("Please use either the '--disk' or"
2618 " '-s' option", errors.ECODE_INVAL)
2619 if opts.sd_size is not None:
2620 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2624 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2625 except ValueError, err:
2626 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2628 disks = [{}] * disk_max
2631 for didx, ddict in opts.disks:
2633 if not isinstance(ddict, dict):
2634 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2635 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2636 elif constants.IDISK_SIZE in ddict:
# 'size' and 'adopt' are mutually exclusive per disk.
2637 if constants.IDISK_ADOPT in ddict:
2638 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2639 " (disk %d)" % didx, errors.ECODE_INVAL)
2641 ddict[constants.IDISK_SIZE] = \
2642 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2643 except ValueError, err:
2644 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2645 (didx, err), errors.ECODE_INVAL)
2646 elif constants.IDISK_ADOPT in ddict:
2647 if mode == constants.INSTANCE_IMPORT:
2648 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2649 " import", errors.ECODE_INVAL)
# Adopted disks get a placeholder size of 0; real size comes from adoption.
2650 ddict[constants.IDISK_SIZE] = 0
2652 raise errors.OpPrereqError("Missing size or adoption source for"
2653 " disk %d" % didx, errors.ECODE_INVAL)
2656 if opts.tags is not None:
2657 tags = opts.tags.split(",")
2661 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2662 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
# Mode-specific settings: create vs. import differ in OS/source handling.
2664 if mode == constants.INSTANCE_CREATE:
2667 force_variant = opts.force_variant
2670 no_install = opts.no_install
2671 identify_defaults = False
2672 elif mode == constants.INSTANCE_IMPORT:
2675 force_variant = False
2676 src_node = opts.src_node
2677 src_path = opts.src_dir
2679 identify_defaults = opts.identify_defaults
2681 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2683 op = opcodes.OpInstanceCreate(instance_name=instance,
2685 disk_template=opts.disk_template,
2687 conflicts_check=opts.conflicts_check,
2688 pnode=pnode, snode=snode,
2689 ip_check=opts.ip_check,
2690 name_check=opts.name_check,
2691 wait_for_sync=opts.wait_for_sync,
2692 file_storage_dir=opts.file_storage_dir,
2693 file_driver=opts.file_driver,
2694 iallocator=opts.iallocator,
2695 hypervisor=hypervisor,
2697 beparams=opts.beparams,
2698 osparams=opts.osparams,
2702 force_variant=force_variant,
2706 no_install=no_install,
2707 identify_defaults=identify_defaults,
2708 ignore_ipolicy=opts.ignore_ipolicy)
2710 SubmitOrSend(op, opts)
# NOTE(review): elided lines (numbering gaps) include the try:/finally:
# framing around daemon shutdown/restart in Call(); code kept byte-identical.
2714 class _RunWhileClusterStoppedHelper:
2715 """Helper class for L{RunWhileClusterStopped} to simplify state management
2718 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2719 """Initializes this class.
2721 @type feedback_fn: callable
2722 @param feedback_fn: Feedback function
2723 @type cluster_name: string
2724 @param cluster_name: Cluster name
2725 @type master_node: string
2726 @param master_node Master node name
2727 @type online_nodes: list
2728 @param online_nodes: List of names of online nodes
2731 self.feedback_fn = feedback_fn
2732 self.cluster_name = cluster_name
2733 self.master_node = master_node
2734 self.online_nodes = online_nodes
2736 self.ssh = ssh.SshRunner(self.cluster_name)
# Pre-compute the non-master nodes so Call() can restart the master last.
2738 self.nonmaster_nodes = [name for name in online_nodes
2739 if name != master_node]
2741 assert self.master_node not in self.nonmaster_nodes
2743 def _RunCmd(self, node_name, cmd):
2744 """Runs a command on the local or a remote machine.
2746 @type node_name: string
2747 @param node_name: Machine name
# None (or the master's own name) means run locally instead of via SSH.
2752 if node_name is None or node_name == self.master_node:
2753 # No need to use SSH
2754 result = utils.RunCmd(cmd)
2756 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2757 utils.ShellQuoteArgs(cmd))
2760 errmsg = ["Failed to run command %s" % result.cmd]
2762 errmsg.append("on node %s" % node_name)
2763 errmsg.append(": exitcode %s and error %s" %
2764 (result.exit_code, result.output))
2765 raise errors.OpExecError(" ".join(errmsg))
2767 def Call(self, fn, *args):
2768 """Call function while all daemons are stopped.
2771 @param fn: Function to be called
2774 # Pause watcher by acquiring an exclusive lock on watcher state file
2775 self.feedback_fn("Blocking watcher")
2776 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2778 # TODO: Currently, this just blocks. There's no timeout.
2779 # TODO: Should it be a shared lock?
2780 watcher_block.Exclusive(blocking=True)
2782 # Stop master daemons, so that no new jobs can come in and all running
2784 self.feedback_fn("Stopping master daemons")
2785 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2787 # Stop daemons on all nodes
2788 for node_name in self.online_nodes:
2789 self.feedback_fn("Stopping daemons on %s" % node_name)
2790 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2792 # All daemons are shut down now
2794 return fn(self, *args)
2795 except Exception, err:
2796 _, errmsg = FormatError(err)
2797 logging.exception("Caught exception")
2798 self.feedback_fn(errmsg)
2801 # Start cluster again, master node last
2802 for node_name in self.nonmaster_nodes + [self.master_node]:
2803 self.feedback_fn("Starting daemons on %s" % node_name)
2804 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
# Release the watcher lock once daemons are back up.
2807 watcher_block.Close()
# NOTE(review): the luxi client construction line is elided (numbering gap);
# code kept byte-identical. Gathers cluster info via the master daemon, then
# delegates to _RunWhileClusterStoppedHelper which stops/restarts daemons.
2810 def RunWhileClusterStopped(feedback_fn, fn, *args):
2811 """Calls a function while all cluster daemons are stopped.
2813 @type feedback_fn: callable
2814 @param feedback_fn: Feedback function
2816 @param fn: Function to be called when daemons are stopped
2819 feedback_fn("Gathering cluster information")
2821 # This ensures we're running on the master daemon
2824 (cluster_name, master_node) = \
2825 cl.QueryConfigValues(["cluster_name", "master_node"])
2827 online_nodes = GetOnlineNodes([], cl=cl)
2829 # Don't keep a reference to the client. The master daemon will go away.
2832 assert master_node in online_nodes
2834 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2835 online_nodes).Call(fn, *args)
# NOTE(review): elided lines (numbering gaps) include default assignments
# for numfields/unitfields, the per-row loop header and the final return of
# the result list; code kept byte-identical. Legacy text-table formatter:
# computes column widths (unless a separator is given) and right-aligns
# numeric fields.
2838 def GenerateTable(headers, fields, separator, data,
2839 numfields=None, unitfields=None,
2841 """Prints a table with headers and different fields.
2844 @param headers: dictionary mapping field names to headers for
2847 @param fields: the field names corresponding to each row in
2849 @param separator: the separator to be used; if this is None,
2850 the default 'smart' algorithm is used which computes optimal
2851 field width, otherwise just the separator is used between
2854 @param data: a list of lists, each sublist being one row to be output
2855 @type numfields: list
2856 @param numfields: a list with the fields that hold numeric
2857 values and thus should be right-aligned
2858 @type unitfields: list
2859 @param unitfields: a list with the fields that hold numeric
2860 values that should be formatted with the units field
2861 @type units: string or None
2862 @param units: the units we should use for formatting, or None for
2863 automatic choice (human-readable for non-separator usage, otherwise
2864 megabytes); this is a one-letter string
2873 if numfields is None:
2875 if unitfields is None:
2878 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2879 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2882 for field in fields:
2883 if headers and field not in headers:
2884 # TODO: handle better unknown fields (either revert to old
2885 # style of raising exception, or deal more intelligently with
2887 headers[field] = field
# Choose a printf-style per-field format: plain with separator, otherwise
# width-padded (right-aligned for numeric fields).
2888 if separator is not None:
2889 format_fields.append("%s")
2890 elif numfields.Matches(field):
2891 format_fields.append("%*s")
2893 format_fields.append("%-*s")
2895 if separator is None:
2896 mlens = [0 for name in fields]
2897 format_str = " ".join(format_fields)
2899 format_str = separator.replace("%", "%%").join(format_fields)
2904 for idx, val in enumerate(row):
2905 if unitfields.Matches(fields[idx]):
2908 except (TypeError, ValueError):
2911 val = row[idx] = utils.FormatUnit(val, units)
2912 val = row[idx] = str(val)
# Track the maximum width per column for the smart-width mode.
2913 if separator is None:
2914 mlens[idx] = max(mlens[idx], len(val))
2919 for idx, name in enumerate(fields):
2921 if separator is None:
2922 mlens[idx] = max(mlens[idx], len(hdr))
2923 args.append(mlens[idx])
2925 result.append(format_str % tuple(args))
2927 if separator is None:
2928 assert len(mlens) == len(fields)
# Last column left-aligned: no need to pad it to full width.
2930 if fields and not numfields.Matches(fields[-1]):
2936 line = ["-" for _ in fields]
2937 for idx in range(len(fields)):
2938 if separator is None:
2939 args.append(mlens[idx])
2940 args.append(line[idx])
2941 result.append(format_str % tuple(args))
# NOTE(review): the function body is elided in this listing (numbering jumps
# from 2947 to 2955); only the signature and docstring opener remain.
# Presumably returns a fixed string per truth value — confirm against the
# full source.
2946 def _FormatBool(value):
2947 """Formats a boolean value as a string.
# Module-level table used by _GetColumnFormatter: maps query field type
# (QFT_*) to (formatting callable, right-align flag). QFT_UNIT is absent on
# purpose — it needs the runtime unit argument (see _GetColumnFormatter).
2955 #: Default formatting for query results; (callback, align right)
2956 _DEFAULT_FORMAT_QUERY = {
2957 constants.QFT_TEXT: (str, False),
2958 constants.QFT_BOOL: (_FormatBool, False),
2959 constants.QFT_NUMBER: (str, True),
2960 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2961 constants.QFT_OTHER: (str, False),
2962 constants.QFT_UNKNOWN: (str, False),
# NOTE(review): lines returning the override/default formatter are elided
# (numbering gaps); code kept byte-identical.
2966 def _GetColumnFormatter(fdef, override, unit):
2967 """Returns formatting function for a field.
2969 @type fdef: L{objects.QueryFieldDefinition}
2970 @type override: dict
2971 @param override: Dictionary for overriding field formatting functions,
2972 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2974 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2975 @rtype: tuple; (callable, bool)
2976 @return: Returns the function to format a value (takes one parameter) and a
2977 boolean for aligning the value on the right-hand side
# Per-field override has the highest priority.
2980 fmt = override.get(fdef.name, None)
2984 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2986 if fdef.kind == constants.QFT_UNIT:
2987 # Can't keep this information in the static dictionary
2988 return (lambda value: utils.FormatUnit(value, unit), True)
2990 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
# No override, no unit handling, no default entry: unsupported column type.
2994 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# NOTE(review): some lines are elided (numbering gaps), e.g. the self._fn
# assignment in __init__; code kept byte-identical.
2997 class _QueryColumnFormatter:
2998 """Callable class for formatting fields of a query.
3001 def __init__(self, fn, status_fn, verbose):
3002 """Initializes this class.
3005 @param fn: Formatting function
3006 @type status_fn: callable
3007 @param status_fn: Function to report fields' status
3008 @type verbose: boolean
3009 @param verbose: whether to use verbose field descriptions or not
3013 self._status_fn = status_fn
3014 self._verbose = verbose
3016 def __call__(self, data):
3017 """Returns a field's string representation.
# Each query cell is a (status, value) pair.
3020 (status, value) = data
# Report the status so callers can aggregate statistics.
3023 self._status_fn(status)
3025 if status == constants.RS_NORMAL:
3026 return self._fn(value)
3028 assert value is None, \
3029 "Found value %r for abnormal status %s" % (value, status)
# Abnormal statuses map to a descriptive placeholder string.
3031 return FormatResultError(status, self._verbose)
# NOTE(review): the try: around the RSS_DESCRIPTION lookup and the
# verbose/normal return lines are elided (numbering gaps); code kept
# byte-identical.
3034 def FormatResultError(status, verbose):
3035 """Formats result status other than L{constants.RS_NORMAL}.
3037 @param status: The result status
3038 @type verbose: boolean
3039 @param verbose: Whether to return the verbose text
3040 @return: Text of result status
3043 assert status != constants.RS_NORMAL, \
3044 "FormatResultError called with status equal to constants.RS_NORMAL"
3046 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3048 raise NotImplementedError("Unknown status %s" % status)
# NOTE(review): elided lines (numbering gaps) include the _RecordStatus body
# and the QR_UNKNOWN/QR_NORMAL assignments; code kept byte-identical.
# Formats a QueryResponse into a table and classifies the overall result as
# normal / incomplete / unknown-fields.
3055 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3056 header=False, verbose=False):
3057 """Formats data in L{objects.QueryResponse}.
3059 @type result: L{objects.QueryResponse}
3060 @param result: result of query operation
3062 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3063 see L{utils.text.FormatUnit}
3064 @type format_override: dict
3065 @param format_override: Dictionary for overriding field formatting functions,
3066 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3067 @type separator: string or None
3068 @param separator: String used to separate fields
3070 @param header: Whether to output header row
3071 @type verbose: boolean
3072 @param verbose: whether to use verbose field descriptions or not
3081 if format_override is None:
3082 format_override = {}
# Count occurrences of each result status (RS_*) while formatting.
3084 stats = dict.fromkeys(constants.RS_ALL, 0)
3086 def _RecordStatus(status):
3091 for fdef in result.fields:
3092 assert fdef.title and fdef.name
3093 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3094 columns.append(TableColumn(fdef.title,
3095 _QueryColumnFormatter(fn, _RecordStatus,
3099 table = FormatTable(result.data, columns, header, separator)
3101 # Collect statistics
3102 assert len(stats) == len(constants.RS_ALL)
3103 assert compat.all(count >= 0 for count in stats.values())
3105 # Determine overall status. If there was no data, unknown fields must be
3106 # detected via the field definitions.
3107 if (stats[constants.RS_UNKNOWN] or
3108 (not result.data and _GetUnknownFields(result.fields))):
3110 elif compat.any(count > 0 for key, count in stats.items()
3111 if key != constants.RS_NORMAL):
3112 status = QR_INCOMPLETE
3116 return (status, table)
# Filters the field definitions down to those the server flagged as unknown.
3119 def _GetUnknownFields(fdefs):
3120 """Returns list of unknown fields included in C{fdefs}.
3122 @type fdefs: list of L{objects.QueryFieldDefinition}
3125 return [fdef for fdef in fdefs
3126 if fdef.kind == constants.QFT_UNKNOWN]
# NOTE(review): the "if unknown:" guard and the return are elided
# (numbering gaps); code kept byte-identical. Warns on stderr and —
# presumably — returns whether any unknown fields were seen (callers use
# the return value as a boolean); confirm against the full source.
3129 def _WarnUnknownFields(fdefs):
3130 """Prints a warning to stderr if a query included unknown fields.
3132 @type fdefs: list of L{objects.QueryFieldDefinition}
3135 unknown = _GetUnknownFields(fdefs)
3137 ToStderr("Warning: Queried for unknown fields %s",
3138 utils.CommaJoin(fdef.name for fdef in unknown))
# NOTE(review): elided lines (numbering gaps) include the client creation
# and the ToStdout loop over the formatted table; code kept byte-identical.
# Shared implementation of the gnt-* "list" commands: build a filter, run
# the query, format the result and map the status to an exit code.
3144 def GenericList(resource, fields, names, unit, separator, header, cl=None,
3145 format_override=None, verbose=False, force_filter=False,
3146 namefield=None, qfilter=None, isnumeric=False):
3147 """Generic implementation for listing all items of a resource.
3149 @param resource: One of L{constants.QR_VIA_LUXI}
3150 @type fields: list of strings
3151 @param fields: List of fields to query for
3152 @type names: list of strings
3153 @param names: Names of items to query for
3154 @type unit: string or None
3155 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3156 None for automatic choice (human-readable for non-separator usage,
3157 otherwise megabytes); this is a one-letter string
3158 @type separator: string or None
3159 @param separator: String used to separate fields
3161 @param header: Whether to show header row
3162 @type force_filter: bool
3163 @param force_filter: Whether to always treat names as filter
3164 @type format_override: dict
3165 @param format_override: Dictionary for overriding field formatting functions,
3166 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3167 @type verbose: boolean
3168 @param verbose: whether to use verbose field descriptions or not
3169 @type namefield: string
3170 @param namefield: Name of field to use for simple filters (see
3171 L{qlang.MakeFilter} for details)
3172 @type qfilter: list or None
3173 @param qfilter: Query filter (in addition to names)
3174 @param isnumeric: bool
3175 @param isnumeric: Whether the namefield's type is numeric, and therefore
3176 any simple filters built by namefield should use integer values to
3183 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3184 isnumeric=isnumeric)
# Combine the name-based filter with any caller-supplied filter via AND.
3187 qfilter = namefilter
3188 elif namefilter is not None:
3189 qfilter = [qlang.OP_AND, namefilter, qfilter]
3194 response = cl.Query(resource, fields, qfilter)
3196 found_unknown = _WarnUnknownFields(response.fields)
3198 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3200 format_override=format_override,
3206 assert ((found_unknown and status == QR_UNKNOWN) or
3207 (not found_unknown and status != QR_UNKNOWN))
3209 if status == QR_UNKNOWN:
3210 return constants.EXIT_UNKNOWN_FIELD
3212 # TODO: Should the list command fail if not all data could be collected?
3213 return constants.EXIT_SUCCESS
# NOTE(review): the return statement opener and remaining tuple items
# (name, title, doc) are elided (numbering gaps); code kept byte-identical.
# Maps a field definition to the row values shown by GenericListFields.
3216 def _FieldDescValues(fdef):
3217 """Helper function for L{GenericListFields} to get query field description.
3219 @type fdef: L{objects.QueryFieldDefinition}
# Translate the QFT_* kind into its human-readable name, falling back to
# the raw kind when unknown.
3225 _QFT_NAMES.get(fdef.kind, fdef.kind),
# NOTE(review): elided lines (numbering gaps) include client creation, the
# columns list opener, ToStdout for each line, and the "if found_unknown:"
# guard before EXIT_UNKNOWN_FIELD; code kept byte-identical. Shared
# implementation of the gnt-* "list-fields" commands.
3231 def GenericListFields(resource, fields, separator, header, cl=None):
3232 """Generic implementation for listing fields for a resource.
3234 @param resource: One of L{constants.QR_VIA_LUXI}
3235 @type fields: list of strings
3236 @param fields: List of fields to query for
3237 @type separator: string or None
3238 @param separator: String used to separate fields
3240 @param header: Whether to show header row
3249 response = cl.QueryFields(resource, fields)
3251 found_unknown = _WarnUnknownFields(response.fields)
3254 TableColumn("Name", str, False),
3255 TableColumn("Type", str, False),
3256 TableColumn("Title", str, False),
3257 TableColumn("Description", str, False),
3260 rows = map(_FieldDescValues, response.fields)
3262 for line in FormatTable(rows, columns, header, separator):
3266 return constants.EXIT_UNKNOWN_FIELD
3268 return constants.EXIT_SUCCESS
3272 """Describes a column for L{FormatTable}.
# NOTE(review): this is TableColumn.__init__ — the enclosing class header is
# elided from this listing, as are the self.title/self.format assignments
# (numbering gaps); code kept byte-identical.
3275 def __init__(self, title, fn, align_right):
3276 """Initializes this class.
3279 @param title: Column title
3281 @param fn: Formatting function
3282 @type align_right: bool
3283 @param align_right: Whether to align values on the right-hand side
3288 self.align_right = align_right
# NOTE(review): the lines computing `sign` ("" for right-align, "-" for
# left-align, presumably) are elided; code kept byte-identical. Builds a
# printf-style field spec like "%-10s" / "%10s".
3291 def _GetColFormatString(width, align_right):
3292 """Returns the format string for a field.
3300 return "%%%s%ss" % (sign, width)
# NOTE(review): elided lines (numbering gaps) include the else: branch for
# header=False and the per-row loop header; code kept byte-identical.
# Table formatter used by query-based listings (cf. GenerateTable for the
# legacy path): one TableColumn per column, widths computed unless a
# separator short-circuits alignment.
3303 def FormatTable(rows, columns, header, separator):
3304 """Formats data as a table.
3306 @type rows: list of lists
3307 @param rows: Row data, one list per row
3308 @type columns: list of L{TableColumn}
3309 @param columns: Column descriptions
3311 @param header: Whether to show header row
3312 @type separator: string or None
3313 @param separator: String used to separate columns
# With a header, seed the data and widths from the column titles.
3317 data = [[col.title for col in columns]]
3318 colwidth = [len(col.title) for col in columns]
3321 colwidth = [0 for _ in columns]
3325 assert len(row) == len(columns)
3327 formatted = [col.format(value) for value, col in zip(row, columns)]
3329 if separator is None:
3330 # Update column widths
3331 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3332 # Modifying a list's items while iterating is fine
3333 colwidth[idx] = max(oldwidth, len(value))
3335 data.append(formatted)
3337 if separator is not None:
3338 # Return early if a separator is used
3339 return [separator.join(row) for row in data]
3341 if columns and not columns[-1].align_right:
3342 # Avoid unnecessary spaces at end of line
3345 # Build format string
3346 fmt = " ".join([_GetColFormatString(width, col.align_right)
3347 for col, width in zip(columns, colwidth)])
3349 return [fmt % tuple(row) for row in data]
# NOTE(review): the invalid-input return and the (sec, usecs) unpacking are
# elided (numbering gaps); code kept byte-identical.
3352 def FormatTimestamp(ts):
3353 """Formats a given timestamp.
3356 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3359 @return: a string with the formatted timestamp
# Anything that is not a 2-element tuple/list is rejected (elided branch).
3362 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3366 return utils.FormatTime(sec, usecs=usecs)
# NOTE(review): elided lines (numbering gaps) include the suffix_map
# definition, two try: statements and the final return; code kept
# byte-identical. Converts a string like "2d" / "3h" into seconds.
3369 def ParseTimespec(value):
3370 """Parse a time specification.
3372 The following suffixed will be recognized:
3380 Without any suffix, the value will be taken to be in seconds.
3385 raise errors.OpPrereqError("Empty time specification passed",
# No recognized suffix: interpret the whole string as seconds (in a try:
# whose opener is elided).
3394 if value[-1] not in suffix_map:
3397 except (TypeError, ValueError):
3398 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3401 multiplier = suffix_map[value[-1]]
3403 if not value: # no data left after stripping the suffix
3404 raise errors.OpPrereqError("Invalid time specification (only"
3405 " suffix passed)", errors.ECODE_INVAL)
3407 value = int(value) * multiplier
3408 except (TypeError, ValueError):
3409 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
# NOTE(review): elided lines (numbering gaps) include client creation, the
# qfilter list initialisation, the "if filter_master:" guard and the
# selection of the name-vs-sip extractor; code kept byte-identical.
# Queries node name/offline/sip, partitions into offline/online and returns
# the online names (or secondary IPs).
3414 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3415 filter_master=False, nodegroup=None):
3416 """Returns the names of online nodes.
3418 This function will also log a warning on stderr with the names of
3421 @param nodes: if not empty, use only this subset of nodes (minus the
3423 @param cl: if not None, luxi client to use
3424 @type nowarn: boolean
3425 @param nowarn: by default, this function will output a note with the
3426 offline nodes that are skipped; if this parameter is True the
3427 note is not displayed
3428 @type secondary_ips: boolean
3429 @param secondary_ips: if True, return the secondary IPs instead of the
3430 names, useful for doing network traffic over the replication interface
3432 @type filter_master: boolean
3433 @param filter_master: if True, do not return the master node in the list
3434 (useful in coordination with secondary_ips where we cannot check our
3435 node name against the list)
3436 @type nodegroup: string
3437 @param nodegroup: If set, only return nodes in this node group
3446 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3448 if nodegroup is not None:
# Accept the nodegroup argument as either a group name or a group UUID.
3449 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3450 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3453 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3456 if len(qfilter) > 1:
3457 final_filter = [qlang.OP_AND] + qfilter
3459 assert len(qfilter) == 1
3460 final_filter = qfilter[0]
3464 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# Row layout is [(status, name), (status, offline), (status, sip)].
3466 def _IsOffline(row):
3467 (_, (_, offline), _) = row
3471 ((_, name), _, _) = row
3475 (_, _, (_, sip)) = row
3478 (offline, online) = compat.partition(result.data, _IsOffline)
3480 if offline and not nowarn:
3481 ToStderr("Note: skipping offline node(s): %s" %
3482 utils.CommaJoin(map(_GetName, offline)))
3489 return map(fn, online)
# NOTE(review): the try:, the no-args write branch, the newline write and
# stream.flush() are elided (numbering gaps); code kept byte-identical.
3492 def _ToStream(stream, txt, *args):
3493 """Write a message to a stream, bypassing the logging system
3495 @type stream: file object
3496 @param stream: the file to which we should write
3498 @param txt: the message
# %-interpolation happens only when args were passed (elided else branch
# writes txt verbatim).
3504 stream.write(txt % args)
3509 except IOError, err:
3510 if err.errno == errno.EPIPE:
3511 # our terminal went away, we'll exit
3512 sys.exit(constants.EXIT_FAILURE)
# Thin convenience wrapper: write to stdout via _ToStream.
3517 def ToStdout(txt, *args):
3518 """Write a message to stdout only, bypassing the logging system
3520 This is just a wrapper over _ToStream.
3523 @param txt: the message
3526 _ToStream(sys.stdout, txt, *args)
# Thin convenience wrapper: write to stderr via _ToStream.
3529 def ToStderr(txt, *args):
3530 """Write a message to stderr only, bypassing the logging system
3532 This is just a wrapper over _ToStream.
3535 @param txt: the message
3538 _ToStream(sys.stderr, txt, *args)
3541 class JobExecutor(object):
3542 """Class which manages the submission and execution of multiple jobs.
3544 Note that instances of this class should not be reused between
3548 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3553 self.verbose = verbose
3556 self.feedback_fn = feedback_fn
3557 self._counter = itertools.count()
3560 def _IfName(name, fmt):
3561 """Helper function for formatting name.
3569 def QueueJob(self, name, *ops):
3570 """Record a job for later submit.
3573 @param name: a description of the job, will be used in WaitJobSet
3576 SetGenericOpcodeOpts(ops, self.opts)
3577 self.queue.append((self._counter.next(), name, ops))
3579 def AddJobId(self, name, status, job_id):
3580 """Adds a job ID to the internal queue.
3583 self.jobs.append((self._counter.next(), status, job_id, name))
3585 def SubmitPending(self, each=False):
3586 """Submit all pending jobs.
3591 for (_, _, ops) in self.queue:
3592 # SubmitJob will remove the success status, but raise an exception if
3593 # the submission fails, so we'll notice that anyway.
3594 results.append([True, self.cl.SubmitJob(ops)[0]])
3596 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3597 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3598 self.jobs.append((idx, status, data, name))
3600 def _ChooseJob(self):
3601 """Choose a non-waiting/queued job to poll next.
3604 assert self.jobs, "_ChooseJob called with empty job list"
3606 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3610 for job_data, status in zip(self.jobs, result):
3611 if (isinstance(status, list) and status and
3612 status[0] in (constants.JOB_STATUS_QUEUED,
3613 constants.JOB_STATUS_WAITING,
3614 constants.JOB_STATUS_CANCELING)):
3615 # job is still present and waiting
3617 # good candidate found (either running job or lost job)
3618 self.jobs.remove(job_data)
3622 return self.jobs.pop(0)
  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    self.SubmitPending()
    # each row is (idx, status, job_id, name); row[1] is the submit status
    ok_jobs = [row[2] for row in self.jobs if row[1]]
    ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      # for failed submissions, "jid" carries the error message instead
      # of a job ID
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))
    (idx, _, jid, name) = self._ChooseJob()
    ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
    job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
    except errors.JobLost, err:
      # the job was archived before we could read its result; report the
      # formatted error instead of crashing
      _, job_result = FormatError(err)
      ToStderr("Job %s%s has been archived, cannot check its result",
               jid, self._IfName(name, " for %s"))
    except (errors.GenericError, luxi.ProtocolError), err:
      _, job_result = FormatError(err)
      # the error message will always be shown, verbose or not
      ToStderr("Job %s%s has failed: %s",
               jid, self._IfName(name, " for %s"), job_result)
    results.append((idx, success, job_result))
    # sort based on the index, then drop it
    results = [i[1:] for i in results]
  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: bool
    @param wait: whether to wait or not

    """
    # waiting delegates to GetResults (which polls until completion)
    return self.GetResults()
    self.SubmitPending()
    for _, status, result, name in self.jobs:
      # on success "result" is the job ID; on failure it is the error text
      ToStdout("%s: %s", result, name)
      ToStderr("Failure for %s: %s", name, result)
    # return only the (status, result) part of each job entry
    return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  # iterate over the effective set so defaulted keys are shown too
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      # nested parameter group: format recursively
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
      # scalar: values absent from param_dict render as "default (...)"
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
def _FormatListInfoDefault(data, def_data):
  """Format a list value, falling back to the defaults when unset.

  @param data: the custom list value, or None when not customized
  @param def_data: the default list, rendered as "default (...)" when
      C{data} is None

  """
  if data is not None:
    ret = utils.CommaJoin(data)
    ret = "default (%s)" % utils.CommaJoin(def_data)
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster policies
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
    # at cluster level the custom policy is already fully specified
    eff_ipolicy = custom_ipolicy
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX, {})
      # pair each bound (min/max) with its formatted parameter dict
      FormatParamsDictInfo(custom_minmax.get(key, {}),
                           eff_ipolicy[constants.ISPECS_MINMAX][key]))
    for key in constants.ISPECS_MINMAX_KEYS
    # std specs have no separate defaults: they are their own baseline
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
    ("enabled disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    # remaining scalar policy parameters, with "default (...)" fallback
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
3765 def _PrintSpecsParameters(buf, specs):
3766 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3767 buf.write(",".join(values))
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  stdspecs = ipolicy.get("std")
  # emit the --ipolicy-std-specs option with its parameters
  buf.write(" %s " % IPOLICY_STD_SPECS_STR)
  _PrintSpecsParameters(buf, stdspecs)
  minmax = ipolicy.get("minmax")
  minspecs = minmax.get("min")
  maxspecs = minmax.get("max")
  # only emit bounds when both min and max are present
  if minspecs and maxspecs:
    buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
    _PrintSpecsParameters(buf, minspecs)
    _PrintSpecsParameters(buf, maxspecs)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]
  # for long lists an extra "view" choice lets the user inspect the
  # affected items before deciding
  choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
  question = msg + affected
  choice = AskUser(question, choices)
  # after viewing, ask again with the full list included
  choice = AskUser(msg + affected, choices)
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  @type elements: dict
  @param elements: mapping from name to value; values equal to
      L{constants.VALUE_DEFAULT} are kept as-is, the rest are parsed
      through L{utils.ParseUnit}

  """
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = utils.ParseUnit(v)
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  """Fill an instance policy from the old-style split --specs-* options.

  @param ipolicy: instance policy dict, modified in place
  @raise errors.OpPrereqError: if a disk or memory size cannot be parsed

  """
    ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
  if ispecs_disk_size:
    ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               (ispecs_disk_size, ispecs_mem_size, err),

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,

  # first, check that the values given are correct
    forced_type = TISPECS_GROUP_TYPES
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
  # transpose from {param: {min/max/std: val}} to {min/max/std: {param: val}}
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  ipolicy[constants.ISPECS_MINMAX] = {}
  for key in constants.ISPECS_MINMAX_KEYS:
      # fill_all: complete the given bounds with the built-in defaults
      ipolicy[constants.ISPECS_MINMAX][key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
      ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
    # same for the std specs: either filled from defaults or used as-is
    ipolicy[constants.ISPECS_STD] = \
      objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                       ispecs[constants.ISPECS_STD])
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
def _ParseSpecUnit(spec, keyname):
  """Parse the disk- and memory-size values of a spec given with units.

  @param keyname: name of the spec, used only in error messages
  @raise errors.OpPrereqError: if a size value cannot be parsed

  """
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
      ret[k] = utils.ParseUnit(ret[k])
    except (TypeError, ValueError, errors.UnitParseError), err:
      raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                  " specs: %s" % (k, ret[k], keyname, err)),
def _ParseISpec(spec, keyname, required):
  """Parse and type-check one instance spec dictionary.

  @param keyname: name of the spec, used only in error messages
  @type required: bool
  @param required: whether all spec parameters must be present
  @raise errors.OpPrereqError: if a required parameter is missing or a
      value has the wrong type

  """
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  """Detect whether the minmax specs consist of a single allowed-value key.

  """
  # only a single-entry spec dict can be an allowed-value marker
  if minmax_ispecs and allowed_values and len(minmax_ispecs) == 1:
    for (key, spec) in minmax_ispecs.items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fill an instance policy from the new-style --ipolicy-*-specs options.

  @param ipolicy_out: instance policy dict, modified in place
  @raise errors.OpPrereqError: on invalid keys in the bounds specs

  """
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # the special allowed value is stored verbatim
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    for (key, spec) in minmax_ispecs.items():
      if key not in constants.ISPECS_MINMAX_KEYS:
        msg = "Invalid key in bounds instance specifications: %s" % key
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      # bounds specs must be complete, hence required=True
      minmax_out[key] = _ParseISpec(spec, key, True)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
      all values are filled

  """
  # allowed_values is only meaningful when not filling in defaults
  assert not (fill_all and allowed_values)

  # the old-style --specs-* and the new --ipolicy-*-specs options are
  # mutually exclusive
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",

  ipolicy_out = objects.MakeEmptyIPolicy()
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    # an allowed special value is stored verbatim, otherwise normalize
    # the templates into a list
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  # sanity check: only known policy keys may have been set
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  # for cluster policies, complete unspecified values with the defaults
  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  if isinstance(data, dict):
    # plain dictionary: keys are emitted in sorted order
    for key in sorted(data):
      buf.write(baseind * level)
      _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    for (key, val) in data:
      buf.write(baseind * level)
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
      buf.write(baseind * level)
      # the list-item marker consumes one character of the indent
      buf.write(baseind[1:])
    _SerializeGenericInfo(buf, item, level + 1)
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
      - dictionaries, where keys are strings and values are of any of the
        types listed here
      - lists of pairs (key, value), where key is a string and value is of
        any of the types listed here; it's a way to encode ordered
        dictionaries
      - lists of any of the types listed here

  """
  _SerializeGenericInfo(buf, data, 0)
  # strip the trailing newline, since ToStdout adds its own
  ToStdout(buf.getvalue().rstrip("\n"))