4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
48 from optparse import (OptionParser, TitledHelpFormatter,
49 Option, OptionValueError)
53 # Command line options
56 "ADD_RESERVED_IPS_OPT",
68 "CLUSTER_DOMAIN_SECRET_OPT",
86 "FILESTORE_DRIVER_OPT",
94 "GLOBAL_SHARED_FILEDIR_OPT",
100 "DEFAULT_IALLOCATOR_OPT",
101 "IDENTIFY_DEFAULTS_OPT",
102 "IGNORE_CONSIST_OPT",
104 "IGNORE_FAILURES_OPT",
105 "IGNORE_OFFLINE_OPT",
106 "IGNORE_REMOVE_FAILURES_OPT",
107 "IGNORE_SECONDARIES_OPT",
111 "MAINTAIN_NODE_HEALTH_OPT",
113 "MASTER_NETMASK_OPT",
115 "MIGRATION_MODE_OPT",
120 "NEW_CLUSTER_CERT_OPT",
121 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
122 "NEW_CONFD_HMAC_KEY_OPT",
125 "NEW_SPICE_CERT_OPT",
127 "NOCONFLICTSCHECK_OPT",
128 "NODE_FORCE_JOIN_OPT",
130 "NODE_PLACEMENT_OPT",
134 "NODRBD_STORAGE_OPT",
140 "NOMODIFY_ETCHOSTS_OPT",
141 "NOMODIFY_SSH_SETUP_OPT",
145 "NORUNTIME_CHGS_OPT",
148 "NOSSH_KEYCHECK_OPT",
162 "PREALLOC_WIPE_DISKS_OPT",
163 "PRIMARY_IP_VERSION_OPT",
169 "REMOVE_INSTANCE_OPT",
170 "REMOVE_RESERVED_IPS_OPT",
176 "SECONDARY_ONLY_OPT",
180 "SHUTDOWN_TIMEOUT_OPT",
182 "SPECS_CPU_COUNT_OPT",
183 "SPECS_DISK_COUNT_OPT",
184 "SPECS_DISK_SIZE_OPT",
185 "SPECS_MEM_SIZE_OPT",
186 "SPECS_NIC_COUNT_OPT",
187 "IPOLICY_DISK_TEMPLATES",
188 "IPOLICY_VCPU_RATIO",
194 "STARTUP_PAUSED_OPT",
203 "USE_EXTERNAL_MIP_SCRIPT",
210 "IGNORE_IPOLICY_OPT",
211 "INSTANCE_POLICY_OPTS",
212 "ALLOW_ARBITPARAMS_OPT",
213 # Generic functions for CLI programs
215 "CreateIPolicyFromOpts",
217 "GenericInstanceCreate",
223 "JobSubmittedException",
225 "RunWhileClusterStopped",
229 # Formatting functions
230 "ToStderr", "ToStdout",
233 "FormatParameterDict",
242 # command line options support infrastructure
243 "ARGS_MANY_INSTANCES",
246 "ARGS_MANY_NETWORKS",
266 "OPT_COMPL_INST_ADD_NODES",
267 "OPT_COMPL_MANY_NODES",
268 "OPT_COMPL_ONE_IALLOCATOR",
269 "OPT_COMPL_ONE_INSTANCE",
270 "OPT_COMPL_ONE_NODE",
271 "OPT_COMPL_ONE_NODEGROUP",
272 "OPT_COMPL_ONE_NETWORK",
274 "OPT_COMPL_ONE_EXTSTORAGE",
279 "COMMON_CREATE_OPTS",
285 #: Priorities (sorted)
287 ("low", constants.OP_PRIO_LOW),
288 ("normal", constants.OP_PRIO_NORMAL),
289 ("high", constants.OP_PRIO_HIGH),
292 #: Priority dictionary for easier lookup
293 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
294 # we migrate to Python 2.6
295 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
297 # Query result status for clients
300 QR_INCOMPLETE) = range(3)
302 #: Maximum batch size for ChooseJob
306 # constants used to create InstancePolicy dictionary
307 TISPECS_GROUP_TYPES = {
308 constants.ISPECS_MIN: constants.VTYPE_INT,
309 constants.ISPECS_MAX: constants.VTYPE_INT,
312 TISPECS_CLUSTER_TYPES = {
313 constants.ISPECS_MIN: constants.VTYPE_INT,
314 constants.ISPECS_MAX: constants.VTYPE_INT,
315 constants.ISPECS_STD: constants.VTYPE_INT,
320 def __init__(self, min=0, max=None): # pylint: disable=W0622
325 return ("<%s min=%s max=%s>" %
326 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # optional list of suggested values (used for shell completion)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
class ArgNode(_Argument):
  """Node argument.

  """
class ArgNetwork(_Argument):
  """Network argument.

  """
class ArgGroup(_Argument):
  """Node group argument.

  """
class ArgJobId(_Argument):
  """Job ID argument.

  """
class ArgFile(_Argument):
  """File path argument.

  """
class ArgCommand(_Argument):
  """Command argument.

  """
class ArgHost(_Argument):
  """Host argument.

  """
class ArgOs(_Argument):
  """OS argument.

  """
class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """
# Common argument-list shortcuts used by the per-script command tables
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command-line options; must carry a C{tag_type} attribute
  @param args: positional arguments; for per-object tag types the first
      element is consumed as the object name
  @return: a C{(kind, name)} tuple; name is C{None} for cluster tags
  @raise errors.ProgrammerError: if C{tag_type} is missing or unhandled
  @raise errors.OpPrereqError: if an object name is required but missing

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, so no name is needed
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    # consume the object name from the argument list (modifies args)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
456 def _ExtendTags(opts, args):
457 """Extend the args if a source file has been given.
459 This function will extend the tags with the contents of the file
460 passed in the 'tags_source' attribute of the opts parameter. A file
461 named '-' will be replaced by stdin.
464 fname = opts.tags_source
470 new_fh = open(fname, "r")
473 # we don't use the nice 'new_data = [line.strip() for line in fh]'
474 # because of python bug 1633941
476 line = new_fh.readline()
479 new_data.append(line.strip())
482 args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # NOTE(review): client construction reconstructed; confirm whether this
  # version passes query=True to GetClient
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
537 def check_unit(option, opt, value): # pylint: disable=W0613
538 """OptParsers custom converter for units.
542 return utils.ParseUnit(value)
543 except errors.UnitParseError, err:
544 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  '-' will have value=None and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # valueless entries: interpret prefixes specially
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  An ident prefixed with the 'no_' prefix means removal of the whole group
  (stored as (ident, False)); an ident prefixed with '-' (and not followed
  by a digit, to allow negative identifiers) means a reset to defaults
  (stored as (ident, None)). Neither form accepts key=val options.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value):  # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value):  # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value):  # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  Returns the (lower-cased) "default" keyword unchanged, otherwise the
  value converted to float.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

#: Set of all dynamic-completion markers, for validation
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with a "completion_suggest" attribute and the
  Ganeti-specific value types, wired to their converter functions.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
# Generic output-control options shared by most CLI scripts
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
743 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
744 action="store_true", default=False,
745 help=("Ignore offline nodes and do as much"
748 TAG_ADD_OPT = cli_option("--tags", dest="tags",
749 default=None, help="Comma-separated list of instance"
752 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
753 default=None, help="File with tag names")
755 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
756 default=False, action="store_true",
757 help=("Submit the job and return the job ID, but"
758 " don't wait for the job to finish"))
760 SYNC_OPT = cli_option("--sync", dest="do_locking",
761 default=False, action="store_true",
762 help=("Grab locks while doing the queries"
763 " in order to ensure more consistent results"))
765 DRY_RUN_OPT = cli_option("--dry-run", default=False,
767 help=("Do not execute the operation, just run the"
768 " check steps and verify it it could be"
771 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
773 help="Increase the verbosity of the operation")
775 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
776 action="store_true", dest="simulate_errors",
777 help="Debugging option that makes the operation"
778 " treat most runtime checks as failed")
780 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
781 default=True, action="store_false",
782 help="Don't wait for sync (DANGEROUS!)")
784 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
785 action="store_true", default=False,
786 help="Enable offline instance")
788 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
789 action="store_true", default=False,
790 help="Disable down instance")
792 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
793 help=("Custom disk setup (%s)" %
794 utils.CommaJoin(constants.DISK_TEMPLATES)),
795 default=None, metavar="TEMPL",
796 choices=list(constants.DISK_TEMPLATES))
798 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
799 help="Do not create any network cards for"
802 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
803 help="Relative path under default cluster-wide"
804 " file storage dir to store file-based disks",
805 default=None, metavar="<DIR>")
807 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
808 help="Driver to use for image files",
809 default="loop", metavar="<DRIVER>",
810 choices=list(constants.FILE_DRIVER))
812 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
813 help="Select nodes for the instance automatically"
814 " using the <NAME> iallocator plugin",
815 default=None, type="string",
816 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
818 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
820 help="Set the default instance allocator plugin",
821 default=None, type="string",
822 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
824 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
826 completion_suggest=OPT_COMPL_ONE_OS)
828 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
829 type="keyval", default={},
830 help="OS parameters")
832 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
833 action="store_true", default=False,
834 help="Force an unknown variant")
836 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
837 action="store_true", default=False,
838 help="Do not install the OS (will"
841 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
842 dest="allow_runtime_chgs",
843 default=True, action="store_false",
844 help="Don't allow runtime changes")
846 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
847 type="keyval", default={},
848 help="Backend parameters")
850 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
851 default={}, dest="hvparams",
852 help="Hypervisor parameters")
854 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
855 help="Disk template parameters, in the format"
856 " template:option=value,option=value,...",
857 type="identkeyval", action="append", default=[])
859 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
860 type="keyval", default={},
861 help="Memory size specs: list of key=value,"
862 " where key is one of min, max, std"
863 " (in MB or using a unit)")
865 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
866 type="keyval", default={},
867 help="CPU count specs: list of key=value,"
868 " where key is one of min, max, std")
870 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
871 dest="ispecs_disk_count",
872 type="keyval", default={},
873 help="Disk count specs: list of key=value,"
874 " where key is one of min, max, std")
876 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
877 type="keyval", default={},
878 help="Disk size specs: list of key=value,"
879 " where key is one of min, max, std"
880 " (in MB or using a unit)")
882 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
883 type="keyval", default={},
884 help="NIC count specs: list of key=value,"
885 " where key is one of min, max, std")
887 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
888 dest="ipolicy_disk_templates",
889 type="list", default=None,
890 help="Comma-separated list of"
891 " enabled disk templates")
893 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
894 dest="ipolicy_vcpu_ratio",
895 type="maybefloat", default=None,
896 help="The maximum allowed vcpu-to-cpu ratio")
898 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
899 dest="ipolicy_spindle_ratio",
900 type="maybefloat", default=None,
901 help=("The maximum allowed instances to"
904 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
905 help="Hypervisor and hypervisor options, in the"
906 " format hypervisor:option=value,option=value,...",
907 default=None, type="identkeyval")
909 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
910 help="Hypervisor and hypervisor options, in the"
911 " format hypervisor:option=value,option=value,...",
912 default=[], action="append", type="identkeyval")
914 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
915 action="store_false",
916 help="Don't check that the instance's IP"
919 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
920 default=True, action="store_false",
921 help="Don't check that the instance's name"
924 NET_OPT = cli_option("--net",
925 help="NIC parameters", default=[],
926 dest="nics", action="append", type="identkeyval")
928 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
929 dest="disks", action="append", type="identkeyval")
931 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
932 help="Comma-separated list of disks"
933 " indices to act on (e.g. 0,2) (optional,"
934 " defaults to all disks)")
936 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
937 help="Enforces a single-disk configuration using the"
938 " given disk size, in MiB unless a suffix is used",
939 default=None, type="unit", metavar="<size>")
941 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
942 dest="ignore_consistency",
943 action="store_true", default=False,
944 help="Ignore the consistency of the disks on"
947 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
948 dest="allow_failover",
949 action="store_true", default=False,
950 help="If migration is not possible fallback to"
953 NONLIVE_OPT = cli_option("--non-live", dest="live",
954 default=True, action="store_false",
955 help="Do a non-live migration (this usually means"
956 " freeze the instance, save the state, transfer and"
957 " only then resume running on the secondary node)")
959 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
961 choices=list(constants.HT_MIGRATION_MODES),
962 help="Override default migration mode (choose"
963 " either live or non-live")
965 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
966 help="Target node and optional secondary node",
967 metavar="<pnode>[:<snode>]",
968 completion_suggest=OPT_COMPL_INST_ADD_NODES)
970 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
971 action="append", metavar="<node>",
972 help="Use only this node (can be used multiple"
973 " times, if not given defaults to all nodes)",
974 completion_suggest=OPT_COMPL_ONE_NODE)
976 NODEGROUP_OPT_NAME = "--node-group"
977 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
979 help="Node group (name or uuid)",
980 metavar="<nodegroup>",
981 default=None, type="string",
982 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
984 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
986 completion_suggest=OPT_COMPL_ONE_NODE)
988 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
989 action="store_false",
990 help="Don't start the instance after creation")
992 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
993 action="store_true", default=False,
994 help="Show command instead of executing it")
996 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
997 default=False, action="store_true",
998 help="Instead of performing the migration, try to"
999 " recover from a failed cleanup. This is safe"
1000 " to run even if the instance is healthy, but it"
1001 " will create extra replication traffic and "
1002 " disrupt briefly the replication (like during the"
1005 STATIC_OPT = cli_option("-s", "--static", dest="static",
1006 action="store_true", default=False,
1007 help="Only show configuration data, not runtime data")
1009 ALL_OPT = cli_option("--all", dest="show_all",
1010 default=False, action="store_true",
1011 help="Show info on all instances on the cluster."
1012 " This can take a long time to run, use wisely")
1014 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1015 action="store_true", default=False,
1016 help="Interactive OS reinstall, lists available"
1017 " OS templates for selection")
1019 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1020 action="store_true", default=False,
1021 help="Remove the instance from the cluster"
1022 " configuration even if there are failures"
1023 " during the removal process")
1025 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1026 dest="ignore_remove_failures",
1027 action="store_true", default=False,
1028 help="Remove the instance from the"
1029 " cluster configuration even if there"
1030 " are failures during the removal"
1033 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1034 action="store_true", default=False,
1035 help="Remove the instance from the cluster")
1037 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1038 help="Specifies the new node for the instance",
1039 metavar="NODE", default=None,
1040 completion_suggest=OPT_COMPL_ONE_NODE)
1042 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1043 help="Specifies the new secondary node",
1044 metavar="NODE", default=None,
1045 completion_suggest=OPT_COMPL_ONE_NODE)
1047 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1048 default=False, action="store_true",
1049 help="Replace the disk(s) on the primary"
1050 " node (applies only to internally mirrored"
1051 " disk templates, e.g. %s)" %
1052 utils.CommaJoin(constants.DTS_INT_MIRROR))
1054 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1055 default=False, action="store_true",
1056 help="Replace the disk(s) on the secondary"
1057 " node (applies only to internally mirrored"
1058 " disk templates, e.g. %s)" %
1059 utils.CommaJoin(constants.DTS_INT_MIRROR))
1061 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1062 default=False, action="store_true",
1063 help="Lock all nodes and auto-promote as needed"
1066 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1067 default=False, action="store_true",
1068 help="Automatically replace faulty disks"
1069 " (applies only to internally mirrored"
1070 " disk templates, e.g. %s)" %
1071 utils.CommaJoin(constants.DTS_INT_MIRROR))
1073 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1074 default=False, action="store_true",
1075 help="Ignore current recorded size"
1076 " (useful for forcing activation when"
1077 " the recorded size is wrong)")
1079 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1081 completion_suggest=OPT_COMPL_ONE_NODE)
1083 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1086 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1087 help="Specify the secondary ip for the node",
1088 metavar="ADDRESS", default=None)
1090 READD_OPT = cli_option("--readd", dest="readd",
1091 default=False, action="store_true",
1092 help="Readd old node after replacing it")
1094 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1095 default=True, action="store_false",
1096 help="Disable SSH key fingerprint checking")
1098 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1099 default=False, action="store_true",
1100 help="Force the joining of a node")
1102 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1103 type="bool", default=None, metavar=_YORNO,
1104 help="Set the master_candidate flag on the node")
1106 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1107 type="bool", default=None,
1108 help=("Set the offline flag on the node"
1109 " (cluster does not communicate with offline"
1112 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1113 type="bool", default=None,
1114 help=("Set the drained flag on the node"
1115 " (excluded from allocation operations)"))
1117 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1118 type="bool", default=None, metavar=_YORNO,
1119 help="Set the master_capable flag on the node")
1121 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1122 type="bool", default=None, metavar=_YORNO,
1123 help="Set the vm_capable flag on the node")
1125 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1126 type="bool", default=None, metavar=_YORNO,
1127 help="Set the allocatable flag on a volume")
1129 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1130 help="Disable support for lvm based instances"
1132 action="store_false", default=True)
1134 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1135 dest="enabled_hypervisors",
1136 help="Comma-separated list of hypervisors",
1137 type="string", default=None)
1139 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1140 type="keyval", default={},
1141 help="NIC parameters")
1143 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1144 dest="candidate_pool_size", type="int",
1145 help="Set the candidate pool size")
1147 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1148 help=("Enables LVM and specifies the volume group"
1149 " name (cluster-wide) for disk allocation"
1150 " [%s]" % constants.DEFAULT_VG),
1151 metavar="VG", default=None)
1153 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1154 help="Destroy cluster", action="store_true")
1156 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1157 help="Skip node agreement check (dangerous)",
1158 action="store_true", default=False)
1160 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1161 help="Specify the mac prefix for the instance IP"
1162 " addresses, in the format XX:XX:XX",
1166 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1167 help="Specify the node interface (cluster-wide)"
1168 " on which the master IP address will be added"
1169 " (cluster init default: %s)" %
1170 constants.DEFAULT_BRIDGE,
1174 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1175 help="Specify the netmask of the master IP",
1179 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1180 dest="use_external_mip_script",
1181 help="Specify whether to run a user-provided"
1182 " script for the master IP address turnup and"
1183 " turndown operations",
1184 type="bool", metavar=_YORNO, default=None)
1186 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1187 help="Specify the default directory (cluster-"
1188 "wide) for storing the file-based disks [%s]" %
1189 constants.DEFAULT_FILE_STORAGE_DIR,
1191 default=constants.DEFAULT_FILE_STORAGE_DIR)
# Cluster storage, /etc/hosts, SSH-init, verification and shutdown/reboot
# options shared by several gnt-* commands.

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            # NOTE(review): a help-string continuation line
                            # appears to be missing here — confirm upstream.
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             # NOTE(review): a metavar line appears elided here
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          # NOTE(review): statement truncated — the end of the
                          # help string and closing parenthesis are elided.
                          help=("Number of seconds between repetions of the"
# Lock-release and cluster-renewal (certificate / key) options.

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               # NOTE(review): help string continuation elided
                               help="Release the locks on the secondary"

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           # NOTE(review): a default line appears elided here
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               # NOTE(review): help continuation elided
                               help=("Generate a new self-signed RAPI"

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                # NOTE(review): help continuation elided
                                help=("Generate a new self-signed SPICE"

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    # NOTE(review): the %s operand and closing
                                    # parentheses are elided here
                                    help=("Create a new HMAC key for %s" %
# Option for loading a pre-existing cluster domain secret from a file
# (used by cluster renew-crypto).
# Fix: the help text contained a duplicated word ("Load new new cluster
# domain secret"); the elided "default=None" argument is restored so the
# destination is always defined.
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
# Cluster-wide behaviour options (replication network, node health,
# user-id pools, reserved LVs).

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           # NOTE(review): help continuation
                                           # and closing parens elided
                                           help=("Create a new cluster domain"

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))
# Miscellaneous cluster/node parameter options (DRBD, IP version, opcode
# priority, OS flags, out-of-band handling, evacuation scope).

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     # NOTE(review): help continuation elided
                                     help=("Wipe disks prior to instance"

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              # NOTE(review): help continuation elided
                              help=("Whether command argument should be treated"

NO_REMEMBER_OPT = cli_option("--no-remember",
                             # NOTE(review): a dest line appears elided here
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))
# Instance-startup, group-move, state and IP-network options.

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            # NOTE(review): trailing type/action arguments and
                            # closing parenthesis appear elided
                            help=("Specify disk state information in the"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          # NOTE(review): trailing type/action arguments and
                          # closing parenthesis appear elided
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK_TYPE_OPT = cli_option("--network-type",
                              action="store", default=None, dest="network_type",
                              help="Network type: private, public, None")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  # NOTE(review): a default line appears
                                  # elided here
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Enable disk/nic hotplug")

ALLOW_ARBITPARAMS_OPT = cli_option("--allow-arbit-params",
                                   dest="allow_arbit_params",
                                   action="store_true", default=None,
                                   help="Allow arbitrary parameters"
                                   " to be passed to --disk(s)"
                                   " option (used by ExtStorage)")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# NOTE(review): most members of this list and its closing bracket appear to
# have been lost; restore from upstream before use.
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
  NOCONFLICTSCHECK_OPT,

# common instance policy options
# NOTE(review): some members and the closing bracket of this list appear to
# be missing.
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_SPINDLE_RATIO,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of env variables allowed for default args
  @return: a tuple of (function, options, args); all three are None when
      parsing failed or only usage/version output was requested

  """
  # Every env-overridable command must be a real command
  assert not (env_override - set(commands))

  # NOTE(review): the if/else around the next two assignments appears
  # elided — as written the second assignment always wins.
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    # NOTE(review): the sys.exit call appears elided here.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...
    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)
    return None, None, None

  # get command, unalias it, and look it up in commands
  # NOTE(review): the "cmd = argv[1]" / alias-membership checks appear
  # elided around the following raise statements.
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

  # Allow per-command default arguments via environment variables,
  # e.g. GNT_INSTANCE_LIST for "gnt-instance list".
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
      argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    # NOTE(review): the "return False" and the min/max counter
    # initialisations appear elided around here.
  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max
      # NOTE(review): an "if idx == last_idx:" guard appears elided above
      check_max = (arg.max is not None)
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",
  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "pnode:snode" is split on the first colon only;
  any value without a colon (including None or the empty string) is
  paired with a None secondary node.

  @return: a two-element sequence (primary, secondary)

  """
  has_secondary = bool(value) and ":" in value
  if not has_secondary:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Fix: the variant list may be None/empty (as the docstring states); the
  # previous code iterated it unconditionally and would raise TypeError on
  # None instead of returning the base name.
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    # No variants: the OS is only known under its base name
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected; the defaults
      plus the extra fields when the selection starts with "+";
      otherwise the comma-split selection

  """
  # Fix: the None case had no body (the "return default" was missing), so
  # a missing --fields option fell through instead of using the defaults.
  if selected is None:
    return default

  if selected.startswith("+"):
    # "+a,b" means "default fields plus a and b"
    return default + selected[1:].split(",")

  return selected.split(",")
1762 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list)

  """
  # NOTE(review): the "if choices is None:" guard appears elided here.
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # Default answer: the return value of the last choice
  answer = choices[-1][1]
  # Re-wrap the question text to 70 columns, keeping explicit newlines
  # NOTE(review): the "new_text = []" initialisation appears elided.
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # NOTE(review): the surrounding try/except for non-tty handling appears
  # elided; "file" is the Python 2 builtin.
    f = file("/dev/tty", "a+")
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    maps = dict([(entry[0], entry[1]) for entry in choices])
    # NOTE(review): the prompt loop header appears elided here.
      f.write("/".join(chars))
      line = f.readline(2).strip().lower()
      # Help branch: list every choice with its description
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): surrounding lines appear elided (no client creation or
  # return statement is visible); confirm against upstream.
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  # NOTE(review): the polling-loop header appears elided around here.
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
    # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
        # Track the highest log serial seen so far to avoid re-reporting
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # Walk the per-opcode statuses looking for the failing one
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

        raise errors.OpExecError("partial failure (opcode %d): %s" %

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  # NOTE(review): the "def __init__(self):" header for the following
  # docstring appears to have been lost.
  """Initializes this class.

  """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # NOTE(review): the "def __init__(self):" header for the following
  # docstring appears to have been lost.
  """Initializes this class.

  """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks backed by a luxi client (used by PollJob).
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the "self.cl = cl" assignment appears elided here;
    # the methods below read self.cl.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a caller-supplied
  # feedback function; "not changed" notifications are discarded.
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that print job progress to stdout/stderr.
  # NOTE(review): the "def __init__(self):" header appears to have been
  # lost before the following initializer body.
  """Initializes this class.

  """
    JobPollReportCbBase.__init__(self)

    # One-shot notification flags so each state is reported only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: message type (compared against C{constants.ELOG_MESSAGE})
  @param log_msg: the message payload
  @return: the message passed through L{utils.SafeEncode}

  """
  # Non-message payloads may be arbitrary structures; stringify them first
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  if reporter is None:
    # NOTE(review): the feedback_fn if/else around the next two
    # assignments appears elided.
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      reporter = StdioJobPollReportCb()
    # Passing both a reporter and a feedback function is ambiguous
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # NOTE(review): the client-creation guard appears elided here.
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    # NOTE(review): the "job = [op]" assignment appears elided here.
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # Not an error: signals the caller to print the job ID and exit
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # NOTE(review): the early-return guard for "options is None" appears
  # elided here.
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      # Translate the symbolic priority name to its numeric value
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # NOTE(review): the enclosing "def GetClient():" header and its
  # try-statement appear to have been lost above this point; this body
  # builds a luxi client, falling back to ssconf-based diagnostics when
  # no master daemon is reachable.
  # TODO: Cache object?
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): the initialisation of retcode/obuf/msg appears elided
  # here (obuf is a StringIO, msg the stringified exception).
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
        obuf.write(" node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not a failure: report the job ID the client should print
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands

  """
  # save the program name and the entire command line for later logging
  # NOTE(review): the surrounding if/else on sys.argv appears elided in
  # the following section.
    binary = os.path.basename(sys.argv[0])
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
    binary = "<unknown program>"
    cmdline = "<unknown>"

  # NOTE(review): the "try:" header for the next statement appears elided.
    func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  # NOTE(review): the "try:" header for the next statement appears elided.
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: sequence of (index, nic-parameter-dict) pairs, as
      collected by the option parser

  """
  # NOTE(review): the "try:" header for the next statement appears elided.
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # Pre-size the NIC list; entries are replaced per index below
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    # NOTE(review): the index normalisation/assignment lines appear
    # elided around here.
    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2370 def GenericInstanceCreate(mode, opts, args):
2371   """Add an instance to the cluster via either creation or import.
2373   @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2374   @param opts: the command line options selected by the user
2376   @param args: should contain only one element, the new instance name
2378   @return: the desired exit code
  # Split the --node value into primary and (optional) secondary node
2383   (pnode, snode) = SplitNodeOption(opts.node)
2388   hypervisor, hvparams = opts.hypervisor
  # NIC handling: explicit --net options are parsed; plain creation without
  # them falls through to a single auto-configured NIC
2391     nics = ParseNicOption(opts.nics)
2395   elif mode == constants.INSTANCE_CREATE:
2396     # default of one nic, all auto
  # Disk handling: --disk and the single-disk -s/sd_size option are mutually
  # exclusive, and a diskless template must receive no disk information
2402   if opts.disk_template == constants.DT_DISKLESS:
2403     if opts.disks or opts.sd_size is not None:
2404       raise errors.OpPrereqError("Diskless instance but disk"
2405                                  " information passed")
2408     if (not opts.disks and not opts.sd_size
2409         and mode == constants.INSTANCE_CREATE):
2410       raise errors.OpPrereqError("No disk information specified")
2411     if opts.disks and opts.sd_size is not None:
2412       raise errors.OpPrereqError("Please use either the '--disk' or"
2414     if opts.sd_size is not None:
  # Normalize the legacy single-size form into the --disk representation
2415       opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2419       disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2420     except ValueError, err:
2421       raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
  # NOTE(review): "[{}] * disk_max" makes all unset slots alias one shared
  # dict; slots set below are rebound, but confirm unset slots are never
  # mutated downstream (otherwise use "[{} for _ in range(disk_max)]")
2422     disks = [{}] * disk_max
2425     for didx, ddict in opts.disks:
2427       if not isinstance(ddict, dict):
2428         msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2429         raise errors.OpPrereqError(msg)
2430       elif constants.IDISK_SIZE in ddict:
  # "size" and "adopt" are mutually exclusive per disk
2431         if constants.IDISK_ADOPT in ddict:
2432           raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2433                                      " (disk %d)" % didx)
2435           ddict[constants.IDISK_SIZE] = \
2436             utils.ParseUnit(ddict[constants.IDISK_SIZE])
2437         except ValueError, err:
2438           raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2440       elif constants.IDISK_ADOPT in ddict:
2441         if mode == constants.INSTANCE_IMPORT:
2442           raise errors.OpPrereqError("Disk adoption not allowed for instance"
  # Adopted disks keep their current size; 0 is a placeholder
2444         ddict[constants.IDISK_SIZE] = 0
2446         raise errors.OpPrereqError("Missing size or adoption source for"
2450   if opts.tags is not None:
2451     tags = opts.tags.split(",")
  # Validate backend and hypervisor parameter types before building the opcode
2455   utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2456   utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  # Mode-specific settings: creation installs an OS, import pulls from a
  # source node/directory and can identify parameters equal to the defaults
2458   if mode == constants.INSTANCE_CREATE:
2461     force_variant = opts.force_variant
2464     no_install = opts.no_install
2465     identify_defaults = False
2466   elif mode == constants.INSTANCE_IMPORT:
2469     force_variant = False
2470     src_node = opts.src_node
2471     src_path = opts.src_dir
2473     identify_defaults = opts.identify_defaults
2475     raise errors.ProgrammerError("Invalid creation mode %s" % mode)
  # Build the opcode and submit it (or just print it, with --submit)
2482   op = opcodes.OpInstanceCreate(instance_name=instance,
2484                                 disk_template=opts.disk_template,
2486                                 conflicts_check=opts.conflicts_check,
2487                                 pnode=pnode, snode=snode,
2488                                 ip_check=opts.ip_check,
2489                                 name_check=opts.name_check,
2490                                 wait_for_sync=opts.wait_for_sync,
2491                                 file_storage_dir=opts.file_storage_dir,
2492                                 file_driver=opts.file_driver,
2493                                 iallocator=opts.iallocator,
2494                                 hypervisor=hypervisor,
2496                                 beparams=opts.beparams,
2497                                 osparams=opts.osparams,
2501                                 force_variant=force_variant,
2506                                 no_install=no_install,
2507                                 identify_defaults=identify_defaults,
2508                                 ignore_ipolicy=opts.ignore_ipolicy)
2510   SubmitOrSend(op, opts)
2514 class _RunWhileClusterStoppedHelper:
2515   """Helper class for L{RunWhileClusterStopped} to simplify state management
2518   def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2519     """Initializes this class.
2521     @type feedback_fn: callable
2522     @param feedback_fn: Feedback function
2523     @type cluster_name: string
2524     @param cluster_name: Cluster name
2525     @type master_node: string
2526     @param master_node: Master node name
2527     @type online_nodes: list
2528     @param online_nodes: List of names of online nodes
2531     self.feedback_fn = feedback_fn
2532     self.cluster_name = cluster_name
2533     self.master_node = master_node
2534     self.online_nodes = online_nodes
  # SSH runner used for executing commands on remote (non-master) nodes
2536     self.ssh = ssh.SshRunner(self.cluster_name)
2538     self.nonmaster_nodes = [name for name in online_nodes
2539                             if name != master_node]
2541     assert self.master_node not in self.nonmaster_nodes
2543   def _RunCmd(self, node_name, cmd):
2544     """Runs a command on the local or a remote machine.
2546     @type node_name: string
2547     @param node_name: Machine name
  # The master node is where we run, so commands for it go directly through
  # utils.RunCmd instead of SSH
2552     if node_name is None or node_name == self.master_node:
2553       # No need to use SSH
2554       result = utils.RunCmd(cmd)
2556       result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
  # Any failure aborts the whole operation with a descriptive message
2559       errmsg = ["Failed to run command %s" % result.cmd]
2561         errmsg.append("on node %s" % node_name)
2562       errmsg.append(": exitcode %s and error %s" %
2563                     (result.exit_code, result.output))
2564       raise errors.OpExecError(" ".join(errmsg))
2566   def Call(self, fn, *args):
2567     """Call function while all daemons are stopped.
2570     @param fn: Function to be called
2573     # Pause watcher by acquiring an exclusive lock on watcher state file
2574     self.feedback_fn("Blocking watcher")
2575     watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2577     # TODO: Currently, this just blocks. There's no timeout.
2578     # TODO: Should it be a shared lock?
2579     watcher_block.Exclusive(blocking=True)
2581     # Stop master daemons, so that no new jobs can come in and all running
2583     self.feedback_fn("Stopping master daemons")
2584     self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2586     # Stop daemons on all nodes
2587     for node_name in self.online_nodes:
2588       self.feedback_fn("Stopping daemons on %s" % node_name)
2589       self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2591     # All daemons are shut down now
2593       return fn(self, *args)
2594     except Exception, err:
  # The callback's failure is reported but daemons are restarted regardless
2595       _, errmsg = FormatError(err)
2596       logging.exception("Caught exception")
2597       self.feedback_fn(errmsg)
2600       # Start cluster again, master node last
2601       for node_name in self.nonmaster_nodes + [self.master_node]:
2602         self.feedback_fn("Starting daemons on %s" % node_name)
2603         self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
  # Release the watcher lock only after all daemons are back up
2606       watcher_block.Close()
2609 def RunWhileClusterStopped(feedback_fn, fn, *args):
2610   """Calls a function while all cluster daemons are stopped.
2612   @type feedback_fn: callable
2613   @param feedback_fn: Feedback function
2615   @param fn: Function to be called when daemons are stopped
2618   feedback_fn("Gathering cluster information")
2620   # This ensures we're running on the master daemon
  # NOTE(review): "cl" is a luxi client created on a line elided from this
  # excerpt (presumably GetClient()) — confirm against the full file
2623   (cluster_name, master_node) = \
2624     cl.QueryConfigValues(["cluster_name", "master_node"])
2626   online_nodes = GetOnlineNodes([], cl=cl)
2628   # Don't keep a reference to the client. The master daemon will go away.
2631   assert master_node in online_nodes
  # Delegate the actual stop/call/restart sequencing to the helper class
2633   return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2634                                        online_nodes).Call(fn, *args)
2637 def GenerateTable(headers, fields, separator, data,
2638                   numfields=None, unitfields=None,
2640   """Prints a table with headers and different fields.
2643   @param headers: dictionary mapping field names to headers for
2646   @param fields: the field names corresponding to each row in
2648   @param separator: the separator to be used; if this is None,
2649       the default 'smart' algorithm is used which computes optimal
2650       field width, otherwise just the separator is used between
2653   @param data: a list of lists, each sublist being one row to be output
2654   @type numfields: list
2655   @param numfields: a list with the fields that hold numeric
2656       values and thus should be right-aligned
2657   @type unitfields: list
2658   @param unitfields: a list with the fields that hold numeric
2659       values that should be formatted with the units field
2660   @type units: string or None
2661   @param units: the units we should use for formatting, or None for
2662       automatic choice (human-readable for non-separator usage, otherwise
2663       megabytes); this is a one-letter string
  # Normalize the optional field lists into FieldSet matchers
2672   if numfields is None:
2674   if unitfields is None:
2677   numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2678   unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
  # Build one %-format specifier per column: plain "%s" with a separator,
  # otherwise width-parameterized "%*s" (right) / "%-*s" (left)
2681   for field in fields:
2682     if headers and field not in headers:
2683       # TODO: handle better unknown fields (either revert to old
2684       # style of raising exception, or deal more intelligently with
2686       headers[field] = field
2687     if separator is not None:
2688       format_fields.append("%s")
2689     elif numfields.Matches(field):
2690       format_fields.append("%*s")
2692       format_fields.append("%-*s")
2694   if separator is None:
  # mlens tracks the maximum width seen per column for alignment
2695     mlens = [0 for name in fields]
2696     format_str = " ".join(format_fields)
  # Escape "%" in a literal separator so it survives %-formatting
2698     format_str = separator.replace("%", "%%").join(format_fields)
  # First pass over the data: unit-format/stringify values in place and
  # record column widths
2703     for idx, val in enumerate(row):
2704       if unitfields.Matches(fields[idx]):
2707         except (TypeError, ValueError):
2710           val = row[idx] = utils.FormatUnit(val, units)
2711       val = row[idx] = str(val)
2712       if separator is None:
2713         mlens[idx] = max(mlens[idx], len(val))
  # Emit the header row, widening columns to fit the header text
2718   for idx, name in enumerate(fields):
2720     if separator is None:
2721       mlens[idx] = max(mlens[idx], len(hdr))
2722       args.append(mlens[idx])
2724   result.append(format_str % tuple(args))
2726   if separator is None:
2727     assert len(mlens) == len(fields)
  # Avoid trailing whitespace when the last column is left-aligned
2729     if fields and not numfields.Matches(fields[-1]):
  # Emit each data row through the same width-parameterized format string
2735   line = ["-" for _ in fields]
2736   for idx in range(len(fields)):
2737     if separator is None:
2738       args.append(mlens[idx])
2739     args.append(line[idx])
2740   result.append(format_str % tuple(args))
2745 def _FormatBool(value):
2746 """Formats a boolean value as a string.
2754 #: Default formatting for query results; (callback, align right)
  # QFT_UNIT is deliberately absent: it needs the runtime "unit" argument and
  # is handled dynamically in _GetColumnFormatter
2755 _DEFAULT_FORMAT_QUERY = {
2756   constants.QFT_TEXT: (str, False),
2757   constants.QFT_BOOL: (_FormatBool, False),
2758   constants.QFT_NUMBER: (str, True),
2759   constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2760   constants.QFT_OTHER: (str, False),
2761   constants.QFT_UNKNOWN: (str, False),
2765 def _GetColumnFormatter(fdef, override, unit):
2766   """Returns formatting function for a field.
2768   @type fdef: L{objects.QueryFieldDefinition}
2769   @type override: dict
2770   @param override: Dictionary for overriding field formatting functions,
2771     indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2773   @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2774   @rtype: tuple; (callable, bool)
2775   @return: Returns the function to format a value (takes one parameter) and a
2776     boolean for aligning the value on the right-hand side
  # Lookup order: per-field override, then QFT_UNIT special case, then the
  # static defaults, finally an error for unknown kinds
2779   fmt = override.get(fdef.name, None)
2783   assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2785   if fdef.kind == constants.QFT_UNIT:
2786     # Can't keep this information in the static dictionary
  # The closure captures "unit"; unit columns are right-aligned
2787     return (lambda value: utils.FormatUnit(value, unit), True)
2789   fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2793   raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2796 class _QueryColumnFormatter:
2797   """Callable class for formatting fields of a query.
2800   def __init__(self, fn, status_fn, verbose):
2801     """Initializes this class.
2804     @param fn: Formatting function
2805     @type status_fn: callable
2806     @param status_fn: Function to report fields' status
2807     @type verbose: boolean
2808     @param verbose: whether to use verbose field descriptions or not
2812     self._status_fn = status_fn
2813     self._verbose = verbose
2815   def __call__(self, data):
2816     """Returns a field's string representation.
  # "data" is a (result status, value) pair as returned by a query
2819     (status, value) = data
  # Report the status so the caller can aggregate statistics
2822     self._status_fn(status)
2824     if status == constants.RS_NORMAL:
2825       return self._fn(value)
  # Abnormal statuses carry no value; render a status description instead
2827     assert value is None, \
2828       "Found value %r for abnormal status %s" % (value, status)
2830     return FormatResultError(status, self._verbose)
2833 def FormatResultError(status, verbose):
2834   """Formats result status other than L{constants.RS_NORMAL}.
2836   @param status: The result status
2837   @type verbose: boolean
2838   @param verbose: Whether to return the verbose text
2839   @return: Text of result status
2842   assert status != constants.RS_NORMAL, \
2843     "FormatResultError called with status equal to constants.RS_NORMAL"
  # RSS_DESCRIPTION maps each abnormal status to (verbose, terse) text;
  # an unlisted status is a programming error
2845     (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2847     raise NotImplementedError("Unknown status %s" % status)
2854 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2855                       header=False, verbose=False):
2856   """Formats data in L{objects.QueryResponse}.
2858   @type result: L{objects.QueryResponse}
2859   @param result: result of query operation
2861   @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2862     see L{utils.text.FormatUnit}
2863   @type format_override: dict
2864   @param format_override: Dictionary for overriding field formatting functions,
2865     indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2866   @type separator: string or None
2867   @param separator: String used to separate fields
2869   @param header: Whether to output header row
2870   @type verbose: boolean
2871   @param verbose: whether to use verbose field descriptions or not
2880   if format_override is None:
2881     format_override = {}
  # Per-status counters, updated by the formatters via _RecordStatus
2883   stats = dict.fromkeys(constants.RS_ALL, 0)
2885   def _RecordStatus(status):
  # Build one TableColumn per field, with the right formatter and alignment
2890   for fdef in result.fields:
2891     assert fdef.title and fdef.name
2892     (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2893     columns.append(TableColumn(fdef.title,
2894                                _QueryColumnFormatter(fn, _RecordStatus,
  # Formatting the table also populates "stats" through the callbacks
2898   table = FormatTable(result.data, columns, header, separator)
2900   # Collect statistics
2901   assert len(stats) == len(constants.RS_ALL)
2902   assert compat.all(count >= 0 for count in stats.values())
2904   # Determine overall status. If there was no data, unknown fields must be
2905   # detected via the field definitions.
2906   if (stats[constants.RS_UNKNOWN] or
2907       (not result.data and _GetUnknownFields(result.fields))):
2909   elif compat.any(count > 0 for key, count in stats.items()
2910                   if key != constants.RS_NORMAL):
2911     status = QR_INCOMPLETE
2915   return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  # A field is "unknown" when the master could not resolve its name
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
2928 def _WarnUnknownFields(fdefs):
2929   """Prints a warning to stderr if a query included unknown fields.
2931   @type fdefs: list of L{objects.QueryFieldDefinition}
2934   unknown = _GetUnknownFields(fdefs)
  # Warn once with all unknown field names joined together
2936     ToStderr("Warning: Queried for unknown fields %s",
2937              utils.CommaJoin(fdef.name for fdef in unknown))
2943 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2944                 format_override=None, verbose=False, force_filter=False,
2945                 namefield=None, qfilter=None):
2946   """Generic implementation for listing all items of a resource.
2948   @param resource: One of L{constants.QR_VIA_LUXI}
2949   @type fields: list of strings
2950   @param fields: List of fields to query for
2951   @type names: list of strings
2952   @param names: Names of items to query for
2953   @type unit: string or None
2954   @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2955     None for automatic choice (human-readable for non-separator usage,
2956     otherwise megabytes); this is a one-letter string
2957   @type separator: string or None
2958   @param separator: String used to separate fields
2960   @param header: Whether to show header row
2961   @type force_filter: bool
2962   @param force_filter: Whether to always treat names as filter
2963   @type format_override: dict
2964   @param format_override: Dictionary for overriding field formatting functions,
2965     indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2966   @type verbose: boolean
2967   @param verbose: whether to use verbose field descriptions or not
2968   @type namefield: string
2969   @param namefield: Name of field to use for simple filters (see
2970     L{qlang.MakeFilter} for details)
2971   @type qfilter: list or None
2972   @param qfilter: Query filter (in addition to names)
  # Combine the name-based filter with any caller-provided filter via OP_AND
2978   namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield)
2981     qfilter = namefilter
2982   elif namefilter is not None:
2983     qfilter = [qlang.OP_AND, namefilter, qfilter]
2988   response = cl.Query(resource, fields, qfilter)
2990   found_unknown = _WarnUnknownFields(response.fields)
2992   (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2994                                      format_override=format_override,
  # The two unknown-field detection paths must agree
3000   assert ((found_unknown and status == QR_UNKNOWN) or
3001           (not found_unknown and status != QR_UNKNOWN))
3003   if status == QR_UNKNOWN:
3004     return constants.EXIT_UNKNOWN_FIELD
3006   # TODO: Should the list command fail if not all data could be collected?
3007   return constants.EXIT_SUCCESS
3010 def GenericListFields(resource, fields, separator, header, cl=None):
3011   """Generic implementation for listing fields for a resource.
3013   @param resource: One of L{constants.QR_VIA_LUXI}
3014   @type fields: list of strings
3015   @param fields: List of fields to query for
3016   @type separator: string or None
3017   @param separator: String used to separate fields
3019   @param header: Whether to show header row
  # Ask the master for the field definitions of this resource
3028   response = cl.QueryFields(resource, fields)
3030   found_unknown = _WarnUnknownFields(response.fields)
  # Fixed three-column layout: field name, title and description
3033     TableColumn("Name", str, False),
3034     TableColumn("Title", str, False),
3035     TableColumn("Description", str, False),
3038   rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
3040   for line in FormatTable(rows, columns, header, separator):
  # Asking for an unknown field is reflected in the exit code
3044     return constants.EXIT_UNKNOWN_FIELD
3046   return constants.EXIT_SUCCESS
3050 """Describes a column for L{FormatTable}.
3053   def __init__(self, title, fn, align_right):
3054     """Initializes this class.
3057     @param title: Column title
3059     @param fn: Formatting function
3060     @type align_right: bool
3061     @param align_right: Whether to align values on the right-hand side
  # NOTE(review): the assignments of the title and formatting-function
  # attributes appear elided from this excerpt — confirm in the full file
3066     self.align_right = align_right
3069 def _GetColFormatString(width, align_right):
3070 """Returns the format string for a field.
3078 return "%%%s%ss" % (sign, width)
3081 def FormatTable(rows, columns, header, separator):
3082   """Formats data as a table.
3084   @type rows: list of lists
3085   @param rows: Row data, one list per row
3086   @type columns: list of L{TableColumn}
3087   @param columns: Column descriptions
3089   @param header: Whether to show header row
3090   @type separator: string or None
3091   @param separator: String used to separate columns
  # With a header, start the output with the title row and seed the column
  # widths from the title lengths
3095     data = [[col.title for col in columns]]
3096     colwidth = [len(col.title) for col in columns]
3099     colwidth = [0 for _ in columns]
  # Format every cell with its column's formatter
3103     assert len(row) == len(columns)
3105     formatted = [col.format(value) for value, col in zip(row, columns)]
3107     if separator is None:
3108       # Update column widths
3109       for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3110         # Modifying a list's items while iterating is fine
3111         colwidth[idx] = max(oldwidth, len(value))
3113     data.append(formatted)
3115   if separator is not None:
3116     # Return early if a separator is used
3117     return [separator.join(row) for row in data]
3119   if columns and not columns[-1].align_right:
3120     # Avoid unnecessary spaces at end of line
3123   # Build format string
3124   fmt = " ".join([_GetColFormatString(width, col.align_right)
3125                   for col, width in zip(columns, colwidth)])
3127   return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # Anything that is not a (seconds, microseconds) pair cannot be formatted
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
3147 def ParseTimespec(value):
3148   """Parse a time specification.
3150   The following suffixes will be recognized:
3158   Without any suffix, the value will be taken to be in seconds.
  # Empty input is rejected outright
3163     raise errors.OpPrereqError("Empty time specification passed")
  # No recognized suffix: the whole string must be a plain integer (seconds)
3171   if value[-1] not in suffix_map:
3174     except (TypeError, ValueError):
3175       raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  # Recognized suffix: strip it, then scale the remaining integer
3177     multiplier = suffix_map[value[-1]]
3179     if not value: # no data left after stripping the suffix
3180       raise errors.OpPrereqError("Invalid time specification (only"
3183       value = int(value) * multiplier
3184     except (TypeError, ValueError):
3185       raise errors.OpPrereqError("Invalid time specification '%s'" % value)
3189 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3190                    filter_master=False, nodegroup=None):
3191   """Returns the names of online nodes.
3193   This function will also log a warning on stderr with the names of
3196   @param nodes: if not empty, use only this subset of nodes (minus the
3198   @param cl: if not None, luxi client to use
3199   @type nowarn: boolean
3200   @param nowarn: by default, this function will output a note with the
3201     offline nodes that are skipped; if this parameter is True the
3202     note is not displayed
3203   @type secondary_ips: boolean
3204   @param secondary_ips: if True, return the secondary IPs instead of the
3205     names, useful for doing network traffic over the replication interface
3207   @type filter_master: boolean
3208   @param filter_master: if True, do not return the master node in the list
3209     (useful in coordination with secondary_ips where we cannot check our
3210     node name against the list)
3211   @type nodegroup: string
3212   @param nodegroup: If set, only return nodes in this node group
  # Build the query filter incrementally from the optional constraints
3221     qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3223   if nodegroup is not None:
  # A node group can be given either by name or by UUID
3224     qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3225                     [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3228     qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
  # Multiple constraints are AND-ed together; a single one is used as-is
3231   if len(qfilter) > 1:
3232     final_filter = [qlang.OP_AND] + qfilter
3234     assert len(qfilter) == 1
3235     final_filter = qfilter[0]
3239   result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
  # Each row is [(status, name), (status, offline), (status, sip)]; the
  # helpers below unpack the value of the respective column
3241   def _IsOffline(row):
3242     (_, (_, offline), _) = row
3246     ((_, name), _, _) = row
3250     (_, _, (_, sip)) = row
3253   (offline, online) = compat.partition(result.data, _IsOffline)
3255   if offline and not nowarn:
3256     ToStderr("Note: skipping offline node(s): %s" %
3257              utils.CommaJoin(map(_GetName, offline)))
  # "fn" extracts either the name or the secondary IP, per secondary_ips
3264   return map(fn, online)
3267 def _ToStream(stream, txt, *args):
3268   """Write a message to a stream, bypassing the logging system
3270   @type stream: file object
3271   @param stream: the file to which we should write
3273   @param txt: the message
  # With arguments, "txt" is treated as a %-format string
3279       stream.write(txt % args)
  # A broken pipe means the terminal went away; exit instead of crashing
3284   except IOError, err:
3285     if err.errno == errno.EPIPE:
3286       # our terminal went away, we'll exit
3287       sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  # Delegate to the shared stream writer, targeting standard output
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  # Delegate to the shared stream writer, targeting standard error
  _ToStream(sys.stderr, txt, *args)
3316 class JobExecutor(object):
3317   """Class which manages the submission and execution of multiple jobs.
3319   Note that instances of this class should not be reused between
3323   def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3328     self.verbose = verbose
3331     self.feedback_fn = feedback_fn
  # Monotonic counter used to preserve submission order in results
3332     self._counter = itertools.count()
3335   def _IfName(name, fmt):
3336     """Helper function for formatting name.
3344   def QueueJob(self, name, *ops):
3345     """Record a job for later submit.
3348     @param name: a description of the job, will be used in WaitJobSet
3351     SetGenericOpcodeOpts(ops, self.opts)
3352     self.queue.append((self._counter.next(), name, ops))
3354   def AddJobId(self, name, status, job_id):
3355     """Adds a job ID to the internal queue.
3358     self.jobs.append((self._counter.next(), status, job_id, name))
3360   def SubmitPending(self, each=False):
3361     """Submit all pending jobs.
  # each=True submits jobs one by one; otherwise one SubmitManyJobs call
3366       for (_, _, ops) in self.queue:
3367         # SubmitJob will remove the success status, but raise an exception if
3368         # the submission fails, so we'll notice that anyway.
3369         results.append([True, self.cl.SubmitJob(ops)[0]])
3371       results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
  # Pair each submission result with its queued entry, keeping the index
3372     for ((status, data), (idx, name, _)) in zip(results, self.queue):
3373       self.jobs.append((idx, status, data, name))
3375   def _ChooseJob(self):
3376     """Choose a non-waiting/queued job to poll next.
3379     assert self.jobs, "_ChooseJob called with empty job list"
  # Only query a bounded batch of job IDs at a time
3381     result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3385     for job_data, status in zip(self.jobs, result):
3386       if (isinstance(status, list) and status and
3387           status[0] in (constants.JOB_STATUS_QUEUED,
3388                         constants.JOB_STATUS_WAITING,
3389                         constants.JOB_STATUS_CANCELING)):
3390         # job is still present and waiting
3392       # good candidate found (either running job or lost job)
3393       self.jobs.remove(job_data)
  # Nothing better found: fall back to the oldest job
3397     return self.jobs.pop(0)
3399   def GetResults(self):
3400     """Wait for and return the results of all jobs.
3403     @return: list of tuples (success, job results), in the same order
3404         as the submitted jobs; if a job has failed, instead of the result
3405         there will be the error message
  # Submit anything still queued before collecting results
3409       self.SubmitPending()
3412       ok_jobs = [row[2] for row in self.jobs if row[1]]
3414         ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3416     # first, remove any non-submitted jobs
3417     self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3418     for idx, _, jid, name in failures:
3419       ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3420       results.append((idx, False, jid))
  # Poll the remaining jobs one at a time, picking runnable ones first
3423       (idx, _, jid, name) = self._ChooseJob()
3424       ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3426         job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3428       except errors.JobLost, err:
3429         _, job_result = FormatError(err)
3430         ToStderr("Job %s%s has been archived, cannot check its result",
3431                  jid, self._IfName(name, " for %s"))
3433       except (errors.GenericError, luxi.ProtocolError), err:
3434         _, job_result = FormatError(err)
3436         # the error message will always be shown, verbose or not
3437         ToStderr("Job %s%s has failed: %s",
3438                  jid, self._IfName(name, " for %s"), job_result)
3440       results.append((idx, success, job_result))
3442     # sort based on the index, then drop it
3444     results = [i[1:] for i in results]
3448   def WaitOrShow(self, wait):
3449     """Wait for job results or only print the job IDs.
3452     @param wait: whether to wait or not
3456       return self.GetResults()
  # Not waiting: just submit and report per-job submission status
3459       self.SubmitPending()
3460     for _, status, result, name in self.jobs:
3462         ToStdout("%s: %s", result, name)
3464         ToStderr("Failure for %s: %s", name, result)
3465     return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      # Nested parameter group: recurse with one more indentation level
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      # Scalar value: show the explicitly-set value if present, otherwise
      # mark it as inherited from the defaults
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)
3495 def ConfirmOperation(names, list_type, text, extra=""):
3496   """Ask the user to confirm an operation on a list of list_type.
3498   This function is used to request confirmation for doing an operation
3499   on a given list of list_type.
3502   @param names: the list of names that we display when
3503     we ask for confirmation
3504   @type list_type: str
3505   @param list_type: Human readable name for elements in the list (e.g. nodes)
3507   @param text: the operation that the user should confirm
3509   @return: True or False depending on user's confirmation.
3513   msg = ("The %s will operate on %d %s.\n%s"
3514          "Do you want to continue?" % (text, count, list_type, extra))
3515   affected = (("\nAffected %s:\n" % list_type) +
3516               "\n".join(["  %s" % name for name in names]))
3518   choices = [("y", True, "Yes, execute the %s" % text),
3519              ("n", False, "No, abort the %s" % text)]
  # For long lists, offer a "view" choice instead of printing all names
  # up-front; choosing it re-asks with the full list appended
3522     choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3525     question = msg + affected
3527   choice = AskUser(question, choices)
3530     choice = AskUser(msg + affected, choices)
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Values equal to L{constants.VALUE_DEFAULT} are kept verbatim (they mean
  "use the cluster default"); everything else is run through
  L{utils.ParseUnit}.

  @type elements: dict
  @param elements: mapping of keys to values which may carry unit suffixes

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
3547 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3548 ispecs_cpu_count=None,
3549 ispecs_disk_count=None,
3550 ispecs_disk_size=None,
3551 ispecs_nic_count=None,
3552 ipolicy_disk_templates=None,
3553 ipolicy_vcpu_ratio=None,
3554 ipolicy_spindle_ratio=None,
3555 group_ipolicy=False,
3556 allowed_values=None,
3558 """Creation of instance policy based on command line options.
3560 @param fill_all: whether for cluster policies we should ensure that
3561 all values are filled
3567 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3568 if ispecs_disk_size:
3569 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3570 except (TypeError, ValueError, errors.UnitParseError), err:
3571 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3573 (ispecs_disk_size, ispecs_mem_size, err),
3576 # prepare ipolicy dict
3577 ipolicy_transposed = {
3578 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3579 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3580 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3581 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3582 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3585 # first, check that the values given are correct
3587 forced_type = TISPECS_GROUP_TYPES
3589 forced_type = TISPECS_CLUSTER_TYPES
3591 for specs in ipolicy_transposed.values():
3592 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3595 ipolicy_out = objects.MakeEmptyIPolicy()
3596 for name, specs in ipolicy_transposed.iteritems():
3597 assert name in constants.ISPECS_PARAMETERS
3598 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3599 ipolicy_out[key][name] = val
3601 # no filldict for non-dicts
3602 if not group_ipolicy and fill_all:
3603 if ipolicy_disk_templates is None:
3604 ipolicy_disk_templates = constants.DISK_TEMPLATES
3605 if ipolicy_vcpu_ratio is None:
3606 ipolicy_vcpu_ratio = \
3607 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3608 if ipolicy_spindle_ratio is None:
3609 ipolicy_spindle_ratio = \
3610 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
3611 if ipolicy_disk_templates is not None:
3612 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3613 if ipolicy_vcpu_ratio is not None:
3614 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3615 if ipolicy_spindle_ratio is not None:
3616 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3618 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)