4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 "ENABLED_DISK_TEMPLATES_OPT",
89 "FILESTORE_DRIVER_OPT",
97 "GLOBAL_SHARED_FILEDIR_OPT",
102 "DEFAULT_IALLOCATOR_OPT",
103 "IDENTIFY_DEFAULTS_OPT",
104 "IGNORE_CONSIST_OPT",
106 "IGNORE_FAILURES_OPT",
107 "IGNORE_OFFLINE_OPT",
108 "IGNORE_REMOVE_FAILURES_OPT",
109 "IGNORE_SECONDARIES_OPT",
113 "MAINTAIN_NODE_HEALTH_OPT",
115 "MASTER_NETMASK_OPT",
117 "MIGRATION_MODE_OPT",
121 "NEW_CLUSTER_CERT_OPT",
122 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
123 "NEW_CONFD_HMAC_KEY_OPT",
127 "NEW_SPICE_CERT_OPT",
129 "NOCONFLICTSCHECK_OPT",
130 "NODE_FORCE_JOIN_OPT",
132 "NODE_PLACEMENT_OPT",
136 "NODRBD_STORAGE_OPT",
142 "NOMODIFY_ETCHOSTS_OPT",
143 "NOMODIFY_SSH_SETUP_OPT",
147 "NORUNTIME_CHGS_OPT",
150 "NOSSH_KEYCHECK_OPT",
164 "PREALLOC_WIPE_DISKS_OPT",
165 "PRIMARY_IP_VERSION_OPT",
172 "REMOVE_INSTANCE_OPT",
173 "REMOVE_RESERVED_IPS_OPT",
179 "SECONDARY_ONLY_OPT",
184 "SHUTDOWN_TIMEOUT_OPT",
186 "SPECS_CPU_COUNT_OPT",
187 "SPECS_DISK_COUNT_OPT",
188 "SPECS_DISK_SIZE_OPT",
189 "SPECS_MEM_SIZE_OPT",
190 "SPECS_NIC_COUNT_OPT",
191 "IPOLICY_DISK_TEMPLATES",
192 "IPOLICY_VCPU_RATIO",
198 "STARTUP_PAUSED_OPT",
207 "USE_EXTERNAL_MIP_SCRIPT",
215 "IGNORE_IPOLICY_OPT",
216 "INSTANCE_POLICY_OPTS",
217 # Generic functions for CLI programs
219 "CreateIPolicyFromOpts",
221 "GenericInstanceCreate",
227 "JobSubmittedException",
229 "RunWhileClusterStopped",
233 # Formatting functions
234 "ToStderr", "ToStdout",
237 "FormatParamsDictInfo",
248 # command line options support infrastructure
249 "ARGS_MANY_INSTANCES",
252 "ARGS_MANY_NETWORKS",
272 "OPT_COMPL_INST_ADD_NODES",
273 "OPT_COMPL_MANY_NODES",
274 "OPT_COMPL_ONE_IALLOCATOR",
275 "OPT_COMPL_ONE_INSTANCE",
276 "OPT_COMPL_ONE_NODE",
277 "OPT_COMPL_ONE_NODEGROUP",
278 "OPT_COMPL_ONE_NETWORK",
280 "OPT_COMPL_ONE_EXTSTORAGE",
285 "COMMON_CREATE_OPTS",
#: Priorities (sorted from lowest to highest)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
308 #: Maximum batch size for ChooseJob
# Constants used to create the InstancePolicy dictionary; group-level
# policies only carry min/max, cluster-level ones also carry std.
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }
#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
337 def __init__(self, min=0, max=None): # pylint: disable=W0622
342 return ("<%s min=%s max=%s>" %
343 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """
# Pre-built argument specifications for the most common CLI signatures:
# "MANY" accepts any number of names, "ONE" requires exactly one.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed options; must carry a C{tag_type} attribute
  @param args: positional arguments; for per-object tag types the first
      element is popped off and used as the object name
  @return: a (kind, name) tuple
  @raise errors.ProgrammerError: if C{tag_type} is missing or unknown
  @raise errors.OpPrereqError: if a name is required but not given

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name needed
    return kind, ""
  if kind in (constants.TAG_NODEGROUP,
              constants.TAG_NODE,
              constants.TAG_NETWORK,
              constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed options, read for the C{tags_source} attribute
  @param args: list of tag names, extended in place

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # stdin must stay open for the rest of the program
    if new_fh is not sys.stdin:
      new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  # sorted output for deterministic display
  for tag in sorted(result):
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
556 def check_unit(option, opt, value): # pylint: disable=W0613
557 """OptParsers custom converter for units.
561 return utils.ParseUnit(value)
562 except errors.UnitParseError, err:
563 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  '-' will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if not data:
    return kv_dict
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      key, val = elem.split("=", 1)
    elif elem.startswith(NO_PREFIX):
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      key, val = elem[len(UN_PREFIX):], None
    else:
      key, val = elem, True
    if key in kv_dict:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" in value:
    ident, rest = value.split(":", 1)
  else:
    ident, rest = value, ""

  if ident.startswith(NO_PREFIX):
    # "no_<ident>" removes the whole group; extra options are invalid
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    return (ident[len(NO_PREFIX):], False)

  # "-<ident>" resets the group to defaults, but a leading digit after the
  # prefix means a negative index (e.g. "-1"), not the reset syntax
  if (ident.startswith(UN_PREFIX) and
      (len(ident) <= len(UN_PREFIX) or
       not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    return (ident[len(UN_PREFIX):], None)

  return (ident, _SplitKeyVal(opt, rest))
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value in (constants.VALUE_FALSE, "no"):
    return False
  if value in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  The literal "default" (case-insensitive) is passed through unchanged;
  anything else must parse as a float.

  """
  value = value.lower()
  if value == constants.VALUE_DEFAULT:
    return value
  return float(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Adds the C{completion_suggest} attribute and registers the custom
  value types defined in this module with their checker functions.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
# Generic output-control options shared by most commands

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
762 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
763 action="store_true", default=False,
764 help=("Ignore offline nodes and do as much"
767 TAG_ADD_OPT = cli_option("--tags", dest="tags",
768 default=None, help="Comma-separated list of instance"
771 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
772 default=None, help="File with tag names")
774 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
775 default=False, action="store_true",
776 help=("Submit the job and return the job ID, but"
777 " don't wait for the job to finish"))
779 SYNC_OPT = cli_option("--sync", dest="do_locking",
780 default=False, action="store_true",
781 help=("Grab locks while doing the queries"
782 " in order to ensure more consistent results"))
784 DRY_RUN_OPT = cli_option("--dry-run", default=False,
786 help=("Do not execute the operation, just run the"
787 " check steps and verify if it could be"
790 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
792 help="Increase the verbosity of the operation")
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))
821 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
822 help="Do not create any network cards for"
825 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
826 help="Relative path under default cluster-wide"
827 " file storage dir to store file-based disks",
828 default=None, metavar="<DIR>")
830 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
831 help="Driver to use for image files",
832 default="loop", metavar="<DRIVER>",
833 choices=list(constants.FILE_DRIVER))
835 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
836 help="Select nodes for the instance automatically"
837 " using the <NAME> iallocator plugin",
838 default=None, type="string",
839 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
841 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
843 help="Set the default instance"
845 default=None, type="string",
846 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
848 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
850 completion_suggest=OPT_COMPL_ONE_OS)
852 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
853 type="keyval", default={},
854 help="OS parameters")
856 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
857 action="store_true", default=False,
858 help="Force an unknown variant")
860 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
861 action="store_true", default=False,
862 help="Do not install the OS (will"
865 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
866 dest="allow_runtime_chgs",
867 default=True, action="store_false",
868 help="Don't allow runtime changes")
870 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
871 type="keyval", default={},
872 help="Backend parameters")
874 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
875 default={}, dest="hvparams",
876 help="Hypervisor parameters")
878 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
879 help="Disk template parameters, in the format"
880 " template:option=value,option=value,...",
881 type="identkeyval", action="append", default=[])
883 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
884 type="keyval", default={},
885 help="Memory size specs: list of key=value,"
886 " where key is one of min, max, std"
887 " (in MB or using a unit)")
889 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
890 type="keyval", default={},
891 help="CPU count specs: list of key=value,"
892 " where key is one of min, max, std")
894 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
895 dest="ispecs_disk_count",
896 type="keyval", default={},
897 help="Disk count specs: list of key=value,"
898 " where key is one of min, max, std")
900 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
901 type="keyval", default={},
902 help="Disk size specs: list of key=value,"
903 " where key is one of min, max, std"
904 " (in MB or using a unit)")
906 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
907 type="keyval", default={},
908 help="NIC count specs: list of key=value,"
909 " where key is one of min, max, std")
911 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
912 dest="ipolicy_disk_templates",
913 type="list", default=None,
914 help="Comma-separated list of"
915 " enabled disk templates")
917 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
918 dest="ipolicy_vcpu_ratio",
919 type="maybefloat", default=None,
920 help="The maximum allowed vcpu-to-cpu ratio")
922 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
923 dest="ipolicy_spindle_ratio",
924 type="maybefloat", default=None,
925 help=("The maximum allowed instances to"
928 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
929 help="Hypervisor and hypervisor options, in the"
930 " format hypervisor:option=value,option=value,...",
931 default=None, type="identkeyval")
933 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
934 help="Hypervisor and hypervisor options, in the"
935 " format hypervisor:option=value,option=value,...",
936 default=[], action="append", type="identkeyval")
938 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
939 action="store_false",
940 help="Don't check that the instance's IP"
943 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
944 default=True, action="store_false",
945 help="Don't check that the instance's name"
948 NET_OPT = cli_option("--net",
949 help="NIC parameters", default=[],
950 dest="nics", action="append", type="identkeyval")
952 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
953 dest="disks", action="append", type="identkeyval")
955 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
956 help="Comma-separated list of disks"
957 " indices to act on (e.g. 0,2) (optional,"
958 " defaults to all disks)")
960 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
961 help="Enforces a single-disk configuration using the"
962 " given disk size, in MiB unless a suffix is used",
963 default=None, type="unit", metavar="<size>")
965 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
966 dest="ignore_consistency",
967 action="store_true", default=False,
968 help="Ignore the consistency of the disks on"
971 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
972 dest="allow_failover",
973 action="store_true", default=False,
974 help="If migration is not possible fallback to"
977 NONLIVE_OPT = cli_option("--non-live", dest="live",
978 default=True, action="store_false",
979 help="Do a non-live migration (this usually means"
980 " freeze the instance, save the state, transfer and"
981 " only then resume running on the secondary node)")
983 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
985 choices=list(constants.HT_MIGRATION_MODES),
986 help="Override default migration mode (choose"
987 " either live or non-live")
989 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
990 help="Target node and optional secondary node",
991 metavar="<pnode>[:<snode>]",
992 completion_suggest=OPT_COMPL_INST_ADD_NODES)
994 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
995 action="append", metavar="<node>",
996 help="Use only this node (can be used multiple"
997 " times, if not given defaults to all nodes)",
998 completion_suggest=OPT_COMPL_ONE_NODE)
1000 NODEGROUP_OPT_NAME = "--node-group"
1001 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1003 help="Node group (name or uuid)",
1004 metavar="<nodegroup>",
1005 default=None, type="string",
1006 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1008 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1010 completion_suggest=OPT_COMPL_ONE_NODE)
1012 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1013 action="store_false",
1014 help="Don't start the instance after creation")
1016 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1017 action="store_true", default=False,
1018 help="Show command instead of executing it")
1020 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1021 default=False, action="store_true",
1022 help="Instead of performing the migration, try to"
1023 " recover from a failed cleanup. This is safe"
1024 " to run even if the instance is healthy, but it"
1025 " will create extra replication traffic and "
1026 " disrupt briefly the replication (like during the"
1029 STATIC_OPT = cli_option("-s", "--static", dest="static",
1030 action="store_true", default=False,
1031 help="Only show configuration data, not runtime data")
1033 ALL_OPT = cli_option("--all", dest="show_all",
1034 default=False, action="store_true",
1035 help="Show info on all instances on the cluster."
1036 " This can take a long time to run, use wisely")
1038 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1039 action="store_true", default=False,
1040 help="Interactive OS reinstall, lists available"
1041 " OS templates for selection")
1043 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1044 action="store_true", default=False,
1045 help="Remove the instance from the cluster"
1046 " configuration even if there are failures"
1047 " during the removal process")
1049 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1050 dest="ignore_remove_failures",
1051 action="store_true", default=False,
1052 help="Remove the instance from the"
1053 " cluster configuration even if there"
1054 " are failures during the removal"
1057 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1058 action="store_true", default=False,
1059 help="Remove the instance from the cluster")
1061 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1062 help="Specifies the new node for the instance",
1063 metavar="NODE", default=None,
1064 completion_suggest=OPT_COMPL_ONE_NODE)
1066 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1067 help="Specifies the new secondary node",
1068 metavar="NODE", default=None,
1069 completion_suggest=OPT_COMPL_ONE_NODE)
1071 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1072 help="Specifies the new primary node",
1073 metavar="<node>", default=None,
1074 completion_suggest=OPT_COMPL_ONE_NODE)
1076 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1077 default=False, action="store_true",
1078 help="Replace the disk(s) on the primary"
1079 " node (applies only to internally mirrored"
1080 " disk templates, e.g. %s)" %
1081 utils.CommaJoin(constants.DTS_INT_MIRROR))
1083 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1084 default=False, action="store_true",
1085 help="Replace the disk(s) on the secondary"
1086 " node (applies only to internally mirrored"
1087 " disk templates, e.g. %s)" %
1088 utils.CommaJoin(constants.DTS_INT_MIRROR))
1090 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1091 default=False, action="store_true",
1092 help="Lock all nodes and auto-promote as needed"
1095 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1096 default=False, action="store_true",
1097 help="Automatically replace faulty disks"
1098 " (applies only to internally mirrored"
1099 " disk templates, e.g. %s)" %
1100 utils.CommaJoin(constants.DTS_INT_MIRROR))
1102 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1103 default=False, action="store_true",
1104 help="Ignore current recorded size"
1105 " (useful for forcing activation when"
1106 " the recorded size is wrong)")
1108 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1110 completion_suggest=OPT_COMPL_ONE_NODE)
1112 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1115 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1116 help="Specify the secondary ip for the node",
1117 metavar="ADDRESS", default=None)
1119 READD_OPT = cli_option("--readd", dest="readd",
1120 default=False, action="store_true",
1121 help="Readd old node after replacing it")
1123 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1124 default=True, action="store_false",
1125 help="Disable SSH key fingerprint checking")
1127 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1128 default=False, action="store_true",
1129 help="Force the joining of a node")
1131 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1132 type="bool", default=None, metavar=_YORNO,
1133 help="Set the master_candidate flag on the node")
1135 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1136 type="bool", default=None,
1137 help=("Set the offline flag on the node"
1138 " (cluster does not communicate with offline"
1141 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1142 type="bool", default=None,
1143 help=("Set the drained flag on the node"
1144 " (excluded from allocation operations)"))
1146 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1147 type="bool", default=None, metavar=_YORNO,
1148 help="Set the master_capable flag on the node")
1150 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1151 type="bool", default=None, metavar=_YORNO,
1152 help="Set the vm_capable flag on the node")
1154 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1155 type="bool", default=None, metavar=_YORNO,
1156 help="Set the allocatable flag on a volume")
1158 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1159 help="Disable support for lvm based instances"
1161 action="store_false", default=True)
1163 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1164 dest="enabled_hypervisors",
1165 help="Comma-separated list of hypervisors",
1166 type="string", default=None)
1168 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1169 dest="enabled_disk_templates",
1170 help="Comma-separated list of "
1172 type="string", default=None)
1174 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1175 type="keyval", default={},
1176 help="NIC parameters")
1178 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1179 dest="candidate_pool_size", type="int",
1180 help="Set the candidate pool size")
1182 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1183 help=("Enables LVM and specifies the volume group"
1184 " name (cluster-wide) for disk allocation"
1185 " [%s]" % constants.DEFAULT_VG),
1186 metavar="VG", default=None)
1188 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1189 help="Destroy cluster", action="store_true")
1191 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1192 help="Skip node agreement check (dangerous)",
1193 action="store_true", default=False)
1195 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1196 help="Specify the mac prefix for the instance IP"
1197 " addresses, in the format XX:XX:XX",
1201 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1202 help="Specify the node interface (cluster-wide)"
1203 " on which the master IP address will be added"
1204 " (cluster init default: %s)" %
1205 constants.DEFAULT_BRIDGE,
1209 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1210 help="Specify the netmask of the master IP",
1214 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1215 dest="use_external_mip_script",
1216 help="Specify whether to run a"
1217 " user-provided script for the master"
1218 " IP address turnup and"
1219 " turndown operations",
1220 type="bool", metavar=_YORNO, default=None)
1222 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1223 help="Specify the default directory (cluster-"
1224 "wide) for storing the file-based disks [%s]" %
1225 pathutils.DEFAULT_FILE_STORAGE_DIR,
1227 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1229 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1230 "--shared-file-storage-dir",
1231 dest="shared_file_storage_dir",
1232 help="Specify the default directory (cluster-wide) for storing the"
1233 " shared file-based disks [%s]" %
1234 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1235 metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1237 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1238 help="Don't modify %s" % pathutils.ETC_HOSTS,
1239 action="store_false", default=True)
1241 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1242 help="Don't initialize SSH keys",
1243 action="store_false", default=True)
1245 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1246 help="Enable parseable error messages",
1247 action="store_true", default=False)
1249 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1250 help="Skip N+1 memory redundancy tests",
1251 action="store_true", default=False)
1253 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1254 help="Type of reboot: soft/hard/full",
1255 default=constants.INSTANCE_REBOOT_HARD,
1257 choices=list(constants.REBOOT_TYPES))
1259 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1260 dest="ignore_secondaries",
1261 default=False, action="store_true",
1262 help="Ignore errors from secondaries")
1264 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1265 action="store_false", default=True,
1266 help="Don't shutdown the instance (unsafe)")
1268 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1269 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1270 help="Maximum time to wait")
1272 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1273 dest="shutdown_timeout", type="int",
1274 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1275 help="Maximum time to wait for instance"
1278 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1280 help=("Number of seconds between repetions of the"
1283 EARLY_RELEASE_OPT = cli_option("--early-release",
1284 dest="early_release", default=False,
1285 action="store_true",
1286 help="Release the locks on the secondary"
1289 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1290 dest="new_cluster_cert",
1291 default=False, action="store_true",
1292 help="Generate a new cluster certificate")
1294 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1296 help="File containing new RAPI certificate")
1298 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1299 default=None, action="store_true",
1300 help=("Generate a new self-signed RAPI"
1303 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1305 help="File containing new SPICE certificate")
1307 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1309 help="File containing the certificate of the CA"
1310 " which signed the SPICE certificate")
1312 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1313 dest="new_spice_cert", default=None,
1314 action="store_true",
1315 help=("Generate a new self-signed SPICE"
1318 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1319 dest="new_confd_hmac_key",
1320 default=False, action="store_true",
1321 help=("Create a new HMAC key for %s" %
# Loads a replacement cluster domain secret from the given file.
# (Fixed duplicated word in the user-visible help text: "new new" -> "new".)
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       help=("Load new cluster domain"
                                             " secret from file"))
1330 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1331 dest="new_cluster_domain_secret",
1332 default=False, action="store_true",
1333 help=("Create a new cluster domain"
1336 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1337 dest="use_replication_network",
1338 help="Whether to use the replication network"
1339 " for talking to the nodes",
1340 action="store_true", default=False)
1342 MAINTAIN_NODE_HEALTH_OPT = \
1343 cli_option("--maintain-node-health", dest="maintain_node_health",
1344 metavar=_YORNO, default=None, type="bool",
1345 help="Configure the cluster to automatically maintain node"
1346 " health, by shutting down unknown instances, shutting down"
1347 " unknown DRBD devices, etc.")
1349 IDENTIFY_DEFAULTS_OPT = \
1350 cli_option("--identify-defaults", dest="identify_defaults",
1351 default=False, action="store_true",
1352 help="Identify which saved instance parameters are equal to"
1353 " the current cluster defaults and set them as such, instead"
1354 " of marking them as overridden")
1356 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1357 action="store", dest="uid_pool",
1358 help=("A list of user-ids or user-id"
1359 " ranges separated by commas"))
1361 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1362 action="store", dest="add_uids",
1363 help=("A list of user-ids or user-id"
1364 " ranges separated by commas, to be"
1365 " added to the user-id pool"))
1367 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1368 action="store", dest="remove_uids",
1369 help=("A list of user-ids or user-id"
1370 " ranges separated by commas, to be"
1371 " removed from the user-id pool"))
1373 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1374 action="store", dest="reserved_lvs",
1375 help=("A comma-separated list of reserved"
1376 " logical volumes names, that will be"
1377 " ignored by cluster verify"))
1379 ROMAN_OPT = cli_option("--roman",
1380 dest="roman_integers", default=False,
1381 action="store_true",
1382 help="Use roman numbers for positive integers")
1384 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1385 action="store", default=None,
1386 help="Specifies usermode helper for DRBD")
1388 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1389 action="store_false", default=True,
1390 help="Disable support for DRBD")
1392 PRIMARY_IP_VERSION_OPT = \
1393 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1394 action="store", dest="primary_ip_version",
1395 metavar="%d|%d" % (constants.IP4_VERSION,
1396 constants.IP6_VERSION),
1397 help="Cluster-wide IP version for primary IP")
1399 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1400 action="store_true",
1401 help="Show machine name for every line in output")
1403 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1404 action="store_true",
1405 help=("Hide successful results and show failures"
1406 " only (determined by the exit code)"))
1408 REASON_OPT = cli_option("--reason", default=None,
1409 help="The reason for executing the command")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Maps the symbolic priority name to its numeric value and stores the
  result on the parsed options under the option's destination.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
1421 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1422 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1423 choices=_PRIONAME_TO_VALUE.keys(),
1424 action="callback", type="choice",
1425 callback=_PriorityOptionCb,
1426 help="Priority for opcode processing")
1428 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1429 type="bool", default=None, metavar=_YORNO,
1430 help="Sets the hidden flag on the OS")
1432 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1433 type="bool", default=None, metavar=_YORNO,
1434 help="Sets the blacklisted flag on the OS")
1436 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1437 type="bool", metavar=_YORNO,
1438 dest="prealloc_wipe_disks",
1439 help=("Wipe disks prior to instance"
1442 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1443 type="keyval", default=None,
1444 help="Node parameters")
1446 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1447 action="store", metavar="POLICY", default=None,
1448 help="Allocation policy for the node group")
1450 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1451 type="bool", metavar=_YORNO,
1452 dest="node_powered",
1453 help="Specify if the SoR for node is powered")
1455 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1456 default=constants.OOB_TIMEOUT,
1457 help="Maximum time to wait for out-of-band helper")
1459 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1460 default=constants.OOB_POWER_DELAY,
1461 help="Time in seconds to wait between power-ons")
1463 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1464 action="store_true", default=False,
1465 help=("Whether command argument should be treated"
1468 NO_REMEMBER_OPT = cli_option("--no-remember",
1470 action="store_true", default=False,
1471 help="Perform but do not record the change"
1472 " in the configuration")
1474 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1475 default=False, action="store_true",
1476 help="Evacuate primary instances only")
# Counterpart of PRIMARY_ONLY_OPT above: restricts the operation to
# secondary instances, which only makes sense for internally mirrored
# disk templates (see constants.DTS_INT_MIRROR).
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))
1485 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1486 action="store_true", default=False,
1487 help="Pause instance at startup")
1489 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1490 help="Destination node group (name or uuid)",
1491 default=None, action="append",
1492 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
# Repeatable option: each occurrence appends one value to
# options.ignore_errors; optparse validates every value against the
# known cluster-verify error-code strings via "choices".
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")
1499 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1501 help=("Specify disk state information in the"
1503 " storage_type/identifier:option=value,...;"
1504 " note this is unused for now"),
1507 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1509 help=("Specify hypervisor state information in the"
1510 " format hypervisor:option=value,...;"
1511 " note this is unused for now"),
1514 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1515 action="store_true", default=False,
1516 help="Ignore instance policy violations")
1518 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1519 help="Sets the instance's runtime memory,"
1520 " ballooning it up or down to the new value",
1521 default=None, type="unit", metavar="<size>")
1523 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1524 action="store_true", default=False,
1525 help="Marks the grow as absolute instead of the"
1526 " (default) relative mode")
1528 NETWORK_OPT = cli_option("--network",
1529 action="store", default=None, dest="network",
1530 help="IP network in CIDR notation")
1532 GATEWAY_OPT = cli_option("--gateway",
1533 action="store", default=None, dest="gateway",
1534 help="IP address of the router (gateway)")
1536 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1537 action="store", default=None,
1538 dest="add_reserved_ips",
1539 help="Comma-separated list of"
1540 " reserved IPs to add")
# Symmetric to ADD_RESERVED_IPS_OPT; help wording made consistent with it
# ("Comma-separated" instead of "Comma-delimited").
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-separated list of"
                                     " reserved IPs to remove")
1548 NETWORK6_OPT = cli_option("--network6",
1549 action="store", default=None, dest="network6",
1550 help="IP network in CIDR notation")
1552 GATEWAY6_OPT = cli_option("--gateway6",
1553 action="store", default=None, dest="gateway6",
1554 help="IP6 address of the router (gateway)")
1556 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1557 dest="conflicts_check",
1559 action="store_false",
1560 help="Don't check for conflicting IPs")
1562 #: Options provided by all commands
1563 COMMON_OPTS = [DEBUG_OPT]
1565 # common options for creating instances. add and import then add their own
1567 COMMON_CREATE_OPTS = [
1572 FILESTORE_DRIVER_OPT,
1578 NOCONFLICTSCHECK_OPT,
1590 # common instance policy options
1591 INSTANCE_POLICY_OPTS = [
1592 SPECS_CPU_COUNT_OPT,
1593 SPECS_DISK_COUNT_OPT,
1594 SPECS_DISK_SIZE_OPT,
1596 SPECS_NIC_COUNT_OPT,
1597 IPOLICY_DISK_TEMPLATES,
1599 IPOLICY_SPINDLE_RATIO,
1603 class _ShowUsage(Exception):
1604 """Exception class for L{_ParseArgs}.
1607 def __init__(self, exit_error):
1608 """Initializes instances of this class.
1610 @type exit_error: bool
1611 @param exit_error: Whether to report failure on exit
1614 Exception.__init__(self)
1615 self.exit_error = exit_error
1618 class _ShowVersion(Exception):
1619 """Exception class for L{_ParseArgs}.
1624 def _ParseArgs(binary, argv, commands, aliases, env_override):
1625 """Parser for the command line arguments.
1627 This function parses the arguments and returns the function which
1628 must be executed together with its (modified) arguments.
1630 @param binary: Script name
1631 @param argv: Command line arguments
1632 @param commands: Dictionary containing command definitions
1633 @param aliases: dictionary with command aliases {"alias": "target", ...}
1634 @param env_override: list of env variables allowed for default args
1635 @raise _ShowUsage: If usage description should be shown
1636 @raise _ShowVersion: If version should be shown
1639 assert not (env_override - set(commands))
1640 assert not (set(aliases.keys()) & set(commands.keys()))
1645 # No option or command given
1646 raise _ShowUsage(exit_error=True)
1648 if cmd == "--version":
1649 raise _ShowVersion()
1650 elif cmd == "--help":
1651 raise _ShowUsage(exit_error=False)
1652 elif not (cmd in commands or cmd in aliases):
1653 raise _ShowUsage(exit_error=True)
1655 # get command, unalias it, and look it up in commands
1657 if aliases[cmd] not in commands:
1658 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1659 " command '%s'" % (cmd, aliases[cmd]))
1663 if cmd in env_override:
1664 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1665 env_args = os.environ.get(args_env_name)
1667 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1669 func, args_def, parser_opts, usage, description = commands[cmd]
1670 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1671 description=description,
1672 formatter=TitledHelpFormatter(),
1673 usage="%%prog %s %s" % (cmd, usage))
1674 parser.disable_interspersed_args()
1675 options, args = parser.parse_args(args=argv[2:])
1677 if not _CheckArguments(cmd, args_def, args):
1678 return None, None, None
1680 return func, options, args
1683 def _FormatUsage(binary, commands):
1684 """Generates a nice description of all commands.
1686 @param binary: Script name
1687 @param commands: Dictionary containing command definitions
1690 # compute the max line length for cmd + usage
1691 mlen = min(60, max(map(len, commands)))
1693 yield "Usage: %s {command} [options...] [argument...]" % binary
1694 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1698 # and format a nice command list
1699 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1700 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1701 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1702 for line in help_lines:
1703 yield " %-*s %s" % (mlen, "", line)
1708 def _CheckArguments(cmd, args_def, args):
1709 """Verifies the arguments using the argument definition.
1713 1. Abort with error if values specified by user but none expected.
1715 1. For each argument in definition
1717 1. Keep running count of minimum number of values (min_count)
1718 1. Keep running count of maximum number of values (max_count)
1719 1. If it has an unlimited number of values
1721 1. Abort with error if it's not the last argument in the definition
1723 1. If last argument has limited number of values
1725 1. Abort with error if number of values doesn't match or is too large
1727 1. Abort with error if user didn't pass enough values (min_count)
1730 if args and not args_def:
1731 ToStderr("Error: Command %s expects no arguments", cmd)
1738 last_idx = len(args_def) - 1
1740 for idx, arg in enumerate(args_def):
1741 if min_count is None:
1743 elif arg.min is not None:
1744 min_count += arg.min
1746 if max_count is None:
1748 elif arg.max is not None:
1749 max_count += arg.max
1752 check_max = (arg.max is not None)
1754 elif arg.max is None:
1755 raise errors.ProgrammerError("Only the last argument can have max=None")
1758 # Command with exact number of arguments
1759 if (min_count is not None and max_count is not None and
1760 min_count == max_count and len(args) != min_count):
1761 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1764 # Command with limited number of arguments
1765 if max_count is not None and len(args) > max_count:
1766 ToStderr("Error: Command %s expects only %d argument(s)",
1770 # Command with some required arguments
1771 if min_count is not None and len(args) < min_count:
1772 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "primary:secondary" is split on the first colon;
  any other value (including None or the empty string) yields the value
  itself with no secondary.

  """
  has_secondary = bool(value) and ":" in value
  if not has_secondary:
    return (value, None)
  # Note: str.split returns a list here, matching the original behaviour.
  return value.split(":", 1)
1789 def CalculateOSNames(os_name, os_variants):
1790 """Calculates all the names an OS can be called, according to its variants.
1792 @type os_name: string
1793 @param os_name: base name of the os
1794 @type os_variants: list or None
1795 @param os_variants: list of supported variants
1797 @return: list of valid names
1801 return ["%s+%s" % (os_name, v) for v in os_variants]
1806 def ParseFields(selected, default):
1807 """Parses the values of "--field"-like options.
1809 @type selected: string or None
1810 @param selected: User-selected options
1812 @param default: Default fields
1815 if selected is None:
1818 if selected.startswith("+"):
1819 return default + selected[1:].split(",")
1821 return selected.split(",")
1824 UsesRPC = rpc.RunWithRPC
1827 def AskUser(text, choices=None):
1828 """Ask the user a question.
1830 @param text: the question to ask
1832 @param choices: list with elements tuples (input_char, return_value,
1833 description); if not given, it will default to: [('y', True,
1834 'Perform the operation'), ('n', False, 'Do no do the operation')];
1835 note that the '?' char is reserved for help
1837 @return: one of the return values from the choices list; if input is
1838 not possible (i.e. not running with a tty, we return the last
1843 choices = [("y", True, "Perform the operation"),
1844 ("n", False, "Do not perform the operation")]
1845 if not choices or not isinstance(choices, list):
1846 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1847 for entry in choices:
1848 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1849 raise errors.ProgrammerError("Invalid choices element to AskUser")
1851 answer = choices[-1][1]
1853 for line in text.splitlines():
1854 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1855 text = "\n".join(new_text)
1857 f = file("/dev/tty", "a+")
1861 chars = [entry[0] for entry in choices]
1862 chars[-1] = "[%s]" % chars[-1]
1864 maps = dict([(entry[0], entry[1]) for entry in choices])
1868 f.write("/".join(chars))
1870 line = f.readline(2).strip().lower()
1875 for entry in choices:
1876 f.write(" %s - %s\n" % (entry[0], entry[2]))
1884 class JobSubmittedException(Exception):
1885 """Job was submitted, client should exit.
1887 This exception has one argument, the ID of the job that was
1888 submitted. The handler should print this ID.
1890 This is not an error, just a structured way to exit from clients.
1895 def SendJob(ops, cl=None):
1896 """Function to submit an opcode without waiting for the results.
1899 @param ops: list of opcodes
1900 @type cl: luxi.Client
1901 @param cl: the luxi client to use for communicating with the master;
1902 if None, a new client will be created
1908 job_id = cl.SubmitJob(ops)
1913 def GenericPollJob(job_id, cbs, report_cbs):
1914 """Generic job-polling function.
1916 @type job_id: number
1917 @param job_id: Job ID
1918 @type cbs: Instance of L{JobPollCbBase}
1919 @param cbs: Data callbacks
1920 @type report_cbs: Instance of L{JobPollReportCbBase}
1921 @param report_cbs: Reporting callbacks
1924 prev_job_info = None
1925 prev_logmsg_serial = None
1930 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1933 # job not found, go away!
1934 raise errors.JobLost("Job with id %s lost" % job_id)
1936 if result == constants.JOB_NOTCHANGED:
1937 report_cbs.ReportNotChanged(job_id, status)
1942 # Split result, a tuple of (field values, log entries)
1943 (job_info, log_entries) = result
1944 (status, ) = job_info
1947 for log_entry in log_entries:
1948 (serial, timestamp, log_type, message) = log_entry
1949 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1951 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1953 # TODO: Handle canceled and archived jobs
1954 elif status in (constants.JOB_STATUS_SUCCESS,
1955 constants.JOB_STATUS_ERROR,
1956 constants.JOB_STATUS_CANCELING,
1957 constants.JOB_STATUS_CANCELED):
1960 prev_job_info = job_info
1962 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1964 raise errors.JobLost("Job with id %s lost" % job_id)
1966 status, opstatus, result = jobs[0]
1968 if status == constants.JOB_STATUS_SUCCESS:
1971 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1972 raise errors.OpExecError("Job was canceled")
1975 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1976 if status == constants.OP_STATUS_SUCCESS:
1978 elif status == constants.OP_STATUS_ERROR:
1979 errors.MaybeRaise(msg)
1982 raise errors.OpExecError("partial failure (opcode %d): %s" %
1985 raise errors.OpExecError(str(msg))
1987 # default failure mode
1988 raise errors.OpExecError(result)
1991 class JobPollCbBase:
1992 """Base class for L{GenericPollJob} callbacks.
1996 """Initializes this class.
2000 def WaitForJobChangeOnce(self, job_id, fields,
2001 prev_job_info, prev_log_serial):
2002 """Waits for changes on a job.
2005 raise NotImplementedError()
  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    Abstract method; concrete callback classes must override it.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
2019 class JobPollReportCbBase:
2020 """Base class for L{GenericPollJob} reporting callbacks.
2024 """Initializes this class.
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Abstract method; concrete reporting callbacks must override it.

    """
    raise NotImplementedError()
  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    Abstract method; concrete reporting callbacks must override it.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
2046 class _LuxiJobPollCb(JobPollCbBase):
2047 def __init__(self, cl):
2048 """Initializes this class.
2051 JobPollCbBase.__init__(self)
2054 def WaitForJobChangeOnce(self, job_id, fields,
2055 prev_job_info, prev_log_serial):
2056 """Waits for changes on a job.
2059 return self.cl.WaitForJobChangeOnce(job_id, fields,
2060 prev_job_info, prev_log_serial)
2062 def QueryJobs(self, job_ids, fields):
2063 """Returns the selected fields for the selected job IDs.
2066 return self.cl.QueryJobs(job_ids, fields)
2069 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2070 def __init__(self, feedback_fn):
2071 """Initializes this class.
2074 JobPollReportCbBase.__init__(self)
2076 self.feedback_fn = feedback_fn
2078 assert callable(feedback_fn)
2080 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2081 """Handles a log message.
2084 self.feedback_fn((timestamp, log_type, log_msg))
2086 def ReportNotChanged(self, job_id, status):
2087 """Called if a job hasn't changed in a while.
2093 class StdioJobPollReportCb(JobPollReportCbBase):
2095 """Initializes this class.
2098 JobPollReportCbBase.__init__(self)
2100 self.notified_queued = False
2101 self.notified_waitlock = False
2103 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2104 """Handles a log message.
2107 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2108 FormatLogMessage(log_type, log_msg))
2110 def ReportNotChanged(self, job_id, status):
2111 """Called if a job hasn't changed in a while.
2117 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2118 ToStderr("Job %s is waiting in queue", job_id)
2119 self.notified_queued = True
2121 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2122 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2123 self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  ELOG_MESSAGE payloads are passed through as-is; any other payload is
  stringified first. The result is safely encoded for display.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)
  return utils.SafeEncode(text)
2136 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2137 """Function to poll for the result of a job.
2139 @type job_id: job identified
2140 @param job_id: the job to poll for results
2141 @type cl: luxi.Client
2142 @param cl: the luxi client to use for communicating with the master;
2143 if None, a new client will be created
2149 if reporter is None:
2151 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2153 reporter = StdioJobPollReportCb()
2155 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2157 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2160 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2161 """Legacy function to submit an opcode.
2163 This is just a simple wrapper over the construction of the processor
2164 instance. It should be extended to better handle feedback and
2165 interaction functions.
2171 SetGenericOpcodeOpts([op], opts)
2173 job_id = SendJob([op], cl=cl)
2175 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2178 return op_results[0]
2181 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2182 """Wrapper around SubmitOpCode or SendJob.
2184 This function will decide, based on the 'opts' parameter, whether to
2185 submit and wait for the result of the opcode (and return it), or
2186 whether to just send the job and print its identifier. It is used in
2187 order to simplify the implementation of the '--submit' option.
2189 It will also process the opcodes if we're sending the via SendJob
2190 (otherwise SubmitOpCode does it).
2193 if opts and opts.submit_only:
2195 SetGenericOpcodeOpts(job, opts)
2196 job_id = SendJob(job, cl=cl)
2197 raise JobSubmittedException(job_id)
2199 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2202 def SetGenericOpcodeOpts(opcode_list, options):
2203 """Processor for generic options.
2205 This function updates the given opcodes based on generic command
2206 line options (like debug, dry-run, etc.).
2208 @param opcode_list: list of opcodes
2209 @param options: command line options or None
2210 @return: None (in-place modification)
2215 for op in opcode_list:
2216 op.debug_level = options.debug
2217 if hasattr(options, "dry_run"):
2218 op.dry_run = options.dry_run
2219 if getattr(options, "priority", None) is not None:
2220 op.priority = options.priority
2223 def GetClient(query=False):
2224 """Connects to the a luxi socket and returns a client.
2226 @type query: boolean
2227 @param query: this signifies that the client will only be
2228 used for queries; if the build-time parameter
2229 enable-split-queries is enabled, then the client will be
2230 connected to the query socket instead of the masterd socket
2233 override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2235 if override_socket == constants.LUXI_OVERRIDE_MASTER:
2236 address = pathutils.MASTER_SOCKET
2237 elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2238 address = pathutils.QUERY_SOCKET
2240 address = override_socket
2241 elif query and constants.ENABLE_SPLIT_QUERY:
2242 address = pathutils.QUERY_SOCKET
2245 # TODO: Cache object?
2247 client = luxi.Client(address=address)
2248 except luxi.NoMasterError:
2249 ss = ssconf.SimpleStore()
2251 # Try to read ssconf file
2254 except errors.ConfigurationError:
2255 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2256 " not part of a cluster",
2259 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2260 if master != myself:
2261 raise errors.OpPrereqError("This is not the master node, please connect"
2262 " to node '%s' and rerun the command" %
2263 master, errors.ECODE_INVAL)
2268 def FormatError(err):
2269 """Return a formatted error message for a given error.
2271 This function takes an exception instance and returns a tuple
2272 consisting of two values: first, the recommended exit code, and
2273 second, a string describing the error message (not
2274 newline-terminated).
2280 if isinstance(err, errors.ConfigurationError):
2281 txt = "Corrupt configuration file: %s" % msg
2283 obuf.write(txt + "\n")
2284 obuf.write("Aborting.")
2286 elif isinstance(err, errors.HooksAbort):
2287 obuf.write("Failure: hooks execution failed:\n")
2288 for node, script, out in err.args[0]:
2290 obuf.write(" node: %s, script: %s, output: %s\n" %
2291 (node, script, out))
2293 obuf.write(" node: %s, script: %s (no output)\n" %
2295 elif isinstance(err, errors.HooksFailure):
2296 obuf.write("Failure: hooks general failure: %s" % msg)
2297 elif isinstance(err, errors.ResolverError):
2298 this_host = netutils.Hostname.GetSysName()
2299 if err.args[0] == this_host:
2300 msg = "Failure: can't resolve my own hostname ('%s')"
2302 msg = "Failure: can't resolve hostname '%s'"
2303 obuf.write(msg % err.args[0])
2304 elif isinstance(err, errors.OpPrereqError):
2305 if len(err.args) == 2:
2306 obuf.write("Failure: prerequisites not met for this"
2307 " operation:\nerror type: %s, error details:\n%s" %
2308 (err.args[1], err.args[0]))
2310 obuf.write("Failure: prerequisites not met for this"
2311 " operation:\n%s" % msg)
2312 elif isinstance(err, errors.OpExecError):
2313 obuf.write("Failure: command execution error:\n%s" % msg)
2314 elif isinstance(err, errors.TagError):
2315 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2316 elif isinstance(err, errors.JobQueueDrainError):
2317 obuf.write("Failure: the job queue is marked for drain and doesn't"
2318 " accept new requests\n")
2319 elif isinstance(err, errors.JobQueueFull):
2320 obuf.write("Failure: the job queue is full and doesn't accept new"
2321 " job submissions until old jobs are archived\n")
2322 elif isinstance(err, errors.TypeEnforcementError):
2323 obuf.write("Parameter Error: %s" % msg)
2324 elif isinstance(err, errors.ParameterError):
2325 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2326 elif isinstance(err, luxi.NoMasterError):
2327 if err.args[0] == pathutils.MASTER_SOCKET:
2328 daemon = "the master daemon"
2329 elif err.args[0] == pathutils.QUERY_SOCKET:
2330 daemon = "the config daemon"
2332 daemon = "socket '%s'" % str(err.args[0])
2333 obuf.write("Cannot communicate with %s.\nIs the process running"
2334 " and listening for connections?" % daemon)
2335 elif isinstance(err, luxi.TimeoutError):
2336 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2337 " been submitted and will continue to run even if the call"
2338 " timed out. Useful commands in this situation are \"gnt-job"
2339 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2341 elif isinstance(err, luxi.PermissionError):
2342 obuf.write("It seems you don't have permissions to connect to the"
2343 " master daemon.\nPlease retry as a different user.")
2344 elif isinstance(err, luxi.ProtocolError):
2345 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2347 elif isinstance(err, errors.JobLost):
2348 obuf.write("Error checking job status: %s" % msg)
2349 elif isinstance(err, errors.QueryFilterParseError):
2350 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2351 obuf.write("\n".join(err.GetDetails()))
2352 elif isinstance(err, errors.GenericError):
2353 obuf.write("Unhandled Ganeti error: %s" % msg)
2354 elif isinstance(err, JobSubmittedException):
2355 obuf.write("JobID: %s\n" % err.args[0])
2358 obuf.write("Unhandled exception: %s" % msg)
2359 return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass options from
      the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands
  @return: the exit code of the command, suitable for sys.exit()

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"
    logname = binary

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage as err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError as err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.items():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException) as err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError as err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, settings-dict) pairs as produced by
      the option parser
  @rtype: list of dict
  @return: a dense list of NIC parameter dictionaries, indexed by NIC index
  @raise errors.OpPrereqError: on invalid indices or settings

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError) as err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError as err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      disks = [{}] * disk_max
    else:
      disks = []

    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError as err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception as err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher watcher
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for the table
  @type fields: list
  @param fields: the field names corresponding to each row in the data field
  @param separator: the separator to be used; if this is None, the default
      'smart' algorithm is used which computes optimal field width, otherwise
      just the separator is used between each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    # the last field doesn't need padding unless right-aligned
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2846 def _FormatBool(value):
2847 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report field status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
      see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: the field definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
      None for automatic choice (human-readable for non-separator usage,
      otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
      L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
      any simple filters built by namefield should use integer values to
      match

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list
  @return: name, human-readable kind, title and description of the field

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
3191 def _GetColFormatString(width, align_right):
3192 """Returns the format string for a field.
3200 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted table lines

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
  @rtype: string
  @return: a string with the formatted timestamp, or "?" if the input is
      not a two-element tuple/list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: on empty or unparseable input

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3392 def _ToStream(stream, txt, *args):
3393 """Write a message to a stream, bypassing the logging system
3395 @type stream: file object
3396 @param stream: the file to which we should write
3398 @param txt: the message
3404 stream.write(txt % args)
3409 except IOError, err:
3410 if err.errno == errno.EPIPE:
3411 # our terminal went away, we'll exit
3412 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((next(self._counter), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((next(self._counter), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if self.queue:
      self.SubmitPending()

    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)

      return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      # non-empty sub-dictionaries are formatted recursively
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      # parameters missing from param_dict fall back to the effective value
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
def _FormatListInfoDefault(data, def_data):
  """Formats a list, falling back to the default value when unset.

  @param data: the list to format, or None to use the default
  @param def_data: the default list
  @rtype: string

  """
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax is None:
    # guard against a missing minmax section in the custom policy
    custom_minmax = {}
  ret = [
    (key,
     FormatParamsDictInfo(custom_minmax.get(key, {}),
                          eff_ipolicy[constants.ISPECS_MINMAX][key]))
    for key in constants.ISPECS_MINMAX_KEYS
    ]
  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs)))

  ret.append(
    ("enabled disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS])))
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # for long lists, only offer to view the list on request
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Values equal to C{constants.VALUE_DEFAULT} are kept verbatim; every other
  value is parsed with L{utils.ParseUnit}.

  @type elements: dict
  @param elements: mapping from names to values, possibly carrying unit
      suffixes
  @rtype: dict
  @return: mapping from the same names to the parsed values

  """
  return dict((key,
               value if value == constants.VALUE_DEFAULT
               else utils.ParseUnit(value))
              for (key, value) in elements.items())
def _InitIspecsFromOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                        ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
                        group_ipolicy, allowed_values):
  """Fills the instance-spec part of C{ipolicy} from command-line values.

  @param ipolicy: the policy dictionary being built; modified in place
  @param ispecs_mem_size: dict of {min,max,std} memory sizes, possibly with
      unit suffixes
  @param ispecs_cpu_count: dict of {min,max,std} CPU counts
  @param ispecs_disk_count: dict of {min,max,std} disk counts
  @param ispecs_disk_size: dict of {min,max,std} disk sizes, possibly with
      unit suffixes
  @param ispecs_nic_count: dict of {min,max,std} NIC counts
  @type group_ipolicy: bool
  @param group_ipolicy: whether this is a group-level policy (selects which
      value types are enforced)
  @param allowed_values: extra values accepted verbatim by the type check
  @raise errors.OpPrereqError: if a memory or disk size cannot be parsed

  """
  try:
    # sizes may carry unit suffixes (e.g. "512M"); normalize them first
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose: {param: {min/max/std: val}} -> {min/max/std: {param: val}}
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items():  # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  for key in constants.ISPECS_MINMAX_KEYS:
    ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
  ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param ispecs_mem_size: dict of {min,max,std} memory sizes
  @param ispecs_cpu_count: dict of {min,max,std} CPU counts
  @param ispecs_disk_count: dict of {min,max,std} disk counts
  @param ispecs_disk_size: dict of {min,max,std} disk sizes
  @param ispecs_nic_count: dict of {min,max,std} NIC counts
  @param ipolicy_disk_templates: iterable of allowed disk templates, or None
      to leave unset
  @param ipolicy_vcpu_ratio: maximum vcpu/pcpu ratio, or None to leave unset
  @param ipolicy_spindle_ratio: maximum spindle ratio, or None to leave unset
  @type group_ipolicy: bool
  @param group_ipolicy: whether a group-level (partial) policy is built
  @param allowed_values: extra values accepted verbatim by the spec type check
  @type fill_all: bool
  @param fill_all: whether for cluster policies we should ensure that
    all values are filled
  @rtype: dict
  @return: the new instance policy

  """
  ipolicy_out = objects.MakeEmptyIPolicy()
  # fill in the min/max/std instance specs (modifies ipolicy_out in place)
  _InitIspecsFromOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                      ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
                      group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  # group policies are allowed to be partial; cluster policies may be
  # completed with the built-in defaults when requested
  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  Recursively writes a YAML-like rendering of C{data} into C{buf}:
  dictionaries as "key: value" lines, lists of pairs as ordered
  dictionaries, plain lists as "- item" lines, anything else via str().

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        # key text is already on this line: start the nested block below it
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        # the first entry after a key continues the current line, so it
        # must not be indented; all following entries are
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        # "-" plus the rest of one indent unit, so items align with keys
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  # serialize into an in-memory buffer, then emit in a single call,
  # stripping the trailing newline added by the serializer
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))