4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
84 # FIXME: disable storage types once disk templates are fully implemented.
85 "ENABLED_STORAGE_TYPES_OPT",
86 "ENABLED_DISK_TEMPLATES_OPT",
91 "FILESTORE_DRIVER_OPT",
99 "GLOBAL_SHARED_FILEDIR_OPT",
104 "DEFAULT_IALLOCATOR_OPT",
105 "IDENTIFY_DEFAULTS_OPT",
106 "IGNORE_CONSIST_OPT",
108 "IGNORE_FAILURES_OPT",
109 "IGNORE_OFFLINE_OPT",
110 "IGNORE_REMOVE_FAILURES_OPT",
111 "IGNORE_SECONDARIES_OPT",
115 "MAINTAIN_NODE_HEALTH_OPT",
117 "MASTER_NETMASK_OPT",
119 "MIGRATION_MODE_OPT",
123 "NEW_CLUSTER_CERT_OPT",
124 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
125 "NEW_CONFD_HMAC_KEY_OPT",
129 "NEW_SPICE_CERT_OPT",
131 "NOCONFLICTSCHECK_OPT",
132 "NODE_FORCE_JOIN_OPT",
134 "NODE_PLACEMENT_OPT",
138 "NODRBD_STORAGE_OPT",
144 "NOMODIFY_ETCHOSTS_OPT",
145 "NOMODIFY_SSH_SETUP_OPT",
149 "NORUNTIME_CHGS_OPT",
152 "NOSSH_KEYCHECK_OPT",
166 "PREALLOC_WIPE_DISKS_OPT",
167 "PRIMARY_IP_VERSION_OPT",
174 "REMOVE_INSTANCE_OPT",
175 "REMOVE_RESERVED_IPS_OPT",
181 "SECONDARY_ONLY_OPT",
186 "SHUTDOWN_TIMEOUT_OPT",
188 "SPECS_CPU_COUNT_OPT",
189 "SPECS_DISK_COUNT_OPT",
190 "SPECS_DISK_SIZE_OPT",
191 "SPECS_MEM_SIZE_OPT",
192 "SPECS_NIC_COUNT_OPT",
193 "IPOLICY_DISK_TEMPLATES",
194 "IPOLICY_VCPU_RATIO",
200 "STARTUP_PAUSED_OPT",
209 "USE_EXTERNAL_MIP_SCRIPT",
217 "IGNORE_IPOLICY_OPT",
218 "INSTANCE_POLICY_OPTS",
219 # Generic functions for CLI programs
221 "CreateIPolicyFromOpts",
223 "GenericInstanceCreate",
229 "JobSubmittedException",
231 "RunWhileClusterStopped",
235 # Formatting functions
236 "ToStderr", "ToStdout",
239 "FormatParamsDictInfo",
250 # command line options support infrastructure
251 "ARGS_MANY_INSTANCES",
254 "ARGS_MANY_NETWORKS",
274 "OPT_COMPL_INST_ADD_NODES",
275 "OPT_COMPL_MANY_NODES",
276 "OPT_COMPL_ONE_IALLOCATOR",
277 "OPT_COMPL_ONE_INSTANCE",
278 "OPT_COMPL_ONE_NODE",
279 "OPT_COMPL_ONE_NODEGROUP",
280 "OPT_COMPL_ONE_NETWORK",
282 "OPT_COMPL_ONE_EXTSTORAGE",
287 "COMMON_CREATE_OPTS",
#: Priorities (sorted), mapping user-visible names to opcode priority
#: constants; kept as a list of pairs so the order is stable
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
305 # Query result status for clients
308 QR_INCOMPLETE) = range(3)
310 #: Maximum batch size for ChooseJob
# constants used to create InstancePolicy dictionary
# (group-level policies have no "std" values, only cluster-level ones do)
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }
#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
339 def __init__(self, min=0, max=None): # pylint: disable=W0622
344 return ("<%s min=%s max=%s>" %
345 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # list of suggested values used for shell completion; unlike
    # L{ArgChoice}, values outside this list are still accepted
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
385 class ArgNode(_Argument):
391 class ArgNetwork(_Argument):
class ArgGroup(_Argument):
  """Node group argument.

  """
403 class ArgJobId(_Argument):
class ArgFile(_Argument):
  """File path argument.

  """
415 class ArgCommand(_Argument):
421 class ArgHost(_Argument):
427 class ArgOs(_Argument):
class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """
440 ARGS_MANY_INSTANCES = [ArgInstance()]
441 ARGS_MANY_NETWORKS = [ArgNetwork()]
442 ARGS_MANY_NODES = [ArgNode()]
443 ARGS_MANY_GROUPS = [ArgGroup()]
444 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
445 ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
446 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
448 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
449 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type}
      attribute denoting the object kind the tags apply to
  @param args: remaining command line arguments; for non-cluster tag
      kinds the first element (the object name) is popped off
  @return: tuple of (kind, name), where name is C{None} for cluster tags
  @raise errors.ProgrammerError: if C{tag_type} is missing or unhandled
  @raise errors.OpPrereqError: if an object name is required but missing

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, so no name is needed
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    # consume the object name so callers see only the remaining tags
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command line options; C{tags_source} is the
      optional file name (or "-" for stdin)
  @param args: list of tags, extended in place with one tag per
      stripped line of the source file

  """
  fname = opts.tags_source
  if fname is None:
    # no tag source given, nothing to do
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # don't close stdin, we don't own it
    if new_fh is not sys.stdin:
      new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @param opts: parsed command line options, with a C{tag_type} field
  @param args: command line arguments (object name for non-cluster tags)

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  # sort for stable, user-friendly output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @param opts: parsed command line options, with a C{tag_type} field
      and an optional C{tags_source} file
  @param args: object name (for non-cluster tags) followed by the tags
  @raise errors.OpPrereqError: if no tags were given

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @param opts: parsed command line options, with a C{tag_type} field
      and an optional C{tags_source} file
  @param args: object name (for non-cluster tags) followed by the tags
  @raise errors.OpPrereqError: if no tags were given

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
558 def check_unit(option, opt, value): # pylint: disable=W0613
559 """OptParsers custom converter for units.
563 return utils.ParseUnit(value)
564 except errors.UnitParseError, err:
565 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the
  '-' prefix will have value=None and the prefix stripped, the others
  will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # value-less element: interpret the prefix
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  An ident prefixed with 'no_' yields (stripped_ident, False), and one
  prefixed with '-' (unless followed by a digit, to allow negative
  identifiers) yields (stripped_ident, None); in both cases no key=val
  part may be given.

  @raise errors.ParameterError: on invalid input

  """
  if ":" not in value:
    # no key=val part at all
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @raise errors.ParameterError: on duplicate keys

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raise errors.ParameterError: if the value is not a recognized boolean

  """
  # comparison is case-insensitive
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  @return: the (possibly empty) list of parsed values

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  @return: the special "default" string unchanged, otherwise the float

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends the standard L{Option} with a C{completion_suggest} attribute
  (used for shell completion hints) and with the Ganeti-specific value
  types, each backed by one of the C{check_*} converters below.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
732 # optparse.py sets make_option, so we do it for our own option class, too
733 cli_option = CliOption
738 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
739 help="Increase debugging level")
741 NOHDR_OPT = cli_option("--no-headers", default=False,
742 action="store_true", dest="no_headers",
743 help="Don't display column headers")
745 SEP_OPT = cli_option("--separator", default=None,
746 action="store", dest="separator",
747 help=("Separator between output fields"
748 " (defaults to one space)"))
750 USEUNITS_OPT = cli_option("--units", default=None,
751 dest="units", choices=("h", "m", "g", "t"),
752 help="Specify units for output (one of h/m/g/t)")
754 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
755 type="string", metavar="FIELDS",
756 help="Comma separated list of output fields")
758 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
759 default=False, help="Force the operation")
761 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
762 default=False, help="Do not require confirmation")
764 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
765 action="store_true", default=False,
766 help=("Ignore offline nodes and do as much"
769 TAG_ADD_OPT = cli_option("--tags", dest="tags",
770 default=None, help="Comma-separated list of instance"
773 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
774 default=None, help="File with tag names")
776 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
777 default=False, action="store_true",
778 help=("Submit the job and return the job ID, but"
779 " don't wait for the job to finish"))
781 SYNC_OPT = cli_option("--sync", dest="do_locking",
782 default=False, action="store_true",
783 help=("Grab locks while doing the queries"
784 " in order to ensure more consistent results"))
786 DRY_RUN_OPT = cli_option("--dry-run", default=False,
788 help=("Do not execute the operation, just run the"
789 " check steps and verify if it could be"
792 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
794 help="Increase the verbosity of the operation")
796 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
797 action="store_true", dest="simulate_errors",
798 help="Debugging option that makes the operation"
799 " treat most runtime checks as failed")
801 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
802 default=True, action="store_false",
803 help="Don't wait for sync (DANGEROUS!)")
805 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
806 default=False, action="store_true",
807 help="Wait for disks to sync")
809 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
810 action="store_true", default=False,
811 help="Enable offline instance")
813 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
814 action="store_true", default=False,
815 help="Disable down instance")
817 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
818 help=("Custom disk setup (%s)" %
819 utils.CommaJoin(constants.DISK_TEMPLATES)),
820 default=None, metavar="TEMPL",
821 choices=list(constants.DISK_TEMPLATES))
823 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
824 help="Do not create any network cards for"
827 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
828 help="Relative path under default cluster-wide"
829 " file storage dir to store file-based disks",
830 default=None, metavar="<DIR>")
832 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
833 help="Driver to use for image files",
834 default="loop", metavar="<DRIVER>",
835 choices=list(constants.FILE_DRIVER))
837 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
838 help="Select nodes for the instance automatically"
839 " using the <NAME> iallocator plugin",
840 default=None, type="string",
841 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
843 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
845 help="Set the default instance"
847 default=None, type="string",
848 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
850 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
852 completion_suggest=OPT_COMPL_ONE_OS)
854 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
855 type="keyval", default={},
856 help="OS parameters")
858 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
859 action="store_true", default=False,
860 help="Force an unknown variant")
862 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
863 action="store_true", default=False,
864 help="Do not install the OS (will"
867 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
868 dest="allow_runtime_chgs",
869 default=True, action="store_false",
870 help="Don't allow runtime changes")
872 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
873 type="keyval", default={},
874 help="Backend parameters")
876 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
877 default={}, dest="hvparams",
878 help="Hypervisor parameters")
880 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
881 help="Disk template parameters, in the format"
882 " template:option=value,option=value,...",
883 type="identkeyval", action="append", default=[])
885 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
886 type="keyval", default={},
887 help="Memory size specs: list of key=value,"
888 " where key is one of min, max, std"
889 " (in MB or using a unit)")
891 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
892 type="keyval", default={},
893 help="CPU count specs: list of key=value,"
894 " where key is one of min, max, std")
896 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
897 dest="ispecs_disk_count",
898 type="keyval", default={},
899 help="Disk count specs: list of key=value,"
900 " where key is one of min, max, std")
902 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
903 type="keyval", default={},
904 help="Disk size specs: list of key=value,"
905 " where key is one of min, max, std"
906 " (in MB or using a unit)")
908 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
909 type="keyval", default={},
910 help="NIC count specs: list of key=value,"
911 " where key is one of min, max, std")
913 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
914 dest="ipolicy_disk_templates",
915 type="list", default=None,
916 help="Comma-separated list of"
917 " enabled disk templates")
919 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
920 dest="ipolicy_vcpu_ratio",
921 type="maybefloat", default=None,
922 help="The maximum allowed vcpu-to-cpu ratio")
924 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
925 dest="ipolicy_spindle_ratio",
926 type="maybefloat", default=None,
927 help=("The maximum allowed instances to"
930 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
931 help="Hypervisor and hypervisor options, in the"
932 " format hypervisor:option=value,option=value,...",
933 default=None, type="identkeyval")
935 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
936 help="Hypervisor and hypervisor options, in the"
937 " format hypervisor:option=value,option=value,...",
938 default=[], action="append", type="identkeyval")
940 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
941 action="store_false",
942 help="Don't check that the instance's IP"
945 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
946 default=True, action="store_false",
947 help="Don't check that the instance's name"
950 NET_OPT = cli_option("--net",
951 help="NIC parameters", default=[],
952 dest="nics", action="append", type="identkeyval")
954 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
955 dest="disks", action="append", type="identkeyval")
957 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
958 help="Comma-separated list of disks"
959 " indices to act on (e.g. 0,2) (optional,"
960 " defaults to all disks)")
962 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
963 help="Enforces a single-disk configuration using the"
964 " given disk size, in MiB unless a suffix is used",
965 default=None, type="unit", metavar="<size>")
967 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
968 dest="ignore_consistency",
969 action="store_true", default=False,
970 help="Ignore the consistency of the disks on"
973 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
974 dest="allow_failover",
975 action="store_true", default=False,
976 help="If migration is not possible fallback to"
979 NONLIVE_OPT = cli_option("--non-live", dest="live",
980 default=True, action="store_false",
981 help="Do a non-live migration (this usually means"
982 " freeze the instance, save the state, transfer and"
983 " only then resume running on the secondary node)")
985 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
987 choices=list(constants.HT_MIGRATION_MODES),
988 help="Override default migration mode (choose"
989 " either live or non-live")
991 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
992 help="Target node and optional secondary node",
993 metavar="<pnode>[:<snode>]",
994 completion_suggest=OPT_COMPL_INST_ADD_NODES)
996 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
997 action="append", metavar="<node>",
998 help="Use only this node (can be used multiple"
999 " times, if not given defaults to all nodes)",
1000 completion_suggest=OPT_COMPL_ONE_NODE)
1002 NODEGROUP_OPT_NAME = "--node-group"
1003 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1005 help="Node group (name or uuid)",
1006 metavar="<nodegroup>",
1007 default=None, type="string",
1008 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1010 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1012 completion_suggest=OPT_COMPL_ONE_NODE)
1014 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1015 action="store_false",
1016 help="Don't start the instance after creation")
1018 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1019 action="store_true", default=False,
1020 help="Show command instead of executing it")
1022 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1023 default=False, action="store_true",
1024 help="Instead of performing the migration, try to"
1025 " recover from a failed cleanup. This is safe"
1026 " to run even if the instance is healthy, but it"
1027 " will create extra replication traffic and "
1028 " disrupt briefly the replication (like during the"
1031 STATIC_OPT = cli_option("-s", "--static", dest="static",
1032 action="store_true", default=False,
1033 help="Only show configuration data, not runtime data")
1035 ALL_OPT = cli_option("--all", dest="show_all",
1036 default=False, action="store_true",
1037 help="Show info on all instances on the cluster."
1038 " This can take a long time to run, use wisely")
1040 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1041 action="store_true", default=False,
1042 help="Interactive OS reinstall, lists available"
1043 " OS templates for selection")
1045 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1046 action="store_true", default=False,
1047 help="Remove the instance from the cluster"
1048 " configuration even if there are failures"
1049 " during the removal process")
1051 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1052 dest="ignore_remove_failures",
1053 action="store_true", default=False,
1054 help="Remove the instance from the"
1055 " cluster configuration even if there"
1056 " are failures during the removal"
1059 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1060 action="store_true", default=False,
1061 help="Remove the instance from the cluster")
1063 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1064 help="Specifies the new node for the instance",
1065 metavar="NODE", default=None,
1066 completion_suggest=OPT_COMPL_ONE_NODE)
1068 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1069 help="Specifies the new secondary node",
1070 metavar="NODE", default=None,
1071 completion_suggest=OPT_COMPL_ONE_NODE)
1073 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1074 help="Specifies the new primary node",
1075 metavar="<node>", default=None,
1076 completion_suggest=OPT_COMPL_ONE_NODE)
1078 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1079 default=False, action="store_true",
1080 help="Replace the disk(s) on the primary"
1081 " node (applies only to internally mirrored"
1082 " disk templates, e.g. %s)" %
1083 utils.CommaJoin(constants.DTS_INT_MIRROR))
1085 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1086 default=False, action="store_true",
1087 help="Replace the disk(s) on the secondary"
1088 " node (applies only to internally mirrored"
1089 " disk templates, e.g. %s)" %
1090 utils.CommaJoin(constants.DTS_INT_MIRROR))
1092 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1093 default=False, action="store_true",
1094 help="Lock all nodes and auto-promote as needed"
1097 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1098 default=False, action="store_true",
1099 help="Automatically replace faulty disks"
1100 " (applies only to internally mirrored"
1101 " disk templates, e.g. %s)" %
1102 utils.CommaJoin(constants.DTS_INT_MIRROR))
1104 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1105 default=False, action="store_true",
1106 help="Ignore current recorded size"
1107 " (useful for forcing activation when"
1108 " the recorded size is wrong)")
1110 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1112 completion_suggest=OPT_COMPL_ONE_NODE)
1114 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1117 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1118 help="Specify the secondary ip for the node",
1119 metavar="ADDRESS", default=None)
1121 READD_OPT = cli_option("--readd", dest="readd",
1122 default=False, action="store_true",
1123 help="Readd old node after replacing it")
1125 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1126 default=True, action="store_false",
1127 help="Disable SSH key fingerprint checking")
1129 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1130 default=False, action="store_true",
1131 help="Force the joining of a node")
1133 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1134 type="bool", default=None, metavar=_YORNO,
1135 help="Set the master_candidate flag on the node")
1137 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1138 type="bool", default=None,
1139 help=("Set the offline flag on the node"
1140 " (cluster does not communicate with offline"
1143 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1144 type="bool", default=None,
1145 help=("Set the drained flag on the node"
1146 " (excluded from allocation operations)"))
1148 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1149 type="bool", default=None, metavar=_YORNO,
1150 help="Set the master_capable flag on the node")
1152 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1153 type="bool", default=None, metavar=_YORNO,
1154 help="Set the vm_capable flag on the node")
1156 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1157 type="bool", default=None, metavar=_YORNO,
1158 help="Set the allocatable flag on a volume")
1160 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1161 help="Disable support for lvm based instances"
1163 action="store_false", default=True)
1165 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1166 dest="enabled_hypervisors",
1167 help="Comma-separated list of hypervisors",
1168 type="string", default=None)
1170 # FIXME: Remove once enabled disk templates are fully implemented.
1171 ENABLED_STORAGE_TYPES_OPT = cli_option("--enabled-storage-types",
1172 dest="enabled_storage_types",
1173 help="Comma-separated list of "
1175 type="string", default=None)
1177 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1178 dest="enabled_disk_templates",
1179 help="Comma-separated list of "
1181 type="string", default=None)
1183 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1184 type="keyval", default={},
1185 help="NIC parameters")
1187 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1188 dest="candidate_pool_size", type="int",
1189 help="Set the candidate pool size")
1191 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1192 help=("Enables LVM and specifies the volume group"
1193 " name (cluster-wide) for disk allocation"
1194 " [%s]" % constants.DEFAULT_VG),
1195 metavar="VG", default=None)
1197 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1198 help="Destroy cluster", action="store_true")
1200 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1201 help="Skip node agreement check (dangerous)",
1202 action="store_true", default=False)
1204 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1205 help="Specify the mac prefix for the instance IP"
1206 " addresses, in the format XX:XX:XX",
1210 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1211 help="Specify the node interface (cluster-wide)"
1212 " on which the master IP address will be added"
1213 " (cluster init default: %s)" %
1214 constants.DEFAULT_BRIDGE,
1218 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1219 help="Specify the netmask of the master IP",
1223 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1224 dest="use_external_mip_script",
1225 help="Specify whether to run a"
1226 " user-provided script for the master"
1227 " IP address turnup and"
1228 " turndown operations",
1229 type="bool", metavar=_YORNO, default=None)
1231 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1232 help="Specify the default directory (cluster-"
1233 "wide) for storing the file-based disks [%s]" %
1234 pathutils.DEFAULT_FILE_STORAGE_DIR,
1236 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1238 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1239 "--shared-file-storage-dir",
1240 dest="shared_file_storage_dir",
1241 help="Specify the default directory (cluster-wide) for storing the"
1242 " shared file-based disks [%s]" %
1243 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1244 metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1246 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1247 help="Don't modify %s" % pathutils.ETC_HOSTS,
1248 action="store_false", default=True)
1250 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1251 help="Don't initialize SSH keys",
1252 action="store_false", default=True)
1254 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1255 help="Enable parseable error messages",
1256 action="store_true", default=False)
1258 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1259 help="Skip N+1 memory redundancy tests",
1260 action="store_true", default=False)
1262 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1263 help="Type of reboot: soft/hard/full",
1264 default=constants.INSTANCE_REBOOT_HARD,
1266 choices=list(constants.REBOOT_TYPES))
1268 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1269 dest="ignore_secondaries",
1270 default=False, action="store_true",
1271 help="Ignore errors from secondaries")
1273 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1274 action="store_false", default=True,
1275 help="Don't shutdown the instance (unsafe)")
1277 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1278 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1279 help="Maximum time to wait")
1281 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1282 dest="shutdown_timeout", type="int",
1283 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1284 help="Maximum time to wait for instance"
1287 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1289 help=("Number of seconds between repetions of the"
1292 EARLY_RELEASE_OPT = cli_option("--early-release",
1293 dest="early_release", default=False,
1294 action="store_true",
1295 help="Release the locks on the secondary"
1298 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1299 dest="new_cluster_cert",
1300 default=False, action="store_true",
1301 help="Generate a new cluster certificate")
1303 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1305 help="File containing new RAPI certificate")
1307 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1308 default=None, action="store_true",
1309 help=("Generate a new self-signed RAPI"
1312 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1314 help="File containing new SPICE certificate")
1316 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1318 help="File containing the certificate of the CA"
1319 " which signed the SPICE certificate")
1321 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1322 dest="new_spice_cert", default=None,
1323 action="store_true",
1324 help=("Generate a new self-signed SPICE"
1327 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1328 dest="new_confd_hmac_key",
1329 default=False, action="store_true",
1330 help=("Create a new HMAC key for %s" %
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1339 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1340 dest="new_cluster_domain_secret",
1341 default=False, action="store_true",
1342 help=("Create a new cluster domain"
1345 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1346 dest="use_replication_network",
1347 help="Whether to use the replication network"
1348 " for talking to the nodes",
1349 action="store_true", default=False)
1351 MAINTAIN_NODE_HEALTH_OPT = \
1352 cli_option("--maintain-node-health", dest="maintain_node_health",
1353 metavar=_YORNO, default=None, type="bool",
1354 help="Configure the cluster to automatically maintain node"
1355 " health, by shutting down unknown instances, shutting down"
1356 " unknown DRBD devices, etc.")
1358 IDENTIFY_DEFAULTS_OPT = \
1359 cli_option("--identify-defaults", dest="identify_defaults",
1360 default=False, action="store_true",
1361 help="Identify which saved instance parameters are equal to"
1362 " the current cluster defaults and set them as such, instead"
1363 " of marking them as overridden")
1365 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1366 action="store", dest="uid_pool",
1367 help=("A list of user-ids or user-id"
1368 " ranges separated by commas"))
1370 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1371 action="store", dest="add_uids",
1372 help=("A list of user-ids or user-id"
1373 " ranges separated by commas, to be"
1374 " added to the user-id pool"))
1376 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1377 action="store", dest="remove_uids",
1378 help=("A list of user-ids or user-id"
1379 " ranges separated by commas, to be"
1380 " removed from the user-id pool"))
1382 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1383 action="store", dest="reserved_lvs",
1384 help=("A comma-separated list of reserved"
1385 " logical volumes names, that will be"
1386 " ignored by cluster verify"))
1388 ROMAN_OPT = cli_option("--roman",
1389 dest="roman_integers", default=False,
1390 action="store_true",
1391 help="Use roman numbers for positive integers")
1393 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1394 action="store", default=None,
1395 help="Specifies usermode helper for DRBD")
1397 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1398 action="store_false", default=True,
1399 help="Disable support for DRBD")
1401 PRIMARY_IP_VERSION_OPT = \
1402 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1403 action="store", dest="primary_ip_version",
1404 metavar="%d|%d" % (constants.IP4_VERSION,
1405 constants.IP6_VERSION),
1406 help="Cluster-wide IP version for primary IP")
1408 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1409 action="store_true",
1410 help="Show machine name for every line in output")
1412 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1413 action="store_true",
1414 help=("Hide successful results and show failures"
1415 " only (determined by the exit code)"))
1417 REASON_OPT = cli_option("--reason", default=None,
1418 help="The reason for executing a VM-state-changing"
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name into its numeric value and
  stores it on the parser's values object.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
1431 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1432 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1433 choices=_PRIONAME_TO_VALUE.keys(),
1434 action="callback", type="choice",
1435 callback=_PriorityOptionCb,
1436 help="Priority for opcode processing")
1438 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1439 type="bool", default=None, metavar=_YORNO,
1440 help="Sets the hidden flag on the OS")
1442 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1443 type="bool", default=None, metavar=_YORNO,
1444 help="Sets the blacklisted flag on the OS")
1446 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1447 type="bool", metavar=_YORNO,
1448 dest="prealloc_wipe_disks",
1449 help=("Wipe disks prior to instance"
1452 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1453 type="keyval", default=None,
1454 help="Node parameters")
1456 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1457 action="store", metavar="POLICY", default=None,
1458 help="Allocation policy for the node group")
1460 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1461 type="bool", metavar=_YORNO,
1462 dest="node_powered",
1463 help="Specify if the SoR for node is powered")
1465 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1466 default=constants.OOB_TIMEOUT,
1467 help="Maximum time to wait for out-of-band helper")
1469 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1470 default=constants.OOB_POWER_DELAY,
1471 help="Time in seconds to wait between power-ons")
1473 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1474 action="store_true", default=False,
1475 help=("Whether command argument should be treated"
1478 NO_REMEMBER_OPT = cli_option("--no-remember",
1480 action="store_true", default=False,
1481 help="Perform but do not record the change"
1482 " in the configuration")
1484 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1485 default=False, action="store_true",
1486 help="Evacuate primary instances only")
1488 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1489 default=False, action="store_true",
1490 help="Evacuate secondary instances only"
1491 " (applies only to internally mirrored"
1492 " disk templates, e.g. %s)" %
1493 utils.CommaJoin(constants.DTS_INT_MIRROR))
1495 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1496 action="store_true", default=False,
1497 help="Pause instance at startup")
1499 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1500 help="Destination node group (name or uuid)",
1501 default=None, action="append",
1502 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1504 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1505 action="append", dest="ignore_errors",
1506 choices=list(constants.CV_ALL_ECODES_STRINGS),
1507 help="Error code to be ignored")
1509 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1511 help=("Specify disk state information in the"
1513 " storage_type/identifier:option=value,...;"
1514 " note this is unused for now"),
1517 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1519 help=("Specify hypervisor state information in the"
1520 " format hypervisor:option=value,...;"
1521 " note this is unused for now"),
1524 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1525 action="store_true", default=False,
1526 help="Ignore instance policy violations")
1528 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1529 help="Sets the instance's runtime memory,"
1530 " ballooning it up or down to the new value",
1531 default=None, type="unit", metavar="<size>")
1533 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1534 action="store_true", default=False,
1535 help="Marks the grow as absolute instead of the"
1536 " (default) relative mode")
1538 NETWORK_OPT = cli_option("--network",
1539 action="store", default=None, dest="network",
1540 help="IP network in CIDR notation")
1542 GATEWAY_OPT = cli_option("--gateway",
1543 action="store", default=None, dest="gateway",
1544 help="IP address of the router (gateway)")
1546 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1547 action="store", default=None,
1548 dest="add_reserved_ips",
1549 help="Comma-separated list of"
1550 " reserved IPs to add")
1552 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1553 action="store", default=None,
1554 dest="remove_reserved_ips",
1555 help="Comma-delimited list of"
1556 " reserved IPs to remove")
1558 NETWORK6_OPT = cli_option("--network6",
1559 action="store", default=None, dest="network6",
1560 help="IP network in CIDR notation")
1562 GATEWAY6_OPT = cli_option("--gateway6",
1563 action="store", default=None, dest="gateway6",
1564 help="IP6 address of the router (gateway)")
1566 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1567 dest="conflicts_check",
1569 action="store_false",
1570 help="Don't check for conflicting IPs")
1572 #: Options provided by all commands
1573 COMMON_OPTS = [DEBUG_OPT]
1575 # common options for creating instances. add and import then add their own
1577 COMMON_CREATE_OPTS = [
1582 FILESTORE_DRIVER_OPT,
1588 NOCONFLICTSCHECK_OPT,
1600 # common instance policy options
1601 INSTANCE_POLICY_OPTS = [
1602 SPECS_CPU_COUNT_OPT,
1603 SPECS_DISK_COUNT_OPT,
1604 SPECS_DISK_SIZE_OPT,
1606 SPECS_NIC_COUNT_OPT,
1607 IPOLICY_DISK_TEMPLATES,
1609 IPOLICY_SPINDLE_RATIO,
class _ShowUsage(Exception):
  """Exception used by L{_ParseArgs} to request printing the usage text.

  """
  def __init__(self, exit_error):
    """Initializes this class.

    @type exit_error: bool
    @param exit_error: Whether the process should report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error
1628 class _ShowVersion(Exception):
1629 """Exception class for L{_ParseArgs}.
1634 def _ParseArgs(binary, argv, commands, aliases, env_override):
1635 """Parser for the command line arguments.
1637 This function parses the arguments and returns the function which
1638 must be executed together with its (modified) arguments.
1640 @param binary: Script name
1641 @param argv: Command line arguments
1642 @param commands: Dictionary containing command definitions
1643 @param aliases: dictionary with command aliases {"alias": "target", ...}
1644 @param env_override: list of env variables allowed for default args
1645 @raise _ShowUsage: If usage description should be shown
1646 @raise _ShowVersion: If version should be shown
1649 assert not (env_override - set(commands))
1650 assert not (set(aliases.keys()) & set(commands.keys()))
1655 # No option or command given
1656 raise _ShowUsage(exit_error=True)
1658 if cmd == "--version":
1659 raise _ShowVersion()
1660 elif cmd == "--help":
1661 raise _ShowUsage(exit_error=False)
1662 elif not (cmd in commands or cmd in aliases):
1663 raise _ShowUsage(exit_error=True)
1665 # get command, unalias it, and look it up in commands
1667 if aliases[cmd] not in commands:
1668 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1669 " command '%s'" % (cmd, aliases[cmd]))
1673 if cmd in env_override:
1674 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1675 env_args = os.environ.get(args_env_name)
1677 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1679 func, args_def, parser_opts, usage, description = commands[cmd]
1680 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1681 description=description,
1682 formatter=TitledHelpFormatter(),
1683 usage="%%prog %s %s" % (cmd, usage))
1684 parser.disable_interspersed_args()
1685 options, args = parser.parse_args(args=argv[2:])
1687 if not _CheckArguments(cmd, args_def, args):
1688 return None, None, None
1690 return func, options, args
1693 def _FormatUsage(binary, commands):
1694 """Generates a nice description of all commands.
1696 @param binary: Script name
1697 @param commands: Dictionary containing command definitions
1700 # compute the max line length for cmd + usage
1701 mlen = min(60, max(map(len, commands)))
1703 yield "Usage: %s {command} [options...] [argument...]" % binary
1704 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1708 # and format a nice command list
1709 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1710 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1711 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1712 for line in help_lines:
1713 yield " %-*s %s" % (mlen, "", line)
1718 def _CheckArguments(cmd, args_def, args):
1719 """Verifies the arguments using the argument definition.
1723 1. Abort with error if values specified by user but none expected.
1725 1. For each argument in definition
1727 1. Keep running count of minimum number of values (min_count)
1728 1. Keep running count of maximum number of values (max_count)
1729 1. If it has an unlimited number of values
1731 1. Abort with error if it's not the last argument in the definition
1733 1. If last argument has limited number of values
1735 1. Abort with error if number of values doesn't match or is too large
1737 1. Abort with error if user didn't pass enough values (min_count)
1740 if args and not args_def:
1741 ToStderr("Error: Command %s expects no arguments", cmd)
1748 last_idx = len(args_def) - 1
1750 for idx, arg in enumerate(args_def):
1751 if min_count is None:
1753 elif arg.min is not None:
1754 min_count += arg.min
1756 if max_count is None:
1758 elif arg.max is not None:
1759 max_count += arg.max
1762 check_max = (arg.max is not None)
1764 elif arg.max is None:
1765 raise errors.ProgrammerError("Only the last argument can have max=None")
1768 # Command with exact number of arguments
1769 if (min_count is not None and max_count is not None and
1770 min_count == max_count and len(args) != min_count):
1771 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1774 # Command with limited number of arguments
1775 if max_count is not None and len(args) > max_count:
1776 ToStderr("Error: Command %s expects only %d argument(s)",
1780 # Command with some required arguments
1781 if min_count is not None and len(args) < min_count:
1782 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  Returns a (primary, secondary) pair; the secondary part is C{None}
  when no colon separator is present.

  """
  if not value or ":" not in value:
    return (value, None)
  # Split only on the first colon so the secondary part may itself
  # contain colons
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # No variants: the base name is the only valid name
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @param default: Default fields

  """
  if selected is None:
    result = default
  elif selected.startswith("+"):
    # Leading "+" means "extend the defaults" rather than replace them
    result = default + selected[1:].split(",")
  else:
    result = selected.split(",")
  return result
1834 UsesRPC = rpc.RunWithRPC
1837 def AskUser(text, choices=None):
1838 """Ask the user a question.
1840 @param text: the question to ask
1842 @param choices: list with elements tuples (input_char, return_value,
1843 description); if not given, it will default to: [('y', True,
1844 'Perform the operation'), ('n', False, 'Do no do the operation')];
1845 note that the '?' char is reserved for help
1847 @return: one of the return values from the choices list; if input is
1848 not possible (i.e. not running with a tty, we return the last
1853 choices = [("y", True, "Perform the operation"),
1854 ("n", False, "Do not perform the operation")]
1855 if not choices or not isinstance(choices, list):
1856 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1857 for entry in choices:
1858 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1859 raise errors.ProgrammerError("Invalid choices element to AskUser")
1861 answer = choices[-1][1]
1863 for line in text.splitlines():
1864 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1865 text = "\n".join(new_text)
1867 f = file("/dev/tty", "a+")
1871 chars = [entry[0] for entry in choices]
1872 chars[-1] = "[%s]" % chars[-1]
1874 maps = dict([(entry[0], entry[1]) for entry in choices])
1878 f.write("/".join(chars))
1880 line = f.readline(2).strip().lower()
1885 for entry in choices:
1886 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Signals that a job was submitted and the client should exit.

  The single argument of this exception is the ID of the job that was
  submitted; handlers are expected to print this ID.

  This is not an error condition, merely a structured way for clients
  to terminate.

  """
1905 def SendJob(ops, cl=None):
1906 """Function to submit an opcode without waiting for the results.
1909 @param ops: list of opcodes
1910 @type cl: luxi.Client
1911 @param cl: the luxi client to use for communicating with the master;
1912 if None, a new client will be created
1918 job_id = cl.SubmitJob(ops)
1923 def GenericPollJob(job_id, cbs, report_cbs):
1924 """Generic job-polling function.
1926 @type job_id: number
1927 @param job_id: Job ID
1928 @type cbs: Instance of L{JobPollCbBase}
1929 @param cbs: Data callbacks
1930 @type report_cbs: Instance of L{JobPollReportCbBase}
1931 @param report_cbs: Reporting callbacks
1934 prev_job_info = None
1935 prev_logmsg_serial = None
1940 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1943 # job not found, go away!
1944 raise errors.JobLost("Job with id %s lost" % job_id)
1946 if result == constants.JOB_NOTCHANGED:
1947 report_cbs.ReportNotChanged(job_id, status)
1952 # Split result, a tuple of (field values, log entries)
1953 (job_info, log_entries) = result
1954 (status, ) = job_info
1957 for log_entry in log_entries:
1958 (serial, timestamp, log_type, message) = log_entry
1959 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1961 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1963 # TODO: Handle canceled and archived jobs
1964 elif status in (constants.JOB_STATUS_SUCCESS,
1965 constants.JOB_STATUS_ERROR,
1966 constants.JOB_STATUS_CANCELING,
1967 constants.JOB_STATUS_CANCELED):
1970 prev_job_info = job_info
1972 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1974 raise errors.JobLost("Job with id %s lost" % job_id)
1976 status, opstatus, result = jobs[0]
1978 if status == constants.JOB_STATUS_SUCCESS:
1981 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1982 raise errors.OpExecError("Job was canceled")
1985 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1986 if status == constants.OP_STATUS_SUCCESS:
1988 elif status == constants.OP_STATUS_ERROR:
1989 errors.MaybeRaise(msg)
1992 raise errors.OpExecError("partial failure (opcode %d): %s" %
1995 raise errors.OpExecError(str(msg))
1997 # default failure mode
1998 raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} data callbacks.

  Subclasses must provide the two data-retrieval methods below.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses must provide the two reporting methods below.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    # Luxi client used for all job queries
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job, delegating to the luxi client.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields, prev_job_info,
                                        prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
2079 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2080 def __init__(self, feedback_fn):
2081 """Initializes this class.
2084 JobPollReportCbBase.__init__(self)
2086 self.feedback_fn = feedback_fn
2088 assert callable(feedback_fn)
2090 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2091 """Handles a log message.
2094 self.feedback_fn((timestamp, log_type, log_msg))
2096 def ReportNotChanged(self, job_id, status):
2097 """Called if a job hasn't changed in a while.
class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # One-time notification flags, so each state is only reported once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message by printing it to standard output.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message log entries are stringified before encoding.

  """
  if log_type == constants.ELOG_MESSAGE:
    return utils.SafeEncode(log_msg)
  return utils.SafeEncode(str(log_msg))
2146 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2147 """Function to poll for the result of a job.
2149 @type job_id: job identified
2150 @param job_id: the job to poll for results
2151 @type cl: luxi.Client
2152 @param cl: the luxi client to use for communicating with the master;
2153 if None, a new client will be created
2159 if reporter is None:
2161 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2163 reporter = StdioJobPollReportCb()
2165 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2167 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2170 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2171 """Legacy function to submit an opcode.
2173 This is just a simple wrapper over the construction of the processor
2174 instance. It should be extended to better handle feedback and
2175 interaction functions.
2181 SetGenericOpcodeOpts([op], opts)
2183 job_id = SendJob([op], cl=cl)
2185 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2188 return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function decides, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It also processes the opcodes when sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  job = [op]
  SetGenericOpcodeOpts(job, opts)
  raise JobSubmittedException(SendJob(job, cl=cl))
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    priority = getattr(options, "priority", None)
    if priority is not None:
      op.priority = priority
2233 def GetClient(query=False):
2234 """Connects to the a luxi socket and returns a client.
2236 @type query: boolean
2237 @param query: this signifies that the client will only be
2238 used for queries; if the build-time parameter
2239 enable-split-queries is enabled, then the client will be
2240 connected to the query socket instead of the masterd socket
2243 override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2245 if override_socket == constants.LUXI_OVERRIDE_MASTER:
2246 address = pathutils.MASTER_SOCKET
2247 elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2248 address = pathutils.QUERY_SOCKET
2250 address = override_socket
2251 elif query and constants.ENABLE_SPLIT_QUERY:
2252 address = pathutils.QUERY_SOCKET
2255 # TODO: Cache object?
2257 client = luxi.Client(address=address)
2258 except luxi.NoMasterError:
2259 ss = ssconf.SimpleStore()
2261 # Try to read ssconf file
2264 except errors.ConfigurationError:
2265 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2266 " not part of a cluster",
2269 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2270 if master != myself:
2271 raise errors.OpPrereqError("This is not the master node, please connect"
2272 " to node '%s' and rerun the command" %
2273 master, errors.ECODE_INVAL)
2278 def FormatError(err):
2279 """Return a formatted error message for a given error.
2281 This function takes an exception instance and returns a tuple
2282 consisting of two values: first, the recommended exit code, and
2283 second, a string describing the error message (not
2284 newline-terminated).
2290 if isinstance(err, errors.ConfigurationError):
2291 txt = "Corrupt configuration file: %s" % msg
2293 obuf.write(txt + "\n")
2294 obuf.write("Aborting.")
2296 elif isinstance(err, errors.HooksAbort):
2297 obuf.write("Failure: hooks execution failed:\n")
2298 for node, script, out in err.args[0]:
2300 obuf.write(" node: %s, script: %s, output: %s\n" %
2301 (node, script, out))
2303 obuf.write(" node: %s, script: %s (no output)\n" %
2305 elif isinstance(err, errors.HooksFailure):
2306 obuf.write("Failure: hooks general failure: %s" % msg)
2307 elif isinstance(err, errors.ResolverError):
2308 this_host = netutils.Hostname.GetSysName()
2309 if err.args[0] == this_host:
2310 msg = "Failure: can't resolve my own hostname ('%s')"
2312 msg = "Failure: can't resolve hostname '%s'"
2313 obuf.write(msg % err.args[0])
2314 elif isinstance(err, errors.OpPrereqError):
2315 if len(err.args) == 2:
2316 obuf.write("Failure: prerequisites not met for this"
2317 " operation:\nerror type: %s, error details:\n%s" %
2318 (err.args[1], err.args[0]))
2320 obuf.write("Failure: prerequisites not met for this"
2321 " operation:\n%s" % msg)
2322 elif isinstance(err, errors.OpExecError):
2323 obuf.write("Failure: command execution error:\n%s" % msg)
2324 elif isinstance(err, errors.TagError):
2325 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2326 elif isinstance(err, errors.JobQueueDrainError):
2327 obuf.write("Failure: the job queue is marked for drain and doesn't"
2328 " accept new requests\n")
2329 elif isinstance(err, errors.JobQueueFull):
2330 obuf.write("Failure: the job queue is full and doesn't accept new"
2331 " job submissions until old jobs are archived\n")
2332 elif isinstance(err, errors.TypeEnforcementError):
2333 obuf.write("Parameter Error: %s" % msg)
2334 elif isinstance(err, errors.ParameterError):
2335 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2336 elif isinstance(err, luxi.NoMasterError):
2337 if err.args[0] == pathutils.MASTER_SOCKET:
2338 daemon = "the master daemon"
2339 elif err.args[0] == pathutils.QUERY_SOCKET:
2340 daemon = "the config daemon"
2342 daemon = "socket '%s'" % str(err.args[0])
2343 obuf.write("Cannot communicate with %s.\nIs the process running"
2344 " and listening for connections?" % daemon)
2345 elif isinstance(err, luxi.TimeoutError):
2346 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2347 " been submitted and will continue to run even if the call"
2348 " timed out. Useful commands in this situation are \"gnt-job"
2349 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2351 elif isinstance(err, luxi.PermissionError):
2352 obuf.write("It seems you don't have permissions to connect to the"
2353 " master daemon.\nPlease retry as a different user.")
2354 elif isinstance(err, luxi.ProtocolError):
2355 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2357 elif isinstance(err, errors.JobLost):
2358 obuf.write("Error checking job status: %s" % msg)
2359 elif isinstance(err, errors.QueryFilterParseError):
2360 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2361 obuf.write("\n".join(err.GetDetails()))
2362 elif isinstance(err, errors.GenericError):
2363 obuf.write("Unhandled Ganeti error: %s" % msg)
2364 elif isinstance(err, JobSubmittedException):
2365 obuf.write("JobID: %s\n" % err.args[0])
2368 obuf.write("Unhandled exception: %s" % msg)
2369 return retcode, obuf.getvalue().rstrip("\n")
# NOTE(review): this listing carries original line-number prefixes and has
# elided lines (e.g. the try/else around _ParseArgs and the final return);
# comments below describe only what the visible lines show.
2372 def GenericMain(commands, override=None, aliases=None,
2373                 env_override=frozenset()):
2374   """Generic main function for all the gnt-* commands.
2376   @param commands: a dictionary with a special structure, see the design doc
2377       for command line handling.
2378   @param override: if not None, we expect a dictionary with keys that will
2379       override command line options; this can be used to pass
2380       options from the scripts to generic functions
2381   @param aliases: dictionary with command aliases {'alias': 'target, ...}
2382   @param env_override: list of environment names which are allowed to submit
2383       default args for commands
2386 # save the program name and the entire command line for later logging
2388 binary = os.path.basename(sys.argv[0])
2390 binary = sys.argv[0]
2392 if len(sys.argv) >= 2:
2393 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2397 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2399 binary = "<unknown program>"
2400 cmdline = "<unknown>"
# Parse the command line; _ParseArgs reports --version/--usage via the
# private _ShowVersion/_ShowUsage exceptions, mapped here to exit codes.
2406 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2408 except _ShowVersion:
2409 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2410 constants.RELEASE_VERSION)
2411 return constants.EXIT_SUCCESS
2412 except _ShowUsage, err:
2413 for line in _FormatUsage(binary, commands):
2417 return constants.EXIT_FAILURE
2419 return constants.EXIT_SUCCESS
2420 except errors.ParameterError, err:
2421 result, err_msg = FormatError(err)
2425 if func is None: # parse error
# Caller-supplied overrides win over parsed command-line options.
2428 if override is not None:
2429 for key, val in override.iteritems():
2430 setattr(options, key, val)
2432 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2433 stderr_logging=True)
2435 logging.info("Command line: %s", cmdline)
# Run the selected command function, converting known errors to messages.
2438 result = func(options, args)
2439 except (errors.GenericError, luxi.ProtocolError,
2440 JobSubmittedException), err:
2441 result, err_msg = FormatError(err)
2442 logging.exception("Error during command processing")
2444 except KeyboardInterrupt:
2445 result = constants.EXIT_FAILURE
2446 ToStderr("Aborted. Note that if the operation created any jobs, they"
2447 " might have been submitted and"
2448 " will continue to run in the background.")
2449 except IOError, err:
2450 if err.errno == errno.EPIPE:
2451 # our terminal went away, we'll exit
2452 sys.exit(constants.EXIT_FAILURE)
# NOTE(review): gappy listing — the lines assigning parsed dicts back into
# the result list and the final return appear elided.
2459 def ParseNicOption(optvalue):
2460 """Parses the value of the --net option(s).
# Highest NIC index + 1 gives the number of NIC slots to allocate.
2464 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2465 except (TypeError, ValueError), err:
2466 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
# NOTE(review): [{}] * nic_max makes every slot alias the SAME empty dict;
# harmless only if untouched slots are never mutated — verify downstream.
2469 nics = [{}] * nic_max
2470 for nidx, ndict in optvalue:
2473 if not isinstance(ndict, dict):
2474 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2475 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
# Coerce each provided NIC dict's values to the declared parameter types.
2477 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
# NOTE(review): gappy listing — several branches (try:, else:, variable
# assignments such as os_type/src_node defaults) are elided.
2484 def GenericInstanceCreate(mode, opts, args):
2485 """Add an instance to the cluster via either creation or import.
2487 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2488 @param opts: the command line options selected by the user
2490 @param args: should contain only one element, the new instance name
2492 @return: the desired exit code
2497 (pnode, snode) = SplitNodeOption(opts.node)
2502 hypervisor, hvparams = opts.hypervisor
# NIC handling: explicit --net options, or (for create) one all-auto NIC.
2505 nics = ParseNicOption(opts.nics)
2509 elif mode == constants.INSTANCE_CREATE:
2510 # default of one nic, all auto
# Disk argument validation: diskless templates must not get disk info,
# and --disk/-s are mutually exclusive.
2516 if opts.disk_template == constants.DT_DISKLESS:
2517 if opts.disks or opts.sd_size is not None:
2518 raise errors.OpPrereqError("Diskless instance but disk"
2519 " information passed", errors.ECODE_INVAL)
2522 if (not opts.disks and not opts.sd_size
2523 and mode == constants.INSTANCE_CREATE):
2524 raise errors.OpPrereqError("No disk information specified",
2526 if opts.disks and opts.sd_size is not None:
2527 raise errors.OpPrereqError("Please use either the '--disk' or"
2528 " '-s' option", errors.ECODE_INVAL)
2529 if opts.sd_size is not None:
2530 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2534 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2535 except ValueError, err:
2536 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2538 disks = [{}] * disk_max
2541 for didx, ddict in opts.disks:
2543 if not isinstance(ddict, dict):
2544 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2545 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
# Each disk must specify exactly one of 'size' or 'adopt'.
2546 elif constants.IDISK_SIZE in ddict:
2547 if constants.IDISK_ADOPT in ddict:
2548 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2549 " (disk %d)" % didx, errors.ECODE_INVAL)
2551 ddict[constants.IDISK_SIZE] = \
2552 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2553 except ValueError, err:
2554 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2555 (didx, err), errors.ECODE_INVAL)
2556 elif constants.IDISK_ADOPT in ddict:
2557 if mode == constants.INSTANCE_IMPORT:
2558 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2559 " import", errors.ECODE_INVAL)
2560 ddict[constants.IDISK_SIZE] = 0
2562 raise errors.OpPrereqError("Missing size or adoption source for"
2563 " disk %d" % didx, errors.ECODE_INVAL)
2566 if opts.tags is not None:
2567 tags = opts.tags.split(",")
# Type-check backend and hypervisor parameter dicts in place.
2571 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2572 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
# Mode-specific settings for create vs. import.
2574 if mode == constants.INSTANCE_CREATE:
2577 force_variant = opts.force_variant
2580 no_install = opts.no_install
2581 identify_defaults = False
2582 elif mode == constants.INSTANCE_IMPORT:
2585 force_variant = False
2586 src_node = opts.src_node
2587 src_path = opts.src_dir
2589 identify_defaults = opts.identify_defaults
2591 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
# Build and submit the actual OpInstanceCreate opcode.
2593 op = opcodes.OpInstanceCreate(instance_name=instance,
2595 disk_template=opts.disk_template,
2597 conflicts_check=opts.conflicts_check,
2598 pnode=pnode, snode=snode,
2599 ip_check=opts.ip_check,
2600 name_check=opts.name_check,
2601 wait_for_sync=opts.wait_for_sync,
2602 file_storage_dir=opts.file_storage_dir,
2603 file_driver=opts.file_driver,
2604 iallocator=opts.iallocator,
2605 hypervisor=hypervisor,
2607 beparams=opts.beparams,
2608 osparams=opts.osparams,
2612 force_variant=force_variant,
2616 no_install=no_install,
2617 identify_defaults=identify_defaults,
2618 ignore_ipolicy=opts.ignore_ipolicy)
2620 SubmitOrSend(op, opts)
# NOTE(review): gappy listing — else:/try:/finally: framing lines of the
# methods below are elided.
2624 class _RunWhileClusterStoppedHelper:
2625 """Helper class for L{RunWhileClusterStopped} to simplify state management
2628 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2629 """Initializes this class.
2631 @type feedback_fn: callable
2632 @param feedback_fn: Feedback function
2633 @type cluster_name: string
2634 @param cluster_name: Cluster name
2635 @type master_node: string
2636 @param master_node: Master node name
2637 @type online_nodes: list
2638 @param online_nodes: List of names of online nodes
2641 self.feedback_fn = feedback_fn
2642 self.cluster_name = cluster_name
2643 self.master_node = master_node
2644 self.online_nodes = online_nodes
2646 self.ssh = ssh.SshRunner(self.cluster_name)
# All online nodes except the master; commands on these go over SSH.
2648 self.nonmaster_nodes = [name for name in online_nodes
2649 if name != master_node]
2651 assert self.master_node not in self.nonmaster_nodes
2653 def _RunCmd(self, node_name, cmd):
2654 """Runs a command on the local or a remote machine.
2656 @type node_name: string
2657 @param node_name: Machine name
# Run locally when targeting the master (or None), over SSH otherwise;
# any failure is turned into an OpExecError with exit code and output.
2662 if node_name is None or node_name == self.master_node:
2663 # No need to use SSH
2664 result = utils.RunCmd(cmd)
2666 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2667 utils.ShellQuoteArgs(cmd))
2670 errmsg = ["Failed to run command %s" % result.cmd]
2672 errmsg.append("on node %s" % node_name)
2673 errmsg.append(": exitcode %s and error %s" %
2674 (result.exit_code, result.output))
2675 raise errors.OpExecError(" ".join(errmsg))
2677 def Call(self, fn, *args):
2678 """Call function while all daemons are stopped.
2681 @param fn: Function to be called
2684 # Pause watcher by acquiring an exclusive lock on watcher state file
2685 self.feedback_fn("Blocking watcher")
2686 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2688 # TODO: Currently, this just blocks. There's no timeout.
2689 # TODO: Should it be a shared lock?
2690 watcher_block.Exclusive(blocking=True)
2692 # Stop master daemons, so that no new jobs can come in and all running
2694 self.feedback_fn("Stopping master daemons")
2695 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2697 # Stop daemons on all nodes
2698 for node_name in self.online_nodes:
2699 self.feedback_fn("Stopping daemons on %s" % node_name)
2700 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2702 # All daemons are shut down now
2704 return fn(self, *args)
2705 except Exception, err:
2706 _, errmsg = FormatError(err)
2707 logging.exception("Caught exception")
2708 self.feedback_fn(errmsg)
2711 # Start cluster again, master node last
2712 for node_name in self.nonmaster_nodes + [self.master_node]:
2713 self.feedback_fn("Starting daemons on %s" % node_name)
2714 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
# Release the watcher lock again once the cluster is back up.
2717 watcher_block.Close()
# NOTE(review): gappy listing — the luxi client creation and the "del cl"
# dropping the client reference appear elided.
2720 def RunWhileClusterStopped(feedback_fn, fn, *args):
2721 """Calls a function while all cluster daemons are stopped.
2723 @type feedback_fn: callable
2724 @param feedback_fn: Feedback function
2726 @param fn: Function to be called when daemons are stopped
2729 feedback_fn("Gathering cluster information")
2731 # This ensures we're running on the master daemon
2734 (cluster_name, master_node) = \
2735 cl.QueryConfigValues(["cluster_name", "master_node"])
2737 online_nodes = GetOnlineNodes([], cl=cl)
2739 # Don't keep a reference to the client. The master daemon will go away.
2742 assert master_node in online_nodes
# Delegate the stop/call/restart dance to the helper class above.
2744 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2745 online_nodes).Call(fn, *args)
# NOTE(review): gappy listing — defaults for numfields/unitfields, the row
# loop header and the final return of the result list are elided.
2748 def GenerateTable(headers, fields, separator, data,
2749 numfields=None, unitfields=None,
2751 """Prints a table with headers and different fields.
2754 @param headers: dictionary mapping field names to headers for
2757 @param fields: the field names corresponding to each row in
2759 @param separator: the separator to be used; if this is None,
2760 the default 'smart' algorithm is used which computes optimal
2761 field width, otherwise just the separator is used between
2764 @param data: a list of lists, each sublist being one row to be output
2765 @type numfields: list
2766 @param numfields: a list with the fields that hold numeric
2767 values and thus should be right-aligned
2768 @type unitfields: list
2769 @param unitfields: a list with the fields that hold numeric
2770 values that should be formatted with the units field
2771 @type units: string or None
2772 @param units: the units we should use for formatting, or None for
2773 automatic choice (human-readable for non-separator usage, otherwise
2774 megabytes); this is a one-letter string
2783 if numfields is None:
2785 if unitfields is None:
2788 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2789 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
# Build one printf-style conversion per field: plain %s with a separator,
# width-parameterized %*s / %-*s otherwise (right-align numeric fields).
2792 for field in fields:
2793 if headers and field not in headers:
2794 # TODO: handle better unknown fields (either revert to old
2795 # style of raising exception, or deal more intelligently with
2797 headers[field] = field
2798 if separator is not None:
2799 format_fields.append("%s")
2800 elif numfields.Matches(field):
2801 format_fields.append("%*s")
2803 format_fields.append("%-*s")
2805 if separator is None:
2806 mlens = [0 for name in fields]
2807 format_str = " ".join(format_fields)
2809 format_str = separator.replace("%", "%%").join(format_fields)
# First pass over the data: stringify values (applying unit formatting
# where requested) and track the maximum width per column.
2814 for idx, val in enumerate(row):
2815 if unitfields.Matches(fields[idx]):
2818 except (TypeError, ValueError):
2821 val = row[idx] = utils.FormatUnit(val, units)
2822 val = row[idx] = str(val)
2823 if separator is None:
2824 mlens[idx] = max(mlens[idx], len(val))
# Emit the header row, widening columns to fit the header text.
2829 for idx, name in enumerate(fields):
2831 if separator is None:
2832 mlens[idx] = max(mlens[idx], len(hdr))
2833 args.append(mlens[idx])
2835 result.append(format_str % tuple(args))
2837 if separator is None:
2838 assert len(mlens) == len(fields)
2840 if fields and not numfields.Matches(fields[-1]):
# Second pass: emit each data row with the computed widths.
2846 line = ["-" for _ in fields]
2847 for idx in range(len(fields)):
2848 if separator is None:
2849 args.append(mlens[idx])
2850 args.append(line[idx])
2851 result.append(format_str % tuple(args))
# NOTE(review): gappy listing — _FormatBool's body and the dictionary's
# closing brace are elided here.
2856 def _FormatBool(value):
2857 """Formats a boolean value as a string.
2865 #: Default formatting for query results; (callback, align right)
# Maps each query field type (QFT_*) to a (formatter, right-align) pair;
# QFT_UNIT is intentionally absent (handled in _GetColumnFormatter).
2866 _DEFAULT_FORMAT_QUERY = {
2867 constants.QFT_TEXT: (str, False),
2868 constants.QFT_BOOL: (_FormatBool, False),
2869 constants.QFT_NUMBER: (str, True),
2870 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2871 constants.QFT_OTHER: (str, False),
2872 constants.QFT_UNKNOWN: (str, False),
# NOTE(review): gappy listing — the early "if fmt is not None: return fmt"
# checks after both lookups appear elided.
2876 def _GetColumnFormatter(fdef, override, unit):
2877 """Returns formatting function for a field.
2879 @type fdef: L{objects.QueryFieldDefinition}
2880 @type override: dict
2881 @param override: Dictionary for overriding field formatting functions,
2882 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2884 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2885 @rtype: tuple; (callable, bool)
2886 @return: Returns the function to format a value (takes one parameter) and a
2887 boolean for aligning the value on the right-hand side
# Per-field override wins first, then QFT_UNIT (needs the runtime unit,
# so it cannot live in the static table), then the default table.
2890 fmt = override.get(fdef.name, None)
2894 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2896 if fdef.kind == constants.QFT_UNIT:
2897 # Can't keep this information in the static dictionary
2898 return (lambda value: utils.FormatUnit(value, unit), True)
2900 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2904 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# NOTE(review): gappy listing — "self._fn = fn" in __init__ appears elided.
2907 class _QueryColumnFormatter:
2908 """Callable class for formatting fields of a query.
2911 def __init__(self, fn, status_fn, verbose):
2912 """Initializes this class.
2915 @param fn: Formatting function
2916 @type status_fn: callable
2917 @param status_fn: Function to report fields' status
2918 @type verbose: boolean
2919 @param verbose: whether to use verbose field descriptions or not
2923 self._status_fn = status_fn
2924 self._verbose = verbose
2926 def __call__(self, data):
2927 """Returns a field's string representation.
# data is a (status, value) pair as delivered by the query layer.
2930 (status, value) = data
# Report the status to the caller's statistics callback first.
2933 self._status_fn(status)
2935 if status == constants.RS_NORMAL:
2936 return self._fn(value)
2938 assert value is None, \
2939 "Found value %r for abnormal status %s" % (value, status)
# Abnormal statuses are rendered by the shared error formatter.
2941 return FormatResultError(status, self._verbose)
# NOTE(review): gappy listing — the try/except framing and the returns of
# verbose_text/normal_text appear elided.
2944 def FormatResultError(status, verbose):
2945 """Formats result status other than L{constants.RS_NORMAL}.
2947 @param status: The result status
2948 @type verbose: boolean
2949 @param verbose: Whether to return the verbose text
2950 @return: Text of result status
2953 assert status != constants.RS_NORMAL, \
2954 "FormatResultError called with status equal to constants.RS_NORMAL"
# Each known status maps to a (verbose, terse) description pair.
2956 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2958 raise NotImplementedError("Unknown status %s" % status)
# NOTE(review): gappy listing — _RecordStatus's body, the columns=[] init
# and the status assignments in the final if/elif/else are elided.
2965 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2966 header=False, verbose=False):
2967 """Formats data in L{objects.QueryResponse}.
2969 @type result: L{objects.QueryResponse}
2970 @param result: result of query operation
2972 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2973 see L{utils.text.FormatUnit}
2974 @type format_override: dict
2975 @param format_override: Dictionary for overriding field formatting functions,
2976 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2977 @type separator: string or None
2978 @param separator: String used to separate fields
2980 @param header: Whether to output header row
2981 @type verbose: boolean
2982 @param verbose: whether to use verbose field descriptions or not
2991 if format_override is None:
2992 format_override = {}
# Per-status counters, filled in by the _RecordStatus callback below.
2994 stats = dict.fromkeys(constants.RS_ALL, 0)
2996 def _RecordStatus(status):
# One TableColumn per field, wrapping the formatter so each formatted
# value also updates the status statistics.
3001 for fdef in result.fields:
3002 assert fdef.title and fdef.name
3003 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3004 columns.append(TableColumn(fdef.title,
3005 _QueryColumnFormatter(fn, _RecordStatus,
3009 table = FormatTable(result.data, columns, header, separator)
3011 # Collect statistics
3012 assert len(stats) == len(constants.RS_ALL)
3013 assert compat.all(count >= 0 for count in stats.values())
3015 # Determine overall status. If there was no data, unknown fields must be
3016 # detected via the field definitions.
3017 if (stats[constants.RS_UNKNOWN] or
3018 (not result.data and _GetUnknownFields(result.fields))):
3020 elif compat.any(count > 0 for key, count in stats.items()
3021 if key != constants.RS_NORMAL):
3022 status = QR_INCOMPLETE
3026 return (status, table)
def _GetUnknownFields(fdefs):
  """Selects the unknown fields out of a list of field definitions.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @param fdefs: field definitions returned by a query
  @rtype: list
  @return: those definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  return filter(lambda fdef: fdef.kind == constants.QFT_UNKNOWN, fdefs)
# NOTE(review): gappy listing — the "if unknown:" guard and the boolean
# returns appear elided.
3039 def _WarnUnknownFields(fdefs):
3040 """Prints a warning to stderr if a query included unknown fields.
3042 @type fdefs: list of L{objects.QueryFieldDefinition}
3045 unknown = _GetUnknownFields(fdefs)
3047 ToStderr("Warning: Queried for unknown fields %s",
3048 utils.CommaJoin(fdef.name for fdef in unknown))
# NOTE(review): gappy listing — client creation, the qfilter-is-None branch
# header and the output loop are elided.
3054 def GenericList(resource, fields, names, unit, separator, header, cl=None,
3055 format_override=None, verbose=False, force_filter=False,
3056 namefield=None, qfilter=None, isnumeric=False):
3057 """Generic implementation for listing all items of a resource.
3059 @param resource: One of L{constants.QR_VIA_LUXI}
3060 @type fields: list of strings
3061 @param fields: List of fields to query for
3062 @type names: list of strings
3063 @param names: Names of items to query for
3064 @type unit: string or None
3065 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3066 None for automatic choice (human-readable for non-separator usage,
3067 otherwise megabytes); this is a one-letter string
3068 @type separator: string or None
3069 @param separator: String used to separate fields
3071 @param header: Whether to show header row
3072 @type force_filter: bool
3073 @param force_filter: Whether to always treat names as filter
3074 @type format_override: dict
3075 @param format_override: Dictionary for overriding field formatting functions,
3076 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3077 @type verbose: boolean
3078 @param verbose: whether to use verbose field descriptions or not
3079 @type namefield: string
3080 @param namefield: Name of field to use for simple filters (see
3081 L{qlang.MakeFilter} for details)
3082 @type qfilter: list or None
3083 @param qfilter: Query filter (in addition to names)
3084 @type isnumeric: bool
3085 @param isnumeric: Whether the namefield's type is numeric, and therefore
3086 any simple filters built by namefield should use integer values to
# Combine the name-based filter with the caller's filter (AND-ed).
3093 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3094 isnumeric=isnumeric)
3097 qfilter = namefilter
3098 elif namefilter is not None:
3099 qfilter = [qlang.OP_AND, namefilter, qfilter]
3104 response = cl.Query(resource, fields, qfilter)
3106 found_unknown = _WarnUnknownFields(response.fields)
3108 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3110 format_override=format_override,
# Sanity check: unknown fields reported iff the formatter saw them too.
3116 assert ((found_unknown and status == QR_UNKNOWN) or
3117 (not found_unknown and status != QR_UNKNOWN))
3119 if status == QR_UNKNOWN:
3120 return constants.EXIT_UNKNOWN_FIELD
3122 # TODO: Should the list command fail if not all data could be collected?
3123 return constants.EXIT_SUCCESS
# NOTE(review): gappy listing — most of the returned list literal
# (name/title/doc entries and brackets) is elided.
3126 def _FieldDescValues(fdef):
3127 """Helper function for L{GenericListFields} to get query field description.
3129 @type fdef: L{objects.QueryFieldDefinition}
# Translate the field kind to its human-readable name where known.
3135 _QFT_NAMES.get(fdef.kind, fdef.kind),
# NOTE(review): gappy listing — client creation, the columns list brackets,
# the ToStdout call in the loop and the "if found_unknown:" guard are elided.
3141 def GenericListFields(resource, fields, separator, header, cl=None):
3142 """Generic implementation for listing fields for a resource.
3144 @param resource: One of L{constants.QR_VIA_LUXI}
3145 @type fields: list of strings
3146 @param fields: List of fields to query for
3147 @type separator: string or None
3148 @param separator: String used to separate fields
3150 @param header: Whether to show header row
3159 response = cl.QueryFields(resource, fields)
3161 found_unknown = _WarnUnknownFields(response.fields)
# Fixed four-column layout describing each available field.
3164 TableColumn("Name", str, False),
3165 TableColumn("Type", str, False),
3166 TableColumn("Title", str, False),
3167 TableColumn("Description", str, False),
3170 rows = map(_FieldDescValues, response.fields)
3172 for line in FormatTable(rows, columns, header, separator):
3176 return constants.EXIT_UNKNOWN_FIELD
3178 return constants.EXIT_SUCCESS
3182 """Describes a column for L{FormatTable}.
# NOTE(review): gappy listing — the assignments of self.title and the
# formatting function appear elided.
3185 def __init__(self, title, fn, align_right):
3186 """Initializes this class.
3189 @param title: Column title
3191 @param fn: Formatting function
3192 @type align_right: bool
3193 @param align_right: Whether to align values on the right-hand side
3198 self.align_right = align_right
3201 def _GetColFormatString(width, align_right):
3202 """Returns the format string for a field.
3210 return "%%%s%ss" % (sign, width)
# NOTE(review): gappy listing — the "if header:"/"else:" framing, the row
# loop header and the colwidth[-1] reset are elided.
3213 def FormatTable(rows, columns, header, separator):
3214 """Formats data as a table.
3216 @type rows: list of lists
3217 @param rows: Row data, one list per row
3218 @type columns: list of L{TableColumn}
3219 @param columns: Column descriptions
3221 @param header: Whether to show header row
3222 @type separator: string or None
3223 @param separator: String used to separate columns
# Seed output with the title row and its widths when a header is wanted.
3227 data = [[col.title for col in columns]]
3228 colwidth = [len(col.title) for col in columns]
3231 colwidth = [0 for _ in columns]
3235 assert len(row) == len(columns)
# Format every cell through its column's formatting callback.
3237 formatted = [col.format(value) for value, col in zip(row, columns)]
3239 if separator is None:
3240 # Update column widths
3241 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3242 # Modifying a list's items while iterating is fine
3243 colwidth[idx] = max(oldwidth, len(value))
3245 data.append(formatted)
3247 if separator is not None:
3248 # Return early if a separator is used
3249 return [separator.join(row) for row in data]
3251 if columns and not columns[-1].align_right:
3252 # Avoid unnecessary spaces at end of line
3255 # Build format string
3256 fmt = " ".join([_GetColFormatString(width, col.align_right)
3257 for col, width in zip(columns, colwidth)])
3259 return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # Only a (seconds, microseconds) pair can be formatted meaningfully
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usecs) = ts
    return utils.FormatTime(sec, usecs=usecs)
  return "?"
# NOTE(review): gappy listing — the suffix_map definition, the try: lines
# and the final return of the parsed value are elided.
3279 def ParseTimespec(value):
3280 """Parse a time specification.
3282 The following suffixes will be recognized:
3290 Without any suffix, the value will be taken to be in seconds.
3295 raise errors.OpPrereqError("Empty time specification passed",
# Values without a known suffix are parsed as plain seconds.
3304 if value[-1] not in suffix_map:
3307 except (TypeError, ValueError):
3308 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3311 multiplier = suffix_map[value[-1]]
3313 if not value: # no data left after stripping the suffix
3314 raise errors.OpPrereqError("Invalid time specification (only"
3315 " suffix passed)", errors.ECODE_INVAL)
# Suffixed values are scaled into seconds via the suffix's multiplier.
3317 value = int(value) * multiplier
3318 except (TypeError, ValueError):
3319 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
# NOTE(review): gappy listing — client creation, qfilter initialization,
# helper-function returns and the fn selection before the final map are
# elided.
3324 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3325 filter_master=False, nodegroup=None):
3326 """Returns the names of online nodes.
3328 This function will also log a warning on stderr with the names of
3331 @param nodes: if not empty, use only this subset of nodes (minus the
3333 @param cl: if not None, luxi client to use
3334 @type nowarn: boolean
3335 @param nowarn: by default, this function will output a note with the
3336 offline nodes that are skipped; if this parameter is True the
3337 note is not displayed
3338 @type secondary_ips: boolean
3339 @param secondary_ips: if True, return the secondary IPs instead of the
3340 names, useful for doing network traffic over the replication interface
3342 @type filter_master: boolean
3343 @param filter_master: if True, do not return the master node in the list
3344 (useful in coordination with secondary_ips where we cannot check our
3345 node name against the list)
3346 @type nodegroup: string
3347 @param nodegroup: If set, only return nodes in this node group
# Build the query filter from the optional name subset, node group and
# master exclusion, then AND the clauses together.
3356 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3358 if nodegroup is not None:
3359 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3360 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3363 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3366 if len(qfilter) > 1:
3367 final_filter = [qlang.OP_AND] + qfilter
3369 assert len(qfilter) == 1
3370 final_filter = qfilter[0]
3374 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# Row helpers: each row is [(status, name), (status, offline),
# (status, sip)] triples from the query layer.
3376 def _IsOffline(row):
3377 (_, (_, offline), _) = row
3381 ((_, name), _, _) = row
3385 (_, _, (_, sip)) = row
3388 (offline, online) = compat.partition(result.data, _IsOffline)
3390 if offline and not nowarn:
3391 ToStderr("Note: skipping offline node(s): %s" %
3392 utils.CommaJoin(map(_GetName, offline)))
3399 return map(fn, online)
# NOTE(review): gappy listing — the try:, the no-args write branch, the
# newline write and flush appear elided.
3402 def _ToStream(stream, txt, *args):
3403 """Write a message to a stream, bypassing the logging system
3405 @type stream: file object
3406 @param stream: the file to which we should write
3408 @param txt: the message
# txt is %-interpolated with args before writing.
3414 stream.write(txt % args)
3419 except IOError, err:
3420 if err.errno == errno.EPIPE:
3421 # our terminal went away, we'll exit
3422 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Print a message on standard output, outside of the logging system.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message (optionally a format string for C{args})

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Print a message on standard error, outside of the logging system.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message (optionally a format string for C{args})

  """
  _ToStream(sys.stderr, txt, *args)
# NOTE(review): gappy listing — several attribute initializations in
# __init__, the bodies of _IfName, and a number of control-flow lines
# (if each:/else:, try:, success flags, sorts and returns) are elided.
3451 class JobExecutor(object):
3452 """Class which manages the submission and execution of multiple jobs.
3454 Note that instances of this class should not be reused between
3458 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3463 self.verbose = verbose
3466 self.feedback_fn = feedback_fn
# Monotonic counter used to keep jobs in submission order.
3467 self._counter = itertools.count()
3470 def _IfName(name, fmt):
3471 """Helper function for formatting name.
3479 def QueueJob(self, name, *ops):
3480 """Record a job for later submit.
3483 @param name: a description of the job, will be used in WaitJobSet
3486 SetGenericOpcodeOpts(ops, self.opts)
3487 self.queue.append((self._counter.next(), name, ops))
3489 def AddJobId(self, name, status, job_id):
3490 """Adds a job ID to the internal queue.
3493 self.jobs.append((self._counter.next(), status, job_id, name))
3495 def SubmitPending(self, each=False):
3496 """Submit all pending jobs.
# each=True submits one luxi call per job, otherwise one bulk call.
3501 for (_, _, ops) in self.queue:
3502 # SubmitJob will remove the success status, but raise an exception if
3503 # the submission fails, so we'll notice that anyway.
3504 results.append([True, self.cl.SubmitJob(ops)[0]])
3506 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3507 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3508 self.jobs.append((idx, status, data, name))
3510 def _ChooseJob(self):
3511 """Choose a non-waiting/queued job to poll next.
3514 assert self.jobs, "_ChooseJob called with empty job list"
# Query a bounded batch of job statuses to pick a job worth polling.
3516 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3520 for job_data, status in zip(self.jobs, result):
3521 if (isinstance(status, list) and status and
3522 status[0] in (constants.JOB_STATUS_QUEUED,
3523 constants.JOB_STATUS_WAITING,
3524 constants.JOB_STATUS_CANCELING)):
3525 # job is still present and waiting
3527 # good candidate found (either running job or lost job)
3528 self.jobs.remove(job_data)
# Fallback: everything is still waiting, poll the oldest job.
3532 return self.jobs.pop(0)
3534 def GetResults(self):
3535 """Wait for and return the results of all jobs.
3538 @return: list of tuples (success, job results), in the same order
3539 as the submitted jobs; if a job has failed, instead of the result
3540 there will be the error message
3544 self.SubmitPending()
3547 ok_jobs = [row[2] for row in self.jobs if row[1]]
3549 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3551 # first, remove any non-submitted jobs
3552 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3553 for idx, _, jid, name in failures:
3554 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3555 results.append((idx, False, jid))
# Poll the remaining jobs one at a time until all have finished.
3558 (idx, _, jid, name) = self._ChooseJob()
3559 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3561 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3563 except errors.JobLost, err:
3564 _, job_result = FormatError(err)
3565 ToStderr("Job %s%s has been archived, cannot check its result",
3566 jid, self._IfName(name, " for %s"))
3568 except (errors.GenericError, luxi.ProtocolError), err:
3569 _, job_result = FormatError(err)
3571 # the error message will always be shown, verbose or not
3572 ToStderr("Job %s%s has failed: %s",
3573 jid, self._IfName(name, " for %s"), job_result)
3575 results.append((idx, success, job_result))
3577 # sort based on the index, then drop it
3579 results = [i[1:] for i in results]
3583 def WaitOrShow(self, wait):
3584 """Wait for job results or only print the job IDs.
3587 @param wait: whether to wait or not
3591 return self.GetResults()
# Non-waiting mode: just submit and report submission status per job.
3594 self.SubmitPending()
3595 for _, status, result, name in self.jobs:
3597 ToStdout("%s: %s", result, name)
3599 ToStderr("Failure for %s: %s", name, result)
3600 return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict of dicts
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  def _FormatValue(key, data):
    # Non-empty sub-dicts are rendered recursively; every other value is
    # stringified, falling back to "default (<value>)" when the parameter
    # is not explicitly set in param_dict.
    if isinstance(data, dict) and data:
      return FormatParamsDictInfo(param_dict.get(key, {}), data)
    return str(param_dict.get(key, "default (%s)" % data))

  return dict((key, _FormatValue(key, data))
              for (key, data) in actual.items())
def _FormatListInfoDefault(data, def_data):
  """Renders a list for info output, falling back to the default list.

  @param data: the explicitly configured list, or None when unset
  @param def_data: the effective (default) list, used when C{data} is None
  @rtype: string

  """
  if data is None:
    return "default (%s)" % utils.CommaJoin(def_data)
  return utils.CommaJoin(data)
# NOTE(review): gappy listing — the "if iscluster:" guard, the ret list
# brackets/appends and the final return are elided.
3632 def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3633 """Formats an instance policy.
3635 @type custom_ipolicy: dict
3636 @param custom_ipolicy: own policy
3637 @type eff_ipolicy: dict
3638 @param eff_ipolicy: effective policy (including defaults); ignored for
3640 @type iscluster: bool
3641 @param iscluster: the policy is at cluster level
3642 @rtype: list of pairs
3643 @return: formatted data, suitable for L{PrintGenericInfo}
# At cluster level the custom policy IS the effective policy.
3647 eff_ipolicy = custom_ipolicy
# Min/max instance specs, one formatted dict per ISPECS_MINMAX key.
3649 custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3652 FormatParamsDictInfo(custom_minmax.get(key, {}),
3653 eff_ipolicy[constants.ISPECS_MINMAX][key]))
3654 for key in constants.ISPECS_MINMAX_KEYS
# Standard specs: formatted against themselves (cluster level only).
3657 stdspecs = custom_ipolicy[constants.ISPECS_STD]
3659 (constants.ISPECS_STD,
3660 FormatParamsDictInfo(stdspecs, stdspecs))
3664 ("enabled disk templates",
3665 _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3666 eff_ipolicy[constants.IPOLICY_DTS]))
# Remaining scalar policy parameters with "default (...)" fallback.
3669 (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3670 for key in constants.IPOLICY_PARAMETERS
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @type extra: str
  @param extra: extra text inserted into the question before asking for
      confirmation
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # Long lists are not shown by default; offer an extra choice to view
    # them on request
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    # The user asked to see the list; show it and ask again without the
    # "view" option
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  @type elements: dict
  @param elements: dictionary of option values; values equal to
      C{constants.VALUE_DEFAULT} are kept verbatim, all others are parsed
      as sizes with units
  @rtype: dict
  @return: the input dictionary with unit strings converted to numbers
  @raise errors.UnitParseError: if a value cannot be parsed as a size

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      # "default" is a keyword, not a size; leave it for later processing
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
def _InitIspecsFromOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                        ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
                        group_ipolicy, allowed_values):
  """Fills an instance policy with instance spec command-line options.

  @type ipolicy: dict
  @param ipolicy: policy dictionary, updated in place with the transposed
      min/max/std specs
  @param ispecs_mem_size: dict of {min,max,std} memory sizes (with units)
  @param ispecs_cpu_count: dict of {min,max,std} CPU counts
  @param ispecs_disk_count: dict of {min,max,std} disk counts
  @param ispecs_disk_size: dict of {min,max,std} disk sizes (with units)
  @param ispecs_nic_count: dict of {min,max,std} NIC counts
  @type group_ipolicy: bool
  @param group_ipolicy: whether the policy is at group level
  @param allowed_values: extra values that are valid for any spec
  @raise errors.OpPrereqError: if a size value cannot be parsed

  """
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError) as err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    # NOTE(review): assumes each specs value is a dict (the spec options
    # default to dicts) -- confirm against the option declarations
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose: {param: {min: ..}} becomes {min: {param: ..}}
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.items():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items():  # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  for key in constants.ISPECS_MINMAX_KEYS:
    ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
  ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
      all values are filled
  @rtype: dict
  @return: the newly created instance policy

  """
  ipolicy_out = objects.MakeEmptyIPolicy()
  _InitIspecsFromOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                      ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
                      group_ipolicy, allowed_values)

  # only override the policy components that were actually given
  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    # cluster policies must be complete; fill the gaps with the defaults
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      # an empty dict still has to terminate the current line
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        # first entry continues the line started by the parent key
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        # YAML-style sequence item marker
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  # the serializer terminates every line with "\n"; drop the trailing one,
  # as ToStdout adds its own newline
  ToStdout(buf.getvalue().rstrip("\n"))