# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""
import sys

from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)
# Public API of this module.  NOTE(review): this list was partially lost to
# extraction; only the entries that survived are restored here — entries for
# any names dropped from this copy must be re-added when diffing upstream.
__all__ = [
  # Command line options
  "ADD_RESERVED_IPS_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "FILESTORE_DRIVER_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETMASK_OPT",
  "MIGRATION_MODE_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_PLACEMENT_OPT",
  "NODRBD_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSSH_KEYCHECK_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "SECONDARY_ONLY_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "STARTUP_PAUSED_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "CreateIPolicyFromOpts",
  "GenericInstanceCreate",
  "JobSubmittedException",
  "RunWhileClusterStopped",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatParameterDict",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NETWORKS",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "COMMON_CREATE_OPTS",
  ]
#: Priorities (sorted from lowest to highest), as (name, opcode value) pairs
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients: normal row, unknown field, or a row the
# server could not fully compute
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
# constants used to create InstancePolicy dictionary
# (maps each instance-spec key to the value type used for validation)
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }
class _Argument:
  """Base class for command line arguments.

  Holds the minimum and maximum number of values the argument accepts
  (C{max=None} meaning unbounded).

  """
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
# Predefined argument specifications used by the individual CLI scripts
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type} field
  @param args: positional arguments; for per-object tag types the object
      name is popped from the front of this list
  @return: tuple of (tag kind, object name or None)
  @raise errors.ProgrammerError: if C{tag_type} is missing or unhandled

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name needed
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  ToStdout(",".join(result))
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  Converts a size value with an optional unit suffix into MiB; parse
  failures are reported back through optparse's own error mechanism.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError as err:
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  The literal "default" is passed through unchanged; everything else is
  converted to float.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 108)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's L{Option} with Ganeti-specific value types
  (key=val dicts, units, booleans, lists, maybe-float) and an attribute
  used by the bash completion machinery.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
# Generic output/behaviour options shared by most CLI scripts
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true", dest="dry_run",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true", dest="verbose",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])
# Instance-policy (ipolicy) specification options
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"
                         " replacement)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

# NOTE(review): default value lost in this copy; upstream may use
# pathutils.EXPORT_DIR — confirm before relying on it
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         default=None, metavar="<DIR>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=2,
                          help=("Number of seconds between repetitions of the"
                                " status display"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")
1258 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1260 help="File containing new RAPI certificate")
1262 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1263 default=None, action="store_true",
1264 help=("Generate a new self-signed RAPI"
1267 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1269 help="File containing new SPICE certificate")
1271 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1273 help="File containing the certificate of the CA"
1274 " which signed the SPICE certificate")
1276 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1277 dest="new_spice_cert", default=None,
1278 action="store_true",
1279 help=("Generate a new self-signed SPICE"
1282 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1283 dest="new_confd_hmac_key",
1284 default=False, action="store_true",
1285 help=("Create a new HMAC key for %s" %
1288 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1289 dest="cluster_domain_secret",
1291 help=("Load new new cluster domain"
1292 " secret from file"))
1294 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1295 dest="new_cluster_domain_secret",
1296 default=False, action="store_true",
1297 help=("Create a new cluster domain"
1300 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1301 dest="use_replication_network",
1302 help="Whether to use the replication network"
1303 " for talking to the nodes",
1304 action="store_true", default=False)
1306 MAINTAIN_NODE_HEALTH_OPT = \
1307 cli_option("--maintain-node-health", dest="maintain_node_health",
1308 metavar=_YORNO, default=None, type="bool",
1309 help="Configure the cluster to automatically maintain node"
1310 " health, by shutting down unknown instances, shutting down"
1311 " unknown DRBD devices, etc.")
1313 IDENTIFY_DEFAULTS_OPT = \
1314 cli_option("--identify-defaults", dest="identify_defaults",
1315 default=False, action="store_true",
1316 help="Identify which saved instance parameters are equal to"
1317 " the current cluster defaults and set them as such, instead"
1318 " of marking them as overridden")
1320 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1321 action="store", dest="uid_pool",
1322 help=("A list of user-ids or user-id"
1323 " ranges separated by commas"))
1325 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1326 action="store", dest="add_uids",
1327 help=("A list of user-ids or user-id"
1328 " ranges separated by commas, to be"
1329 " added to the user-id pool"))
1331 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1332 action="store", dest="remove_uids",
1333 help=("A list of user-ids or user-id"
1334 " ranges separated by commas, to be"
1335 " removed from the user-id pool"))
1337 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1338 action="store", dest="reserved_lvs",
1339 help=("A comma-separated list of reserved"
1340 " logical volumes names, that will be"
1341 " ignored by cluster verify"))
1343 ROMAN_OPT = cli_option("--roman",
1344 dest="roman_integers", default=False,
1345 action="store_true",
1346 help="Use roman numbers for positive integers")
1348 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1349 action="store", default=None,
1350 help="Specifies usermode helper for DRBD")
1352 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1353 action="store_false", default=True,
1354 help="Disable support for DRBD")
1356 PRIMARY_IP_VERSION_OPT = \
1357 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1358 action="store", dest="primary_ip_version",
1359 metavar="%d|%d" % (constants.IP4_VERSION,
1360 constants.IP6_VERSION),
1361 help="Cluster-wide IP version for primary IP")
1363 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1364 action="store_true",
1365 help="Show machine name for every line in output")
1367 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1368 action="store_true",
1369 help=("Hide successful results and show failures"
1370 " only (determined by the exit code)"))
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name into its numeric value and
  stores it on the parsed options object.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
1382 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1383 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1384 choices=_PRIONAME_TO_VALUE.keys(),
1385 action="callback", type="choice",
1386 callback=_PriorityOptionCb,
1387 help="Priority for opcode processing")
1389 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1390 type="bool", default=None, metavar=_YORNO,
1391 help="Sets the hidden flag on the OS")
1393 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1394 type="bool", default=None, metavar=_YORNO,
1395 help="Sets the blacklisted flag on the OS")
1397 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1398 type="bool", metavar=_YORNO,
1399 dest="prealloc_wipe_disks",
1400 help=("Wipe disks prior to instance"
1403 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1404 type="keyval", default=None,
1405 help="Node parameters")
1407 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1408 action="store", metavar="POLICY", default=None,
1409 help="Allocation policy for the node group")
1411 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1412 type="bool", metavar=_YORNO,
1413 dest="node_powered",
1414 help="Specify if the SoR for node is powered")
1416 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1417 default=constants.OOB_TIMEOUT,
1418 help="Maximum time to wait for out-of-band helper")
1420 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1421 default=constants.OOB_POWER_DELAY,
1422 help="Time in seconds to wait between power-ons")
1424 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1425 action="store_true", default=False,
1426 help=("Whether command argument should be treated"
1429 NO_REMEMBER_OPT = cli_option("--no-remember",
1431 action="store_true", default=False,
1432 help="Perform but do not record the change"
1433 " in the configuration")
1435 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1436 default=False, action="store_true",
1437 help="Evacuate primary instances only")
1439 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1440 default=False, action="store_true",
1441 help="Evacuate secondary instances only"
1442 " (applies only to internally mirrored"
1443 " disk templates, e.g. %s)" %
1444 utils.CommaJoin(constants.DTS_INT_MIRROR))
1446 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1447 action="store_true", default=False,
1448 help="Pause instance at startup")
1450 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1451 help="Destination node group (name or uuid)",
1452 default=None, action="append",
1453 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1455 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1456 action="append", dest="ignore_errors",
1457 choices=list(constants.CV_ALL_ECODES_STRINGS),
1458 help="Error code to be ignored")
1460 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1462 help=("Specify disk state information in the"
1464 " storage_type/identifier:option=value,...;"
1465 " note this is unused for now"),
1468 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1470 help=("Specify hypervisor state information in the"
1471 " format hypervisor:option=value,...;"
1472 " note this is unused for now"),
1475 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1476 action="store_true", default=False,
1477 help="Ignore instance policy violations")
1479 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1480 help="Sets the instance's runtime memory,"
1481 " ballooning it up or down to the new value",
1482 default=None, type="unit", metavar="<size>")
1484 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1485 action="store_true", default=False,
1486 help="Marks the grow as absolute instead of the"
1487 " (default) relative mode")
1489 NETWORK_OPT = cli_option("--network",
1490 action="store", default=None, dest="network",
1491 help="IP network in CIDR notation")
1493 GATEWAY_OPT = cli_option("--gateway",
1494 action="store", default=None, dest="gateway",
1495 help="IP address of the router (gateway)")
1497 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1498 action="store", default=None,
1499 dest="add_reserved_ips",
1500 help="Comma-separated list of"
1501 " reserved IPs to add")
1503 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1504 action="store", default=None,
1505 dest="remove_reserved_ips",
1506 help="Comma-delimited list of"
1507 " reserved IPs to remove")
1509 NETWORK_TYPE_OPT = cli_option("--network-type",
1510 action="store", default=None, dest="network_type",
1511 help="Network type: private, public, None")
1513 NETWORK6_OPT = cli_option("--network6",
1514 action="store", default=None, dest="network6",
1515 help="IP network in CIDR notation")
1517 GATEWAY6_OPT = cli_option("--gateway6",
1518 action="store", default=None, dest="gateway6",
1519 help="IP6 address of the router (gateway)")
1521 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1522 dest="conflicts_check",
1524 action="store_false",
1525 help="Don't check for conflicting IPs")
1527 #: Options provided by all commands
1528 COMMON_OPTS = [DEBUG_OPT]
1530 # common options for creating instances. add and import then add their own
1532 COMMON_CREATE_OPTS = [
1537 FILESTORE_DRIVER_OPT,
1543 NOCONFLICTSCHECK_OPT,
1555 # common instance policy options
1556 INSTANCE_POLICY_OPTS = [
1557 SPECS_CPU_COUNT_OPT,
1558 SPECS_DISK_COUNT_OPT,
1559 SPECS_DISK_SIZE_OPT,
1561 SPECS_NIC_COUNT_OPT,
1562 IPOLICY_DISK_TEMPLATES,
1564 IPOLICY_SPINDLE_RATIO,
1568 class _ShowUsage(Exception):
1569 """Exception class for L{_ParseArgs}.
1572 def __init__(self, exit_error):
1573 """Initializes instances of this class.
1575 @type exit_error: bool
1576 @param exit_error: Whether to report failure on exit
1579 Exception.__init__(self)
1580 self.exit_error = exit_error
1583 class _ShowVersion(Exception):
1584 """Exception class for L{_ParseArgs}.
1589 def _ParseArgs(binary, argv, commands, aliases, env_override):
1590 """Parser for the command line arguments.
1592 This function parses the arguments and returns the function which
1593 must be executed together with its (modified) arguments.
1595 @param binary: Script name
1596 @param argv: Command line arguments
1597 @param commands: Dictionary containing command definitions
1598 @param aliases: dictionary with command aliases {"alias": "target", ...}
1599 @param env_override: list of env variables allowed for default args
1600 @raise _ShowUsage: If usage description should be shown
1601 @raise _ShowVersion: If version should be shown
1604 assert not (env_override - set(commands))
1605 assert not (set(aliases.keys()) & set(commands.keys()))
1610 # No option or command given
1611 raise _ShowUsage(exit_error=True)
1613 if cmd == "--version":
1614 raise _ShowVersion()
1615 elif cmd == "--help":
1616 raise _ShowUsage(exit_error=False)
1617 elif not (cmd in commands or cmd in aliases):
1618 raise _ShowUsage(exit_error=True)
1620 # get command, unalias it, and look it up in commands
1622 if aliases[cmd] not in commands:
1623 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1624 " command '%s'" % (cmd, aliases[cmd]))
1628 if cmd in env_override:
1629 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1630 env_args = os.environ.get(args_env_name)
1632 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1634 func, args_def, parser_opts, usage, description = commands[cmd]
1635 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1636 description=description,
1637 formatter=TitledHelpFormatter(),
1638 usage="%%prog %s %s" % (cmd, usage))
1639 parser.disable_interspersed_args()
1640 options, args = parser.parse_args(args=argv[2:])
1642 if not _CheckArguments(cmd, args_def, args):
1643 return None, None, None
1645 return func, options, args
1648 def _FormatUsage(binary, commands):
1649 """Generates a nice description of all commands.
1651 @param binary: Script name
1652 @param commands: Dictionary containing command definitions
1655 # compute the max line length for cmd + usage
1656 mlen = min(60, max(map(len, commands)))
1658 yield "Usage: %s {command} [options...] [argument...]" % binary
1659 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1663 # and format a nice command list
1664 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1665 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1666 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1667 for line in help_lines:
1668 yield " %-*s %s" % (mlen, "", line)
1673 def _CheckArguments(cmd, args_def, args):
1674 """Verifies the arguments using the argument definition.
1678 1. Abort with error if values specified by user but none expected.
1680 1. For each argument in definition
1682 1. Keep running count of minimum number of values (min_count)
1683 1. Keep running count of maximum number of values (max_count)
1684 1. If it has an unlimited number of values
1686 1. Abort with error if it's not the last argument in the definition
1688 1. If last argument has limited number of values
1690 1. Abort with error if number of values doesn't match or is too large
1692 1. Abort with error if user didn't pass enough values (min_count)
1695 if args and not args_def:
1696 ToStderr("Error: Command %s expects no arguments", cmd)
1703 last_idx = len(args_def) - 1
1705 for idx, arg in enumerate(args_def):
1706 if min_count is None:
1708 elif arg.min is not None:
1709 min_count += arg.min
1711 if max_count is None:
1713 elif arg.max is not None:
1714 max_count += arg.max
1717 check_max = (arg.max is not None)
1719 elif arg.max is None:
1720 raise errors.ProgrammerError("Only the last argument can have max=None")
1723 # Command with exact number of arguments
1724 if (min_count is not None and max_count is not None and
1725 min_count == max_count and len(args) != min_count):
1726 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1729 # Command with limited number of arguments
1730 if max_count is not None and len(args) > max_count:
1731 ToStderr("Error: Command %s expects only %d argument(s)",
1735 # Command with some required arguments
1736 if min_count is not None and len(args) < min_count:
1737 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @return: a (primary, secondary) pair; the secondary element is
      C{None} when no colon separator is present (note: when a colon
      is present the result of C{str.split} — a list — is returned,
      matching the original behaviour)

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @param default: Default fields
  @return: the default list when nothing was selected; the default list
      extended by the user's fields when the selection starts with "+";
      otherwise only the user's comma-separated fields

  """
  if selected is None:
    return default

  extend = selected.startswith("+")
  fields = (selected[1:] if extend else selected).split(",")
  return default + fields if extend else fields
1789 UsesRPC = rpc.RunWithRPC
1792 def AskUser(text, choices=None):
1793 """Ask the user a question.
1795 @param text: the question to ask
1797 @param choices: list with elements tuples (input_char, return_value,
1798 description); if not given, it will default to: [('y', True,
1799 'Perform the operation'), ('n', False, 'Do no do the operation')];
1800 note that the '?' char is reserved for help
1802 @return: one of the return values from the choices list; if input is
1803 not possible (i.e. not running with a tty, we return the last
1808 choices = [("y", True, "Perform the operation"),
1809 ("n", False, "Do not perform the operation")]
1810 if not choices or not isinstance(choices, list):
1811 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1812 for entry in choices:
1813 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1814 raise errors.ProgrammerError("Invalid choices element to AskUser")
1816 answer = choices[-1][1]
1818 for line in text.splitlines():
1819 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1820 text = "\n".join(new_text)
1822 f = file("/dev/tty", "a+")
1826 chars = [entry[0] for entry in choices]
1827 chars[-1] = "[%s]" % chars[-1]
1829 maps = dict([(entry[0], entry[1]) for entry in choices])
1833 f.write("/".join(chars))
1835 line = f.readline(2).strip().lower()
1840 for entry in choices:
1841 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the job ID returned by the master

  """
  if cl is None:
    cl = GetClient()
  return cl.SubmitJob(ops)
1878 def GenericPollJob(job_id, cbs, report_cbs):
1879 """Generic job-polling function.
1881 @type job_id: number
1882 @param job_id: Job ID
1883 @type cbs: Instance of L{JobPollCbBase}
1884 @param cbs: Data callbacks
1885 @type report_cbs: Instance of L{JobPollReportCbBase}
1886 @param report_cbs: Reporting callbacks
1889 prev_job_info = None
1890 prev_logmsg_serial = None
1895 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1898 # job not found, go away!
1899 raise errors.JobLost("Job with id %s lost" % job_id)
1901 if result == constants.JOB_NOTCHANGED:
1902 report_cbs.ReportNotChanged(job_id, status)
1907 # Split result, a tuple of (field values, log entries)
1908 (job_info, log_entries) = result
1909 (status, ) = job_info
1912 for log_entry in log_entries:
1913 (serial, timestamp, log_type, message) = log_entry
1914 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1916 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1918 # TODO: Handle canceled and archived jobs
1919 elif status in (constants.JOB_STATUS_SUCCESS,
1920 constants.JOB_STATUS_ERROR,
1921 constants.JOB_STATUS_CANCELING,
1922 constants.JOB_STATUS_CANCELED):
1925 prev_job_info = job_info
1927 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1929 raise errors.JobLost("Job with id %s lost" % job_id)
1931 status, opstatus, result = jobs[0]
1933 if status == constants.JOB_STATUS_SUCCESS:
1936 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1937 raise errors.OpExecError("Job was canceled")
1940 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1941 if status == constants.OP_STATUS_SUCCESS:
1943 elif status == constants.OP_STATUS_ERROR:
1944 errors.MaybeRaise(msg)
1947 raise errors.OpExecError("partial failure (opcode %d): %s" %
1950 raise errors.OpExecError(str(msg))
1952 # default failure mode
1953 raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses supply the data-retrieval primitives used while polling.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how log messages and status updates are surfaced.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """Callbacks that forward all polling calls to a luxi client."""

  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields, prev_job_info,
                                        prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function."""

  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that write to standard output/error."""

  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    # Each notification is printed at most once per job
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-C{ELOG_MESSAGE} payloads are stringified first; the result is
  always passed through L{utils.SafeEncode}.

  """
  text = log_msg if log_type == constants.ELOG_MESSAGE else str(log_msg)
  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  # "reporter" and "feedback_fn" are mutually exclusive ways of
  # specifying how progress is reported
  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # not an error: structured exit carrying the job ID to the caller
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  for opcode in opcode_list:
    opcode.debug_level = options.debug
    if hasattr(options, "dry_run"):
      opcode.dry_run = options.dry_run
    # priority is only propagated when explicitly set
    if getattr(options, "priority", None) is not None:
      opcode.priority = options.priority
2188 def GetClient(query=False):
2189 """Connects to the a luxi socket and returns a client.
2191 @type query: boolean
2192 @param query: this signifies that the client will only be
2193 used for queries; if the build-time parameter
2194 enable-split-queries is enabled, then the client will be
2195 connected to the query socket instead of the masterd socket
2198 if query and constants.ENABLE_SPLIT_QUERY:
2199 address = pathutils.QUERY_SOCKET
2202 # TODO: Cache object?
2204 client = luxi.Client(address=address)
2205 except luxi.NoMasterError:
2206 ss = ssconf.SimpleStore()
2208 # Try to read ssconf file
2211 except errors.ConfigurationError:
2212 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2213 " not part of a cluster",
2216 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2217 if master != myself:
2218 raise errors.OpPrereqError("This is not the master node, please connect"
2219 " to node '%s' and rerun the command" %
2220 master, errors.ECODE_INVAL)
2225 def FormatError(err):
2226 """Return a formatted error message for a given error.
2228 This function takes an exception instance and returns a tuple
2229 consisting of two values: first, the recommended exit code, and
2230 second, a string describing the error message (not
2231 newline-terminated).
2237 if isinstance(err, errors.ConfigurationError):
2238 txt = "Corrupt configuration file: %s" % msg
2240 obuf.write(txt + "\n")
2241 obuf.write("Aborting.")
2243 elif isinstance(err, errors.HooksAbort):
2244 obuf.write("Failure: hooks execution failed:\n")
2245 for node, script, out in err.args[0]:
2247 obuf.write(" node: %s, script: %s, output: %s\n" %
2248 (node, script, out))
2250 obuf.write(" node: %s, script: %s (no output)\n" %
2252 elif isinstance(err, errors.HooksFailure):
2253 obuf.write("Failure: hooks general failure: %s" % msg)
2254 elif isinstance(err, errors.ResolverError):
2255 this_host = netutils.Hostname.GetSysName()
2256 if err.args[0] == this_host:
2257 msg = "Failure: can't resolve my own hostname ('%s')"
2259 msg = "Failure: can't resolve hostname '%s'"
2260 obuf.write(msg % err.args[0])
2261 elif isinstance(err, errors.OpPrereqError):
2262 if len(err.args) == 2:
2263 obuf.write("Failure: prerequisites not met for this"
2264 " operation:\nerror type: %s, error details:\n%s" %
2265 (err.args[1], err.args[0]))
2267 obuf.write("Failure: prerequisites not met for this"
2268 " operation:\n%s" % msg)
2269 elif isinstance(err, errors.OpExecError):
2270 obuf.write("Failure: command execution error:\n%s" % msg)
2271 elif isinstance(err, errors.TagError):
2272 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2273 elif isinstance(err, errors.JobQueueDrainError):
2274 obuf.write("Failure: the job queue is marked for drain and doesn't"
2275 " accept new requests\n")
2276 elif isinstance(err, errors.JobQueueFull):
2277 obuf.write("Failure: the job queue is full and doesn't accept new"
2278 " job submissions until old jobs are archived\n")
2279 elif isinstance(err, errors.TypeEnforcementError):
2280 obuf.write("Parameter Error: %s" % msg)
2281 elif isinstance(err, errors.ParameterError):
2282 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2283 elif isinstance(err, luxi.NoMasterError):
2284 obuf.write("Cannot communicate with the master daemon.\nIs it running"
2285 " and listening for connections?")
2286 elif isinstance(err, luxi.TimeoutError):
2287 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2288 " been submitted and will continue to run even if the call"
2289 " timed out. Useful commands in this situation are \"gnt-job"
2290 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2292 elif isinstance(err, luxi.PermissionError):
2293 obuf.write("It seems you don't have permissions to connect to the"
2294 " master daemon.\nPlease retry as a different user.")
2295 elif isinstance(err, luxi.ProtocolError):
2296 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2298 elif isinstance(err, errors.JobLost):
2299 obuf.write("Error checking job status: %s" % msg)
2300 elif isinstance(err, errors.QueryFilterParseError):
2301 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2302 obuf.write("\n".join(err.GetDetails()))
2303 elif isinstance(err, errors.GenericError):
2304 obuf.write("Unhandled Ganeti error: %s" % msg)
2305 elif isinstance(err, JobSubmittedException):
2306 obuf.write("JobID: %s\n" % err.args[0])
2309 obuf.write("Unhandled exception: %s" % msg)
2310 return retcode, obuf.getvalue().rstrip("\n")
2313 def GenericMain(commands, override=None, aliases=None,
2314 env_override=frozenset()):
2315 """Generic main function for all the gnt-* commands.
2317 @param commands: a dictionary with a special structure, see the design doc
2318 for command line handling.
2319 @param override: if not None, we expect a dictionary with keys that will
2320 override command line options; this can be used to pass
2321 options from the scripts to generic functions
2322 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2323 @param env_override: list of environment names which are allowed to submit
2324 default args for commands
2327 # save the program name and the entire command line for later logging
2329 binary = os.path.basename(sys.argv[0])
2331 binary = sys.argv[0]
2333 if len(sys.argv) >= 2:
2334 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2338 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2340 binary = "<unknown program>"
2341 cmdline = "<unknown>"
2347 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2349 except _ShowVersion:
2350 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2351 constants.RELEASE_VERSION)
2352 return constants.EXIT_SUCCESS
2353 except _ShowUsage, err:
2354 for line in _FormatUsage(binary, commands):
2358 return constants.EXIT_FAILURE
2360 return constants.EXIT_SUCCESS
2361 except errors.ParameterError, err:
2362 result, err_msg = FormatError(err)
2366 if func is None: # parse error
2369 if override is not None:
2370 for key, val in override.iteritems():
2371 setattr(options, key, val)
2373 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2374 stderr_logging=True)
2376 logging.info("Command line: %s", cmdline)
2379 result = func(options, args)
2380 except (errors.GenericError, luxi.ProtocolError,
2381 JobSubmittedException), err:
2382 result, err_msg = FormatError(err)
2383 logging.exception("Error during command processing")
2385 except KeyboardInterrupt:
2386 result = constants.EXIT_FAILURE
2387 ToStderr("Aborted. Note that if the operation created any jobs, they"
2388 " might have been submitted and"
2389 " will continue to run in the background.")
2390 except IOError, err:
2391 if err.errno == errno.EPIPE:
2392 # our terminal went away, we'll exit
2393 sys.exit(constants.EXIT_FAILURE)
2400 def ParseNicOption(optvalue):
2401 """Parses the value of the --net option(s).
2405 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2406 except (TypeError, ValueError), err:
2407 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2410 nics = [{}] * nic_max
2411 for nidx, ndict in optvalue:
2414 if not isinstance(ndict, dict):
2415 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2416 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2418 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2425 def GenericInstanceCreate(mode, opts, args):
2426 """Add an instance to the cluster via either creation or import.
2428 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2429 @param opts: the command line options selected by the user
2431 @param args: should contain only one element, the new instance name
2433 @return: the desired exit code
2438 (pnode, snode) = SplitNodeOption(opts.node)
2443 hypervisor, hvparams = opts.hypervisor
2446 nics = ParseNicOption(opts.nics)
2450 elif mode == constants.INSTANCE_CREATE:
2451 # default of one nic, all auto
2457 if opts.disk_template == constants.DT_DISKLESS:
2458 if opts.disks or opts.sd_size is not None:
2459 raise errors.OpPrereqError("Diskless instance but disk"
2460 " information passed", errors.ECODE_INVAL)
2463 if (not opts.disks and not opts.sd_size
2464 and mode == constants.INSTANCE_CREATE):
2465 raise errors.OpPrereqError("No disk information specified",
2467 if opts.disks and opts.sd_size is not None:
2468 raise errors.OpPrereqError("Please use either the '--disk' or"
2469 " '-s' option", errors.ECODE_INVAL)
2470 if opts.sd_size is not None:
2471 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2475 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2476 except ValueError, err:
2477 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2479 disks = [{}] * disk_max
2482 for didx, ddict in opts.disks:
2484 if not isinstance(ddict, dict):
2485 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2486 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2487 elif constants.IDISK_SIZE in ddict:
2488 if constants.IDISK_ADOPT in ddict:
2489 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2490 " (disk %d)" % didx, errors.ECODE_INVAL)
2492 ddict[constants.IDISK_SIZE] = \
2493 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2494 except ValueError, err:
2495 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2496 (didx, err), errors.ECODE_INVAL)
2497 elif constants.IDISK_ADOPT in ddict:
2498 if mode == constants.INSTANCE_IMPORT:
2499 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2500 " import", errors.ECODE_INVAL)
2501 ddict[constants.IDISK_SIZE] = 0
2503 raise errors.OpPrereqError("Missing size or adoption source for"
2504 " disk %d" % didx, errors.ECODE_INVAL)
2507 if opts.tags is not None:
2508 tags = opts.tags.split(",")
2512 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2513 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2515 if mode == constants.INSTANCE_CREATE:
2518 force_variant = opts.force_variant
2521 no_install = opts.no_install
2522 identify_defaults = False
2523 elif mode == constants.INSTANCE_IMPORT:
2526 force_variant = False
2527 src_node = opts.src_node
2528 src_path = opts.src_dir
2530 identify_defaults = opts.identify_defaults
2532 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2534 op = opcodes.OpInstanceCreate(instance_name=instance,
2536 disk_template=opts.disk_template,
2538 conflicts_check=opts.conflicts_check,
2539 pnode=pnode, snode=snode,
2540 ip_check=opts.ip_check,
2541 name_check=opts.name_check,
2542 wait_for_sync=opts.wait_for_sync,
2543 file_storage_dir=opts.file_storage_dir,
2544 file_driver=opts.file_driver,
2545 iallocator=opts.iallocator,
2546 hypervisor=hypervisor,
2548 beparams=opts.beparams,
2549 osparams=opts.osparams,
2553 force_variant=force_variant,
2557 no_install=no_install,
2558 identify_defaults=identify_defaults,
2559 ignore_ipolicy=opts.ignore_ipolicy)
2561 SubmitOrSend(op, opts)
2565 class _RunWhileClusterStoppedHelper:
2566 """Helper class for L{RunWhileClusterStopped} to simplify state management
2569 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2570 """Initializes this class.
2572 @type feedback_fn: callable
2573 @param feedback_fn: Feedback function
2574 @type cluster_name: string
2575 @param cluster_name: Cluster name
2576 @type master_node: string
2577 @param master_node Master node name
2578 @type online_nodes: list
2579 @param online_nodes: List of names of online nodes
2582 self.feedback_fn = feedback_fn
2583 self.cluster_name = cluster_name
2584 self.master_node = master_node
2585 self.online_nodes = online_nodes
2587 self.ssh = ssh.SshRunner(self.cluster_name)
2589 self.nonmaster_nodes = [name for name in online_nodes
2590 if name != master_node]
2592 assert self.master_node not in self.nonmaster_nodes
2594 def _RunCmd(self, node_name, cmd):
2595 """Runs a command on the local or a remote machine.
2597 @type node_name: string
2598 @param node_name: Machine name
2603 if node_name is None or node_name == self.master_node:
2604 # No need to use SSH
2605 result = utils.RunCmd(cmd)
2607 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2608 utils.ShellQuoteArgs(cmd))
2611 errmsg = ["Failed to run command %s" % result.cmd]
2613 errmsg.append("on node %s" % node_name)
2614 errmsg.append(": exitcode %s and error %s" %
2615 (result.exit_code, result.output))
2616 raise errors.OpExecError(" ".join(errmsg))
2618 def Call(self, fn, *args):
2619 """Call function while all daemons are stopped.
2622 @param fn: Function to be called
2625 # Pause watcher by acquiring an exclusive lock on watcher state file
2626 self.feedback_fn("Blocking watcher")
2627 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2629 # TODO: Currently, this just blocks. There's no timeout.
2630 # TODO: Should it be a shared lock?
2631 watcher_block.Exclusive(blocking=True)
2633 # Stop master daemons, so that no new jobs can come in and all running
2635 self.feedback_fn("Stopping master daemons")
2636 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2638 # Stop daemons on all nodes
2639 for node_name in self.online_nodes:
2640 self.feedback_fn("Stopping daemons on %s" % node_name)
2641 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2643 # All daemons are shut down now
2645 return fn(self, *args)
2646 except Exception, err:
2647 _, errmsg = FormatError(err)
2648 logging.exception("Caught exception")
2649 self.feedback_fn(errmsg)
2652 # Start cluster again, master node last
2653 for node_name in self.nonmaster_nodes + [self.master_node]:
2654 self.feedback_fn("Starting daemons on %s" % node_name)
2655 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2658 watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2797 def _FormatBool(value):
2798 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
      "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    match the actual values

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn(object):
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
3126 def _GetColFormatString(width, align_right):
3127 """Returns the format string for a field.
3135 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3327 def _ToStream(stream, txt, *args):
3328 """Write a message to a stream, bypassing the logging system
3330 @type stream: file object
3331 @param stream: the file to which we should write
3333 @param txt: the message
3339 stream.write(txt % args)
3344 except IOError, err:
3345 if err.errno == errno.EPIPE:
3346 # our terminal went away, we'll exit
3347 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
3376 class JobExecutor(object):
3377 """Class which manages the submission and execution of multiple jobs.
3379 Note that instances of this class should not be reused between
3383 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3388 self.verbose = verbose
3391 self.feedback_fn = feedback_fn
3392 self._counter = itertools.count()
3395 def _IfName(name, fmt):
3396 """Helper function for formatting name.
3404 def QueueJob(self, name, *ops):
3405 """Record a job for later submit.
3408 @param name: a description of the job, will be used in WaitJobSet
3411 SetGenericOpcodeOpts(ops, self.opts)
3412 self.queue.append((self._counter.next(), name, ops))
3414 def AddJobId(self, name, status, job_id):
3415 """Adds a job ID to the internal queue.
3418 self.jobs.append((self._counter.next(), status, job_id, name))
3420 def SubmitPending(self, each=False):
3421 """Submit all pending jobs.
3426 for (_, _, ops) in self.queue:
3427 # SubmitJob will remove the success status, but raise an exception if
3428 # the submission fails, so we'll notice that anyway.
3429 results.append([True, self.cl.SubmitJob(ops)[0]])
3431 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3432 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3433 self.jobs.append((idx, status, data, name))
3435 def _ChooseJob(self):
3436 """Choose a non-waiting/queued job to poll next.
3439 assert self.jobs, "_ChooseJob called with empty job list"
3441 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3445 for job_data, status in zip(self.jobs, result):
3446 if (isinstance(status, list) and status and
3447 status[0] in (constants.JOB_STATUS_QUEUED,
3448 constants.JOB_STATUS_WAITING,
3449 constants.JOB_STATUS_CANCELING)):
3450 # job is still present and waiting
3452 # good candidate found (either running job or lost job)
3453 self.jobs.remove(job_data)
3457 return self.jobs.pop(0)
3459 def GetResults(self):
3460 """Wait for and return the results of all jobs.
3463 @return: list of tuples (success, job results), in the same order
3464 as the submitted jobs; if a job has failed, instead of the result
3465 there will be the error message
3469 self.SubmitPending()
3472 ok_jobs = [row[2] for row in self.jobs if row[1]]
3474 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3476 # first, remove any non-submitted jobs
3477 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3478 for idx, _, jid, name in failures:
3479 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3480 results.append((idx, False, jid))
3483 (idx, _, jid, name) = self._ChooseJob()
3484 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3486 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3488 except errors.JobLost, err:
3489 _, job_result = FormatError(err)
3490 ToStderr("Job %s%s has been archived, cannot check its result",
3491 jid, self._IfName(name, " for %s"))
3493 except (errors.GenericError, luxi.ProtocolError), err:
3494 _, job_result = FormatError(err)
3496 # the error message will always be shown, verbose or not
3497 ToStderr("Job %s%s has failed: %s",
3498 jid, self._IfName(name, " for %s"), job_result)
3500 results.append((idx, success, job_result))
3502 # sort based on the index, then drop it
3504 results = [i[1:] for i in results]
3508 def WaitOrShow(self, wait):
3509 """Wait for job results or only print the job IDs.
3512 @param wait: whether to wait or not
3516 return self.GetResults()
3519 self.SubmitPending()
3520 for _, status, result, name in self.jobs:
3522 ToStdout("%s: %s", result, name)
3524 ToStderr("Failure for %s: %s", name, result)
3525 return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = " " * level

  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
3607 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3608 ispecs_cpu_count=None,
3609 ispecs_disk_count=None,
3610 ispecs_disk_size=None,
3611 ispecs_nic_count=None,
3612 ipolicy_disk_templates=None,
3613 ipolicy_vcpu_ratio=None,
3614 ipolicy_spindle_ratio=None,
3615 group_ipolicy=False,
3616 allowed_values=None,
3618 """Creation of instance policy based on command line options.
3620 @param fill_all: whether for cluster policies we should ensure that
3621 all values are filled
3627 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3628 if ispecs_disk_size:
3629 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3630 except (TypeError, ValueError, errors.UnitParseError), err:
3631 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3633 (ispecs_disk_size, ispecs_mem_size, err),
3636 # prepare ipolicy dict
3637 ipolicy_transposed = {
3638 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3639 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3640 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3641 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3642 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3645 # first, check that the values given are correct
3647 forced_type = TISPECS_GROUP_TYPES
3649 forced_type = TISPECS_CLUSTER_TYPES
3651 for specs in ipolicy_transposed.values():
3652 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3655 ipolicy_out = objects.MakeEmptyIPolicy()
3656 for name, specs in ipolicy_transposed.iteritems():
3657 assert name in constants.ISPECS_PARAMETERS
3658 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3659 ipolicy_out[key][name] = val
3661 # no filldict for non-dicts
3662 if not group_ipolicy and fill_all:
3663 if ipolicy_disk_templates is None:
3664 ipolicy_disk_templates = constants.DISK_TEMPLATES
3665 if ipolicy_vcpu_ratio is None:
3666 ipolicy_vcpu_ratio = \
3667 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3668 if ipolicy_spindle_ratio is None:
3669 ipolicy_spindle_ratio = \
3670 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
3671 if ipolicy_disk_templates is not None:
3672 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3673 if ipolicy_vcpu_ratio is not None:
3674 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3675 if ipolicy_spindle_ratio is not None:
3676 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3678 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)