4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
88 "FILESTORE_DRIVER_OPT",
96 "GLOBAL_SHARED_FILEDIR_OPT",
101 "DEFAULT_IALLOCATOR_OPT",
102 "IDENTIFY_DEFAULTS_OPT",
103 "IGNORE_CONSIST_OPT",
105 "IGNORE_FAILURES_OPT",
106 "IGNORE_OFFLINE_OPT",
107 "IGNORE_REMOVE_FAILURES_OPT",
108 "IGNORE_SECONDARIES_OPT",
112 "MAINTAIN_NODE_HEALTH_OPT",
114 "MASTER_NETMASK_OPT",
116 "MIGRATION_MODE_OPT",
121 "NEW_CLUSTER_CERT_OPT",
122 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
123 "NEW_CONFD_HMAC_KEY_OPT",
126 "NEW_SPICE_CERT_OPT",
128 "NOCONFLICTSCHECK_OPT",
129 "NODE_FORCE_JOIN_OPT",
131 "NODE_PLACEMENT_OPT",
135 "NODRBD_STORAGE_OPT",
141 "NOMODIFY_ETCHOSTS_OPT",
142 "NOMODIFY_SSH_SETUP_OPT",
146 "NORUNTIME_CHGS_OPT",
149 "NOSSH_KEYCHECK_OPT",
163 "PREALLOC_WIPE_DISKS_OPT",
164 "PRIMARY_IP_VERSION_OPT",
170 "REMOVE_INSTANCE_OPT",
171 "REMOVE_RESERVED_IPS_OPT",
177 "SECONDARY_ONLY_OPT",
182 "SHUTDOWN_TIMEOUT_OPT",
184 "SPECS_CPU_COUNT_OPT",
185 "SPECS_DISK_COUNT_OPT",
186 "SPECS_DISK_SIZE_OPT",
187 "SPECS_MEM_SIZE_OPT",
188 "SPECS_NIC_COUNT_OPT",
189 "IPOLICY_DISK_TEMPLATES",
190 "IPOLICY_VCPU_RATIO",
196 "STARTUP_PAUSED_OPT",
205 "USE_EXTERNAL_MIP_SCRIPT",
213 "IGNORE_IPOLICY_OPT",
214 "INSTANCE_POLICY_OPTS",
215 # Generic functions for CLI programs
217 "CreateIPolicyFromOpts",
219 "GenericInstanceCreate",
225 "JobSubmittedException",
227 "RunWhileClusterStopped",
231 # Formatting functions
232 "ToStderr", "ToStdout",
235 "FormatParameterDict",
244 # command line options support infrastructure
245 "ARGS_MANY_INSTANCES",
248 "ARGS_MANY_NETWORKS",
268 "OPT_COMPL_INST_ADD_NODES",
269 "OPT_COMPL_MANY_NODES",
270 "OPT_COMPL_ONE_IALLOCATOR",
271 "OPT_COMPL_ONE_INSTANCE",
272 "OPT_COMPL_ONE_NODE",
273 "OPT_COMPL_ONE_NODEGROUP",
274 "OPT_COMPL_ONE_NETWORK",
276 "OPT_COMPL_ONE_EXTSTORAGE",
281 "COMMON_CREATE_OPTS",
287 #: Priorities (sorted)
289 ("low", constants.OP_PRIO_LOW),
290 ("normal", constants.OP_PRIO_NORMAL),
291 ("high", constants.OP_PRIO_HIGH),
294 #: Priority dictionary for easier lookup
295 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
296 # we migrate to Python 2.6
297 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
299 # Query result status for clients
302 QR_INCOMPLETE) = range(3)
304 #: Maximum batch size for ChooseJob
308 # constants used to create InstancePolicy dictionary
309 TISPECS_GROUP_TYPES = {
310 constants.ISPECS_MIN: constants.VTYPE_INT,
311 constants.ISPECS_MAX: constants.VTYPE_INT,
314 TISPECS_CLUSTER_TYPES = {
315 constants.ISPECS_MIN: constants.VTYPE_INT,
316 constants.ISPECS_MAX: constants.VTYPE_INT,
317 constants.ISPECS_STD: constants.VTYPE_INT,
320 #: User-friendly names for query2 field types
322 constants.QFT_UNKNOWN: "Unknown",
323 constants.QFT_TEXT: "Text",
324 constants.QFT_BOOL: "Boolean",
325 constants.QFT_NUMBER: "Number",
326 constants.QFT_UNIT: "Storage size",
327 constants.QFT_TIMESTAMP: "Timestamp",
328 constants.QFT_OTHER: "Custom",
333 def __init__(self, min=0, max=None): # pylint: disable=W0622
338 return ("<%s min=%s max=%s>" %
339 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Argument with value suggestions.

  Value can be any of the choices passed to the constructor; the
  choices are offered as shell-completion suggestions only.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return "<%s min=%s max=%s choices=%r>" % (
      self.__class__.__name__, self.min, self.max, self.choices)
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
379 class ArgNode(_Argument):
385 class ArgNetwork(_Argument):
class ArgGroup(_Argument):
  """Node group argument.

  """
397 class ArgJobId(_Argument):
class ArgFile(_Argument):
  """File path argument.

  """
409 class ArgCommand(_Argument):
415 class ArgHost(_Argument):
421 class ArgOs(_Argument):
class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """
# Pre-built argument specifications shared by the command tables: the
# "MANY" variants accept any number of names, the "ONE" variants require
# exactly one.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command-line options; must carry a C{tag_type}
      attribute set by the command definition
  @param args: positional arguments; for non-cluster tag kinds the first
      element is popped off and used as the object name
  @return: tuple of (tag kind, object name or None)

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name needed
    return kind, None
  if kind in (constants.TAG_NODEGROUP,
              constants.TAG_NODE,
              constants.TAG_NETWORK,
              constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    # consume the object name so the remaining args are the tags
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  # print the tags in a stable (sorted) order
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull in additional tags from --from, if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull in additional tags from --from, if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
552 def check_unit(option, opt, value): # pylint: disable=W0613
553 """OptParsers custom converter for units.
557 return utils.ParseUnit(value)
558 except errors.UnitParseError, err:
559 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the
  '-' prefix will have value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  parsed = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif elem.startswith(NO_PREFIX):
        # "no_foo" is shorthand for foo=False
        key, val = elem[len(NO_PREFIX):], False
      elif elem.startswith(UN_PREFIX):
        # "-foo" is shorthand for foo=None (reset to default)
        key, val = elem[len(UN_PREFIX):], None
      else:
        # bare "foo" is shorthand for foo=True
        key, val = elem, True
      if key in parsed:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      parsed[key] = val
  return parsed
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  An ident prefixed with 'no_' yields (stripped_ident, False), and one
  prefixed with '-' (unless followed by a digit, which denotes a
  negative index) yields (stripped_ident, None); in both cases no
  key=val options may follow.

  @raises errors.ParameterError: if options are passed together with a
      removal prefix, or on invalid key=val data

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         # a digit after the prefix means a negative index, not a removal;
         # note: the subscript already yields a single character, so the
         # former extra "[0]" index was redundant and has been dropped
         not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value in (constants.VALUE_FALSE, "no"):
    return False
  if value in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  The literal default keyword is passed through unchanged; anything
  else is converted to float.

  """
  value = value.lower()
  if value == constants.VALUE_DEFAULT:
    return value
  return float(value)
# completion_suggest is normally a list of strings. The following numeric
# constants (all truthy, so they survive boolean tests) request dynamic
# completion of the respective object type instead.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

#: Set of all dynamic-completion markers
OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends the stock optparse Option with a C{completion_suggest}
  attribute and with the custom value types registered below.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py provides a make_option alias; provide the analogous alias
# for our own option class, too
cli_option = CliOption
732 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
733 help="Increase debugging level")
735 NOHDR_OPT = cli_option("--no-headers", default=False,
736 action="store_true", dest="no_headers",
737 help="Don't display column headers")
739 SEP_OPT = cli_option("--separator", default=None,
740 action="store", dest="separator",
741 help=("Separator between output fields"
742 " (defaults to one space)"))
744 USEUNITS_OPT = cli_option("--units", default=None,
745 dest="units", choices=("h", "m", "g", "t"),
746 help="Specify units for output (one of h/m/g/t)")
748 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
749 type="string", metavar="FIELDS",
750 help="Comma separated list of output fields")
752 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
753 default=False, help="Force the operation")
755 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
756 default=False, help="Do not require confirmation")
758 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
759 action="store_true", default=False,
760 help=("Ignore offline nodes and do as much"
763 TAG_ADD_OPT = cli_option("--tags", dest="tags",
764 default=None, help="Comma-separated list of instance"
767 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
768 default=None, help="File with tag names")
770 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
771 default=False, action="store_true",
772 help=("Submit the job and return the job ID, but"
773 " don't wait for the job to finish"))
775 SYNC_OPT = cli_option("--sync", dest="do_locking",
776 default=False, action="store_true",
777 help=("Grab locks while doing the queries"
778 " in order to ensure more consistent results"))
780 DRY_RUN_OPT = cli_option("--dry-run", default=False,
782 help=("Do not execute the operation, just run the"
783 " check steps and verify if it could be"
786 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
788 help="Increase the verbosity of the operation")
790 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
791 action="store_true", dest="simulate_errors",
792 help="Debugging option that makes the operation"
793 " treat most runtime checks as failed")
795 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
796 default=True, action="store_false",
797 help="Don't wait for sync (DANGEROUS!)")
799 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
800 default=False, action="store_true",
801 help="Wait for disks to sync")
803 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
804 action="store_true", default=False,
805 help="Enable offline instance")
807 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
808 action="store_true", default=False,
809 help="Disable down instance")
811 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
812 help=("Custom disk setup (%s)" %
813 utils.CommaJoin(constants.DISK_TEMPLATES)),
814 default=None, metavar="TEMPL",
815 choices=list(constants.DISK_TEMPLATES))
817 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
818 help="Do not create any network cards for"
821 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
822 help="Relative path under default cluster-wide"
823 " file storage dir to store file-based disks",
824 default=None, metavar="<DIR>")
826 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
827 help="Driver to use for image files",
828 default="loop", metavar="<DRIVER>",
829 choices=list(constants.FILE_DRIVER))
831 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
832 help="Select nodes for the instance automatically"
833 " using the <NAME> iallocator plugin",
834 default=None, type="string",
835 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
837 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
839 help="Set the default instance"
841 default=None, type="string",
842 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
844 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
846 completion_suggest=OPT_COMPL_ONE_OS)
848 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
849 type="keyval", default={},
850 help="OS parameters")
852 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
853 action="store_true", default=False,
854 help="Force an unknown variant")
856 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
857 action="store_true", default=False,
858 help="Do not install the OS (will"
861 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
862 dest="allow_runtime_chgs",
863 default=True, action="store_false",
864 help="Don't allow runtime changes")
866 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
867 type="keyval", default={},
868 help="Backend parameters")
870 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
871 default={}, dest="hvparams",
872 help="Hypervisor parameters")
874 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
875 help="Disk template parameters, in the format"
876 " template:option=value,option=value,...",
877 type="identkeyval", action="append", default=[])
879 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
880 type="keyval", default={},
881 help="Memory size specs: list of key=value,"
882 " where key is one of min, max, std"
883 " (in MB or using a unit)")
885 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
886 type="keyval", default={},
887 help="CPU count specs: list of key=value,"
888 " where key is one of min, max, std")
890 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
891 dest="ispecs_disk_count",
892 type="keyval", default={},
893 help="Disk count specs: list of key=value,"
894 " where key is one of min, max, std")
896 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
897 type="keyval", default={},
898 help="Disk size specs: list of key=value,"
899 " where key is one of min, max, std"
900 " (in MB or using a unit)")
902 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
903 type="keyval", default={},
904 help="NIC count specs: list of key=value,"
905 " where key is one of min, max, std")
907 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
908 dest="ipolicy_disk_templates",
909 type="list", default=None,
910 help="Comma-separated list of"
911 " enabled disk templates")
913 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
914 dest="ipolicy_vcpu_ratio",
915 type="maybefloat", default=None,
916 help="The maximum allowed vcpu-to-cpu ratio")
918 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
919 dest="ipolicy_spindle_ratio",
920 type="maybefloat", default=None,
921 help=("The maximum allowed instances to"
924 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
925 help="Hypervisor and hypervisor options, in the"
926 " format hypervisor:option=value,option=value,...",
927 default=None, type="identkeyval")
929 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
930 help="Hypervisor and hypervisor options, in the"
931 " format hypervisor:option=value,option=value,...",
932 default=[], action="append", type="identkeyval")
934 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
935 action="store_false",
936 help="Don't check that the instance's IP"
939 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
940 default=True, action="store_false",
941 help="Don't check that the instance's name"
944 NET_OPT = cli_option("--net",
945 help="NIC parameters", default=[],
946 dest="nics", action="append", type="identkeyval")
948 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
949 dest="disks", action="append", type="identkeyval")
951 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
952 help="Comma-separated list of disks"
953 " indices to act on (e.g. 0,2) (optional,"
954 " defaults to all disks)")
956 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
957 help="Enforces a single-disk configuration using the"
958 " given disk size, in MiB unless a suffix is used",
959 default=None, type="unit", metavar="<size>")
961 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
962 dest="ignore_consistency",
963 action="store_true", default=False,
964 help="Ignore the consistency of the disks on"
967 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
968 dest="allow_failover",
969 action="store_true", default=False,
970 help="If migration is not possible fallback to"
973 NONLIVE_OPT = cli_option("--non-live", dest="live",
974 default=True, action="store_false",
975 help="Do a non-live migration (this usually means"
976 " freeze the instance, save the state, transfer and"
977 " only then resume running on the secondary node)")
979 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
981 choices=list(constants.HT_MIGRATION_MODES),
982 help="Override default migration mode (choose"
983 " either live or non-live")
985 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
986 help="Target node and optional secondary node",
987 metavar="<pnode>[:<snode>]",
988 completion_suggest=OPT_COMPL_INST_ADD_NODES)
990 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
991 action="append", metavar="<node>",
992 help="Use only this node (can be used multiple"
993 " times, if not given defaults to all nodes)",
994 completion_suggest=OPT_COMPL_ONE_NODE)
996 NODEGROUP_OPT_NAME = "--node-group"
997 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
999 help="Node group (name or uuid)",
1000 metavar="<nodegroup>",
1001 default=None, type="string",
1002 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1004 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1006 completion_suggest=OPT_COMPL_ONE_NODE)
1008 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1009 action="store_false",
1010 help="Don't start the instance after creation")
1012 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1013 action="store_true", default=False,
1014 help="Show command instead of executing it")
1016 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1017 default=False, action="store_true",
1018 help="Instead of performing the migration, try to"
1019 " recover from a failed cleanup. This is safe"
1020 " to run even if the instance is healthy, but it"
1021 " will create extra replication traffic and "
1022 " disrupt briefly the replication (like during the"
1025 STATIC_OPT = cli_option("-s", "--static", dest="static",
1026 action="store_true", default=False,
1027 help="Only show configuration data, not runtime data")
1029 ALL_OPT = cli_option("--all", dest="show_all",
1030 default=False, action="store_true",
1031 help="Show info on all instances on the cluster."
1032 " This can take a long time to run, use wisely")
1034 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1035 action="store_true", default=False,
1036 help="Interactive OS reinstall, lists available"
1037 " OS templates for selection")
1039 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1040 action="store_true", default=False,
1041 help="Remove the instance from the cluster"
1042 " configuration even if there are failures"
1043 " during the removal process")
1045 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1046 dest="ignore_remove_failures",
1047 action="store_true", default=False,
1048 help="Remove the instance from the"
1049 " cluster configuration even if there"
1050 " are failures during the removal"
1053 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1054 action="store_true", default=False,
1055 help="Remove the instance from the cluster")
1057 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1058 help="Specifies the new node for the instance",
1059 metavar="NODE", default=None,
1060 completion_suggest=OPT_COMPL_ONE_NODE)
1062 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1063 help="Specifies the new secondary node",
1064 metavar="NODE", default=None,
1065 completion_suggest=OPT_COMPL_ONE_NODE)
1067 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1068 default=False, action="store_true",
1069 help="Replace the disk(s) on the primary"
1070 " node (applies only to internally mirrored"
1071 " disk templates, e.g. %s)" %
1072 utils.CommaJoin(constants.DTS_INT_MIRROR))
1074 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1075 default=False, action="store_true",
1076 help="Replace the disk(s) on the secondary"
1077 " node (applies only to internally mirrored"
1078 " disk templates, e.g. %s)" %
1079 utils.CommaJoin(constants.DTS_INT_MIRROR))
1081 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1082 default=False, action="store_true",
1083 help="Lock all nodes and auto-promote as needed"
1086 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1087 default=False, action="store_true",
1088 help="Automatically replace faulty disks"
1089 " (applies only to internally mirrored"
1090 " disk templates, e.g. %s)" %
1091 utils.CommaJoin(constants.DTS_INT_MIRROR))
1093 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1094 default=False, action="store_true",
1095 help="Ignore current recorded size"
1096 " (useful for forcing activation when"
1097 " the recorded size is wrong)")
1099 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1101 completion_suggest=OPT_COMPL_ONE_NODE)
1103 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1106 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1107 help="Specify the secondary ip for the node",
1108 metavar="ADDRESS", default=None)
1110 READD_OPT = cli_option("--readd", dest="readd",
1111 default=False, action="store_true",
1112 help="Readd old node after replacing it")
1114 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1115 default=True, action="store_false",
1116 help="Disable SSH key fingerprint checking")
1118 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1119 default=False, action="store_true",
1120 help="Force the joining of a node")
1122 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1123 type="bool", default=None, metavar=_YORNO,
1124 help="Set the master_candidate flag on the node")
1126 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1127 type="bool", default=None,
1128 help=("Set the offline flag on the node"
1129 " (cluster does not communicate with offline"
1132 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1133 type="bool", default=None,
1134 help=("Set the drained flag on the node"
1135 " (excluded from allocation operations)"))
1137 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1138 type="bool", default=None, metavar=_YORNO,
1139 help="Set the master_capable flag on the node")
1141 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1142 type="bool", default=None, metavar=_YORNO,
1143 help="Set the vm_capable flag on the node")
1145 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1146 type="bool", default=None, metavar=_YORNO,
1147 help="Set the allocatable flag on a volume")
1149 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1150 help="Disable support for lvm based instances"
1152 action="store_false", default=True)
1154 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1155 dest="enabled_hypervisors",
1156 help="Comma-separated list of hypervisors",
1157 type="string", default=None)
1159 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1160 type="keyval", default={},
1161 help="NIC parameters")
1163 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1164 dest="candidate_pool_size", type="int",
1165 help="Set the candidate pool size")
1167 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1168 help=("Enables LVM and specifies the volume group"
1169 " name (cluster-wide) for disk allocation"
1170 " [%s]" % constants.DEFAULT_VG),
1171 metavar="VG", default=None)
1173 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1174 help="Destroy cluster", action="store_true")
1176 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1177 help="Skip node agreement check (dangerous)",
1178 action="store_true", default=False)
1180 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1181 help="Specify the mac prefix for the instance IP"
1182 " addresses, in the format XX:XX:XX",
1186 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1187 help="Specify the node interface (cluster-wide)"
1188 " on which the master IP address will be added"
1189 " (cluster init default: %s)" %
1190 constants.DEFAULT_BRIDGE,
1194 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1195 help="Specify the netmask of the master IP",
1199 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1200 dest="use_external_mip_script",
1201 help="Specify whether to run a"
1202 " user-provided script for the master"
1203 " IP address turnup and"
1204 " turndown operations",
1205 type="bool", metavar=_YORNO, default=None)
1207 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1208 help="Specify the default directory (cluster-"
1209 "wide) for storing the file-based disks [%s]" %
1210 pathutils.DEFAULT_FILE_STORAGE_DIR,
1212 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1214 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1215 "--shared-file-storage-dir",
1216 dest="shared_file_storage_dir",
1217 help="Specify the default directory (cluster-wide) for storing the"
1218 " shared file-based disks [%s]" %
1219 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1220 metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1222 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1223 help="Don't modify %s" % pathutils.ETC_HOSTS,
1224 action="store_false", default=True)
1226 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1227 help="Don't initialize SSH keys",
1228 action="store_false", default=True)
1230 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1231 help="Enable parseable error messages",
1232 action="store_true", default=False)
1234 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1235 help="Skip N+1 memory redundancy tests",
1236 action="store_true", default=False)
1238 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1239 help="Type of reboot: soft/hard/full",
1240 default=constants.INSTANCE_REBOOT_HARD,
1242 choices=list(constants.REBOOT_TYPES))
1244 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1245 dest="ignore_secondaries",
1246 default=False, action="store_true",
1247 help="Ignore errors from secondaries")
1249 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1250 action="store_false", default=True,
1251 help="Don't shutdown the instance (unsafe)")
1253 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1254 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1255 help="Maximum time to wait")
1257 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1258 dest="shutdown_timeout", type="int",
1259 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1260 help="Maximum time to wait for instance"
1263 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1265 help=("Number of seconds between repetions of the"
1268 EARLY_RELEASE_OPT = cli_option("--early-release",
1269 dest="early_release", default=False,
1270 action="store_true",
1271 help="Release the locks on the secondary"
1274 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1275 dest="new_cluster_cert",
1276 default=False, action="store_true",
1277 help="Generate a new cluster certificate")
1279 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1281 help="File containing new RAPI certificate")
1283 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1284 default=None, action="store_true",
1285 help=("Generate a new self-signed RAPI"
1288 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1290 help="File containing new SPICE certificate")
1292 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1294 help="File containing the certificate of the CA"
1295 " which signed the SPICE certificate")
1297 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1298 dest="new_spice_cert", default=None,
1299 action="store_true",
1300 help=("Generate a new self-signed SPICE"
1303 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1304 dest="new_confd_hmac_key",
1305 default=False, action="store_true",
1306 help=("Create a new HMAC key for %s" %
1309 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1310 dest="cluster_domain_secret",
1312 help=("Load new new cluster domain"
1313 " secret from file"))
1315 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1316 dest="new_cluster_domain_secret",
1317 default=False, action="store_true",
1318 help=("Create a new cluster domain"
1321 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1322 dest="use_replication_network",
1323 help="Whether to use the replication network"
1324 " for talking to the nodes",
1325 action="store_true", default=False)
1327 MAINTAIN_NODE_HEALTH_OPT = \
1328 cli_option("--maintain-node-health", dest="maintain_node_health",
1329 metavar=_YORNO, default=None, type="bool",
1330 help="Configure the cluster to automatically maintain node"
1331 " health, by shutting down unknown instances, shutting down"
1332 " unknown DRBD devices, etc.")
1334 IDENTIFY_DEFAULTS_OPT = \
1335 cli_option("--identify-defaults", dest="identify_defaults",
1336 default=False, action="store_true",
1337 help="Identify which saved instance parameters are equal to"
1338 " the current cluster defaults and set them as such, instead"
1339 " of marking them as overridden")
1341 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1342 action="store", dest="uid_pool",
1343 help=("A list of user-ids or user-id"
1344 " ranges separated by commas"))
1346 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1347 action="store", dest="add_uids",
1348 help=("A list of user-ids or user-id"
1349 " ranges separated by commas, to be"
1350 " added to the user-id pool"))
1352 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1353 action="store", dest="remove_uids",
1354 help=("A list of user-ids or user-id"
1355 " ranges separated by commas, to be"
1356 " removed from the user-id pool"))
1358 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1359 action="store", dest="reserved_lvs",
1360 help=("A comma-separated list of reserved"
1361 " logical volumes names, that will be"
1362 " ignored by cluster verify"))
1364 ROMAN_OPT = cli_option("--roman",
1365 dest="roman_integers", default=False,
1366 action="store_true",
1367 help="Use roman numbers for positive integers")
1369 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1370 action="store", default=None,
1371 help="Specifies usermode helper for DRBD")
1373 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1374 action="store_false", default=True,
1375 help="Disable support for DRBD")
1377 PRIMARY_IP_VERSION_OPT = \
1378 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1379 action="store", dest="primary_ip_version",
1380 metavar="%d|%d" % (constants.IP4_VERSION,
1381 constants.IP6_VERSION),
1382 help="Cluster-wide IP version for primary IP")
1384 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1385 action="store_true",
1386 help="Show machine name for every line in output")
1388 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1389 action="store_true",
1390 help=("Hide successful results and show failures"
1391 " only (determined by the exit code)"))
1394 def _PriorityOptionCb(option, _, value, parser):
1395 """Callback for processing C{--priority} option.
1398 value = _PRIONAME_TO_VALUE[value]
1400 setattr(parser.values, option.dest, value)
1403 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1404 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1405 choices=_PRIONAME_TO_VALUE.keys(),
1406 action="callback", type="choice",
1407 callback=_PriorityOptionCb,
1408 help="Priority for opcode processing")
1410 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1411 type="bool", default=None, metavar=_YORNO,
1412 help="Sets the hidden flag on the OS")
1414 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1415 type="bool", default=None, metavar=_YORNO,
1416 help="Sets the blacklisted flag on the OS")
1418 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1419 type="bool", metavar=_YORNO,
1420 dest="prealloc_wipe_disks",
1421 help=("Wipe disks prior to instance"
1424 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1425 type="keyval", default=None,
1426 help="Node parameters")
1428 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1429 action="store", metavar="POLICY", default=None,
1430 help="Allocation policy for the node group")
1432 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1433 type="bool", metavar=_YORNO,
1434 dest="node_powered",
1435 help="Specify if the SoR for node is powered")
1437 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1438 default=constants.OOB_TIMEOUT,
1439 help="Maximum time to wait for out-of-band helper")
1441 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1442 default=constants.OOB_POWER_DELAY,
1443 help="Time in seconds to wait between power-ons")
1445 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1446 action="store_true", default=False,
1447 help=("Whether command argument should be treated"
1450 NO_REMEMBER_OPT = cli_option("--no-remember",
1452 action="store_true", default=False,
1453 help="Perform but do not record the change"
1454 " in the configuration")
1456 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1457 default=False, action="store_true",
1458 help="Evacuate primary instances only")
1460 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1461 default=False, action="store_true",
1462 help="Evacuate secondary instances only"
1463 " (applies only to internally mirrored"
1464 " disk templates, e.g. %s)" %
1465 utils.CommaJoin(constants.DTS_INT_MIRROR))
1467 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1468 action="store_true", default=False,
1469 help="Pause instance at startup")
1471 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1472 help="Destination node group (name or uuid)",
1473 default=None, action="append",
1474 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1476 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1477 action="append", dest="ignore_errors",
1478 choices=list(constants.CV_ALL_ECODES_STRINGS),
1479 help="Error code to be ignored")
1481 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1483 help=("Specify disk state information in the"
1485 " storage_type/identifier:option=value,...;"
1486 " note this is unused for now"),
1489 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1491 help=("Specify hypervisor state information in the"
1492 " format hypervisor:option=value,...;"
1493 " note this is unused for now"),
1496 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1497 action="store_true", default=False,
1498 help="Ignore instance policy violations")
1500 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1501 help="Sets the instance's runtime memory,"
1502 " ballooning it up or down to the new value",
1503 default=None, type="unit", metavar="<size>")
1505 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1506 action="store_true", default=False,
1507 help="Marks the grow as absolute instead of the"
1508 " (default) relative mode")
1510 NETWORK_OPT = cli_option("--network",
1511 action="store", default=None, dest="network",
1512 help="IP network in CIDR notation")
1514 GATEWAY_OPT = cli_option("--gateway",
1515 action="store", default=None, dest="gateway",
1516 help="IP address of the router (gateway)")
1518 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1519 action="store", default=None,
1520 dest="add_reserved_ips",
1521 help="Comma-separated list of"
1522 " reserved IPs to add")
1524 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1525 action="store", default=None,
1526 dest="remove_reserved_ips",
1527 help="Comma-delimited list of"
1528 " reserved IPs to remove")
1530 NETWORK_TYPE_OPT = cli_option("--network-type",
1531 action="store", default=None, dest="network_type",
1532 help="Network type: private, public, None")
1534 NETWORK6_OPT = cli_option("--network6",
1535 action="store", default=None, dest="network6",
1536 help="IP network in CIDR notation")
1538 GATEWAY6_OPT = cli_option("--gateway6",
1539 action="store", default=None, dest="gateway6",
1540 help="IP6 address of the router (gateway)")
1542 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1543 dest="conflicts_check",
1545 action="store_false",
1546 help="Don't check for conflicting IPs")
1548 #: Options provided by all commands
1549 COMMON_OPTS = [DEBUG_OPT]
1551 # common options for creating instances. add and import then add their own
1553 COMMON_CREATE_OPTS = [
1558 FILESTORE_DRIVER_OPT,
1564 NOCONFLICTSCHECK_OPT,
1576 # common instance policy options
1577 INSTANCE_POLICY_OPTS = [
1578 SPECS_CPU_COUNT_OPT,
1579 SPECS_DISK_COUNT_OPT,
1580 SPECS_DISK_SIZE_OPT,
1582 SPECS_NIC_COUNT_OPT,
1583 IPOLICY_DISK_TEMPLATES,
1585 IPOLICY_SPINDLE_RATIO,
1589 class _ShowUsage(Exception):
1590 """Exception class for L{_ParseArgs}.
1593 def __init__(self, exit_error):
1594 """Initializes instances of this class.
1596 @type exit_error: bool
1597 @param exit_error: Whether to report failure on exit
1600 Exception.__init__(self)
1601 self.exit_error = exit_error
1604 class _ShowVersion(Exception):
1605 """Exception class for L{_ParseArgs}.
1610 def _ParseArgs(binary, argv, commands, aliases, env_override):
1611 """Parser for the command line arguments.
1613 This function parses the arguments and returns the function which
1614 must be executed together with its (modified) arguments.
1616 @param binary: Script name
1617 @param argv: Command line arguments
1618 @param commands: Dictionary containing command definitions
1619 @param aliases: dictionary with command aliases {"alias": "target", ...}
1620 @param env_override: list of env variables allowed for default args
1621 @raise _ShowUsage: If usage description should be shown
1622 @raise _ShowVersion: If version should be shown
1625 assert not (env_override - set(commands))
1626 assert not (set(aliases.keys()) & set(commands.keys()))
1631 # No option or command given
1632 raise _ShowUsage(exit_error=True)
1634 if cmd == "--version":
1635 raise _ShowVersion()
1636 elif cmd == "--help":
1637 raise _ShowUsage(exit_error=False)
1638 elif not (cmd in commands or cmd in aliases):
1639 raise _ShowUsage(exit_error=True)
1641 # get command, unalias it, and look it up in commands
1643 if aliases[cmd] not in commands:
1644 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1645 " command '%s'" % (cmd, aliases[cmd]))
1649 if cmd in env_override:
1650 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1651 env_args = os.environ.get(args_env_name)
1653 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1655 func, args_def, parser_opts, usage, description = commands[cmd]
1656 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1657 description=description,
1658 formatter=TitledHelpFormatter(),
1659 usage="%%prog %s %s" % (cmd, usage))
1660 parser.disable_interspersed_args()
1661 options, args = parser.parse_args(args=argv[2:])
1663 if not _CheckArguments(cmd, args_def, args):
1664 return None, None, None
1666 return func, options, args
1669 def _FormatUsage(binary, commands):
1670 """Generates a nice description of all commands.
1672 @param binary: Script name
1673 @param commands: Dictionary containing command definitions
1676 # compute the max line length for cmd + usage
1677 mlen = min(60, max(map(len, commands)))
1679 yield "Usage: %s {command} [options...] [argument...]" % binary
1680 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1684 # and format a nice command list
1685 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1686 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1687 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1688 for line in help_lines:
1689 yield " %-*s %s" % (mlen, "", line)
1694 def _CheckArguments(cmd, args_def, args):
1695 """Verifies the arguments using the argument definition.
1699 1. Abort with error if values specified by user but none expected.
1701 1. For each argument in definition
1703 1. Keep running count of minimum number of values (min_count)
1704 1. Keep running count of maximum number of values (max_count)
1705 1. If it has an unlimited number of values
1707 1. Abort with error if it's not the last argument in the definition
1709 1. If last argument has limited number of values
1711 1. Abort with error if number of values doesn't match or is too large
1713 1. Abort with error if user didn't pass enough values (min_count)
1716 if args and not args_def:
1717 ToStderr("Error: Command %s expects no arguments", cmd)
1724 last_idx = len(args_def) - 1
1726 for idx, arg in enumerate(args_def):
1727 if min_count is None:
1729 elif arg.min is not None:
1730 min_count += arg.min
1732 if max_count is None:
1734 elif arg.max is not None:
1735 max_count += arg.max
1738 check_max = (arg.max is not None)
1740 elif arg.max is None:
1741 raise errors.ProgrammerError("Only the last argument can have max=None")
1744 # Command with exact number of arguments
1745 if (min_count is not None and max_count is not None and
1746 min_count == max_count and len(args) != min_count):
1747 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1750 # Command with limited number of arguments
1751 if max_count is not None and len(args) > max_count:
1752 ToStderr("Error: Command %s expects only %d argument(s)",
1756 # Command with some required arguments
1757 if min_count is not None and len(args) < min_count:
1758 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @return: a (primary, secondary) pair; the secondary element is None
      unless the value contains a ":" separator

  """
  if not value or ":" not in value:
    return (value, None)
  # split only on the first colon so the secondary part may itself
  # contain colons
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    # no variants (None or empty list): the plain OS name is the only
    # valid name
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  @rtype: list
  @return: the default fields when nothing was selected; the default
      fields extended by the requested ones when the selection starts
      with "+"; otherwise exactly the comma-separated requested fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    # "+a,b" means: the default fields plus "a" and "b"
    return default + selected[1:].split(",")

  return selected.split(",")
1810 UsesRPC = rpc.RunWithRPC
1813 def AskUser(text, choices=None):
1814 """Ask the user a question.
1816 @param text: the question to ask
1818 @param choices: list with elements tuples (input_char, return_value,
1819 description); if not given, it will default to: [('y', True,
1820 'Perform the operation'), ('n', False, 'Do no do the operation')];
1821 note that the '?' char is reserved for help
1823 @return: one of the return values from the choices list; if input is
1824 not possible (i.e. not running with a tty, we return the last
1829 choices = [("y", True, "Perform the operation"),
1830 ("n", False, "Do not perform the operation")]
1831 if not choices or not isinstance(choices, list):
1832 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1833 for entry in choices:
1834 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1835 raise errors.ProgrammerError("Invalid choices element to AskUser")
1837 answer = choices[-1][1]
1839 for line in text.splitlines():
1840 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1841 text = "\n".join(new_text)
1843 f = file("/dev/tty", "a+")
1847 chars = [entry[0] for entry in choices]
1848 chars[-1] = "[%s]" % chars[-1]
1850 maps = dict([(entry[0], entry[1]) for entry in choices])
1854 f.write("/".join(chars))
1856 line = f.readline(2).strip().lower()
1861 for entry in choices:
1862 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the job ID assigned by the master daemon

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
1899 def GenericPollJob(job_id, cbs, report_cbs):
1900 """Generic job-polling function.
1902 @type job_id: number
1903 @param job_id: Job ID
1904 @type cbs: Instance of L{JobPollCbBase}
1905 @param cbs: Data callbacks
1906 @type report_cbs: Instance of L{JobPollReportCbBase}
1907 @param report_cbs: Reporting callbacks
1910 prev_job_info = None
1911 prev_logmsg_serial = None
1916 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1919 # job not found, go away!
1920 raise errors.JobLost("Job with id %s lost" % job_id)
1922 if result == constants.JOB_NOTCHANGED:
1923 report_cbs.ReportNotChanged(job_id, status)
1928 # Split result, a tuple of (field values, log entries)
1929 (job_info, log_entries) = result
1930 (status, ) = job_info
1933 for log_entry in log_entries:
1934 (serial, timestamp, log_type, message) = log_entry
1935 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1937 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1939 # TODO: Handle canceled and archived jobs
1940 elif status in (constants.JOB_STATUS_SUCCESS,
1941 constants.JOB_STATUS_ERROR,
1942 constants.JOB_STATUS_CANCELING,
1943 constants.JOB_STATUS_CANCELED):
1946 prev_job_info = job_info
1948 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1950 raise errors.JobLost("Job with id %s lost" % job_id)
1952 status, opstatus, result = jobs[0]
1954 if status == constants.JOB_STATUS_SUCCESS:
1957 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1958 raise errors.OpExecError("Job was canceled")
1961 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1962 if status == constants.OP_STATUS_SUCCESS:
1964 elif status == constants.OP_STATUS_ERROR:
1965 errors.MaybeRaise(msg)
1968 raise errors.OpExecError("partial failure (opcode %d): %s" %
1971 raise errors.OpExecError(str(msg))
1973 # default failure mode
1974 raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    Must be implemented by subclasses.

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    Must be implemented by subclasses.

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """Job-polling callbacks backed by a LUXI client.

  All calls are forwarded verbatim to the client given at construction
  time.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: LUXI client all queries are forwarded to

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward every log message to a function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: Callable receiving one
        C{(timestamp, log_type, log_msg)} tuple per job log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print job progress to standard output.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Remember what was already announced so each waiting state is
    # reported at most once per job
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: one of the C{constants.ELOG_*} values
  @param log_msg: the message payload
  @return: the safely-encoded message text

  """
  if log_type == constants.ELOG_MESSAGE:
    # real messages are passed through as-is
    text = log_msg
  else:
    # non-textual payloads are converted to their string representation
    text = str(log_msg)

  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: Feedback function for log messages; mutually
      exclusive with C{reporter}
  @param reporter: Reporting callbacks; mutually exclusive with
      C{feedback_fn}
  @raise errors.ProgrammerError: if both C{reporter} and C{feedback_fn}
      are given

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    # choose a default reporter based on whether a feedback function
    # was supplied
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                    reporter=reporter)

  # exactly one opcode was submitted, so the result list has one entry
  return results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if not (opts and opts.submit_only):
    # submit and wait for the result
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  # send only: process the opcode ourselves, then hand the job ID to
  # the caller through the exception
  job = [op]
  SetGenericOpcodeOpts(job, opts)
  raise JobSubmittedException(SendJob(job, cl=cl))
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  for opcode in opcode_list:
    opcode.debug_level = options.debug
    if hasattr(options, "dry_run"):
      opcode.dry_run = options.dry_run
    priority = getattr(options, "priority", None)
    if priority is not None:
      opcode.priority = priority
2209 def GetClient(query=False):
2210 """Connects to the a luxi socket and returns a client.
2212 @type query: boolean
2213 @param query: this signifies that the client will only be
2214 used for queries; if the build-time parameter
2215 enable-split-queries is enabled, then the client will be
2216 connected to the query socket instead of the masterd socket
2219 if query and constants.ENABLE_SPLIT_QUERY:
2220 address = pathutils.QUERY_SOCKET
2223 # TODO: Cache object?
2225 client = luxi.Client(address=address)
2226 except luxi.NoMasterError:
2227 ss = ssconf.SimpleStore()
2229 # Try to read ssconf file
2232 except errors.ConfigurationError:
2233 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2234 " not part of a cluster",
2237 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2238 if master != myself:
2239 raise errors.OpPrereqError("This is not the master node, please connect"
2240 " to node '%s' and rerun the command" %
2241 master, errors.ECODE_INVAL)
2246 def FormatError(err):
2247 """Return a formatted error message for a given error.
2249 This function takes an exception instance and returns a tuple
2250 consisting of two values: first, the recommended exit code, and
2251 second, a string describing the error message (not
2252 newline-terminated).
2258 if isinstance(err, errors.ConfigurationError):
2259 txt = "Corrupt configuration file: %s" % msg
2261 obuf.write(txt + "\n")
2262 obuf.write("Aborting.")
2264 elif isinstance(err, errors.HooksAbort):
2265 obuf.write("Failure: hooks execution failed:\n")
2266 for node, script, out in err.args[0]:
2268 obuf.write(" node: %s, script: %s, output: %s\n" %
2269 (node, script, out))
2271 obuf.write(" node: %s, script: %s (no output)\n" %
2273 elif isinstance(err, errors.HooksFailure):
2274 obuf.write("Failure: hooks general failure: %s" % msg)
2275 elif isinstance(err, errors.ResolverError):
2276 this_host = netutils.Hostname.GetSysName()
2277 if err.args[0] == this_host:
2278 msg = "Failure: can't resolve my own hostname ('%s')"
2280 msg = "Failure: can't resolve hostname '%s'"
2281 obuf.write(msg % err.args[0])
2282 elif isinstance(err, errors.OpPrereqError):
2283 if len(err.args) == 2:
2284 obuf.write("Failure: prerequisites not met for this"
2285 " operation:\nerror type: %s, error details:\n%s" %
2286 (err.args[1], err.args[0]))
2288 obuf.write("Failure: prerequisites not met for this"
2289 " operation:\n%s" % msg)
2290 elif isinstance(err, errors.OpExecError):
2291 obuf.write("Failure: command execution error:\n%s" % msg)
2292 elif isinstance(err, errors.TagError):
2293 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2294 elif isinstance(err, errors.JobQueueDrainError):
2295 obuf.write("Failure: the job queue is marked for drain and doesn't"
2296 " accept new requests\n")
2297 elif isinstance(err, errors.JobQueueFull):
2298 obuf.write("Failure: the job queue is full and doesn't accept new"
2299 " job submissions until old jobs are archived\n")
2300 elif isinstance(err, errors.TypeEnforcementError):
2301 obuf.write("Parameter Error: %s" % msg)
2302 elif isinstance(err, errors.ParameterError):
2303 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2304 elif isinstance(err, luxi.NoMasterError):
2305 obuf.write("Cannot communicate with the master daemon.\nIs it running"
2306 " and listening for connections?")
2307 elif isinstance(err, luxi.TimeoutError):
2308 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2309 " been submitted and will continue to run even if the call"
2310 " timed out. Useful commands in this situation are \"gnt-job"
2311 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2313 elif isinstance(err, luxi.PermissionError):
2314 obuf.write("It seems you don't have permissions to connect to the"
2315 " master daemon.\nPlease retry as a different user.")
2316 elif isinstance(err, luxi.ProtocolError):
2317 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2319 elif isinstance(err, errors.JobLost):
2320 obuf.write("Error checking job status: %s" % msg)
2321 elif isinstance(err, errors.QueryFilterParseError):
2322 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2323 obuf.write("\n".join(err.GetDetails()))
2324 elif isinstance(err, errors.GenericError):
2325 obuf.write("Unhandled Ganeti error: %s" % msg)
2326 elif isinstance(err, JobSubmittedException):
2327 obuf.write("JobID: %s\n" % err.args[0])
2330 obuf.write("Unhandled exception: %s" % msg)
2331 return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"
    logname = "<unknown>"

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage as err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError as err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  # Scripts may pass in pre-computed option values via "override"
  if override is not None:
    for key, val in override.items():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException) as err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError as err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @type optvalue: list of (index, dict) tuples
  @param optvalue: NIC specifications as collected from the command line
  @rtype: list of dicts
  @return: one parameter dict per NIC, indexed by NIC index
  @raise errors.OpPrereqError: if an index or a NIC value is invalid

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError) as err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  # Use a comprehension instead of "[{}] * nic_max": list repetition would
  # make every unspecified slot alias the SAME dict object, so a later
  # mutation of one default NIC would silently change all of them.
  nics = [{} for _ in range(nic_max)]
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError as err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      # Comprehension instead of "[{}] * disk_max": list repetition would
      # alias every unspecified slot to the SAME dict object.
      disks = [{} for _ in range(disk_max)]
    else:
      disks = []

    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError as err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # Mode-specific opcode parameters
  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return constants.EXIT_SUCCESS
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # Every online node except the master; commands run locally on the master
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception as err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, online_nodes)
  return helper.Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    # Automatic unit selection depends on whether output is meant for humans
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: stringify values (formatting units) and track column widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # Avoid trailing whitespace on left-aligned last column
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2818 def _FormatBool(value):
2819 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  # NOTE: QFT_UNIT is deliberately absent; it depends on the runtime unit
  # setting and is handled specially in _GetColumnFormatter
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  # Caller-supplied overrides take precedence over the defaults
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report field status to the caller-supplied collector
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"

  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)

  if verbose:
    return verbose_text
  return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
      see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    # Human-readable units unless output is machine-parsed
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, used to derive the overall result status
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were present

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
      None for automatic choice (human-readable for non-separator usage,
      otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
      L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
      any simple filters built by namefield should use integer values to
      match the actual values

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  # Combine the name-based filter with any caller-supplied filter
  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
3144 """Describes a column for L{FormatTable}.
3147 def __init__(self, title, fn, align_right):
3148 """Initializes this class.
3151 @param title: Column title
3153 @param fn: Formatting function
3154 @type align_right: bool
3155 @param align_right: Whether to align values on the right-hand side
3160 self.align_right = align_right
3163 def _GetColFormatString(width, align_right):
3164 """Returns the format string for a field.
3172 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # Anything that is not a (seconds, microseconds) pair cannot be formatted
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)

  # Multipliers, in seconds, for each recognized suffix
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # Match either the group name or its UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each row is [(status, name), (status, offline), (status, sip)]
  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message
  @type args: list
  @param args: optional arguments for the "%" operator on C{txt}

  """
  try:
    if args:
      stream.write(txt % tuple(args))
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError as err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @type args: list
  @param args: optional arguments for the "%" operator on C{txt}

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @type args: list
  @param args: optional arguments for the "%" operator on C{txt}

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # Jobs queued locally but not yet submitted: (idx, name, ops)
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # Submitted jobs: (idx, submit status, job id or error, name)
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Monotonic counter to restore submission order in GetResults()
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((next(self._counter), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((next(self._counter), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()

    if not self.jobs:
      self.SubmitPending()
    for _, status, result, name in self.jobs:
      if status:
        ToStdout("%s: %s", result, name)
      else:
        ToStderr("Failure for %s: %s", name, result)
    return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level

  for key, data in sorted(actual.items()):
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      # Nested parameter group: recurse one level deeper
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      # Values not explicitly set are shown as coming from the defaults
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # For long lists, offer a "view" choice instead of printing everything
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return bool(choice)
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    # The special "default" keyword is passed through untouched
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
3644 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3645 ispecs_cpu_count=None,
3646 ispecs_disk_count=None,
3647 ispecs_disk_size=None,
3648 ispecs_nic_count=None,
3649 ipolicy_disk_templates=None,
3650 ipolicy_vcpu_ratio=None,
3651 ipolicy_spindle_ratio=None,
3652 group_ipolicy=False,
3653 allowed_values=None,
3655 """Creation of instance policy based on command line options.
3657 @param fill_all: whether for cluster policies we should ensure that
3658 all values are filled
3664 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3665 if ispecs_disk_size:
3666 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3667 except (TypeError, ValueError, errors.UnitParseError), err:
3668 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3670 (ispecs_disk_size, ispecs_mem_size, err),
3673 # prepare ipolicy dict
3674 ipolicy_transposed = {
3675 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3676 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3677 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3678 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3679 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3682 # first, check that the values given are correct
3684 forced_type = TISPECS_GROUP_TYPES
3686 forced_type = TISPECS_CLUSTER_TYPES
3688 for specs in ipolicy_transposed.values():
3689 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3692 ipolicy_out = objects.MakeEmptyIPolicy()
3693 for name, specs in ipolicy_transposed.iteritems():
3694 assert name in constants.ISPECS_PARAMETERS
3695 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3696 ipolicy_out[key][name] = val
3698 # no filldict for non-dicts
3699 if not group_ipolicy and fill_all:
3700 if ipolicy_disk_templates is None:
3701 ipolicy_disk_templates = constants.DISK_TEMPLATES
3702 if ipolicy_vcpu_ratio is None:
3703 ipolicy_vcpu_ratio = \
3704 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3705 if ipolicy_spindle_ratio is None:
3706 ipolicy_spindle_ratio = \
3707 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
3708 if ipolicy_disk_templates is not None:
3709 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3710 if ipolicy_vcpu_ratio is not None:
3711 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3712 if ipolicy_spindle_ratio is not None:
3713 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3715 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)