4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
68 "CLUSTER_DOMAIN_SECRET_OPT",
86 "FILESTORE_DRIVER_OPT",
92 "GLOBAL_SHARED_FILEDIR_OPT",
97 "DEFAULT_IALLOCATOR_OPT",
98 "IDENTIFY_DEFAULTS_OPT",
101 "IGNORE_FAILURES_OPT",
102 "IGNORE_OFFLINE_OPT",
103 "IGNORE_REMOVE_FAILURES_OPT",
104 "IGNORE_SECONDARIES_OPT",
108 "MAINTAIN_NODE_HEALTH_OPT",
110 "MASTER_NETMASK_OPT",
112 "MIGRATION_MODE_OPT",
114 "NEW_CLUSTER_CERT_OPT",
115 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
116 "NEW_CONFD_HMAC_KEY_OPT",
119 "NEW_SPICE_CERT_OPT",
121 "NODE_FORCE_JOIN_OPT",
123 "NODE_PLACEMENT_OPT",
127 "NODRBD_STORAGE_OPT",
133 "NOMODIFY_ETCHOSTS_OPT",
134 "NOMODIFY_SSH_SETUP_OPT",
138 "NORUNTIME_CHGS_OPT",
141 "NOSSH_KEYCHECK_OPT",
155 "PREALLOC_WIPE_DISKS_OPT",
156 "PRIMARY_IP_VERSION_OPT",
162 "REMOVE_INSTANCE_OPT",
168 "SECONDARY_ONLY_OPT",
172 "SHUTDOWN_TIMEOUT_OPT",
174 "SPECS_CPU_COUNT_OPT",
175 "SPECS_DISK_COUNT_OPT",
176 "SPECS_DISK_SIZE_OPT",
177 "SPECS_MEM_SIZE_OPT",
178 "SPECS_NIC_COUNT_OPT",
179 "IPOLICY_DISK_TEMPLATES",
180 "IPOLICY_VCPU_RATIO",
186 "STARTUP_PAUSED_OPT",
195 "USE_EXTERNAL_MIP_SCRIPT",
203 "IGNORE_IPOLICY_OPT",
204 "INSTANCE_POLICY_OPTS",
205 # Generic functions for CLI programs
207 "CreateIPolicyFromOpts",
209 "GenericInstanceCreate",
215 "JobSubmittedException",
217 "RunWhileClusterStopped",
221 # Formatting functions
222 "ToStderr", "ToStdout",
225 "FormatParameterDict",
234 # command line options support infrastructure
235 "ARGS_MANY_INSTANCES",
254 "OPT_COMPL_INST_ADD_NODES",
255 "OPT_COMPL_MANY_NODES",
256 "OPT_COMPL_ONE_IALLOCATOR",
257 "OPT_COMPL_ONE_INSTANCE",
258 "OPT_COMPL_ONE_NODE",
259 "OPT_COMPL_ONE_NODEGROUP",
265 "COMMON_CREATE_OPTS",
271 #: Priorities (sorted)
273 ("low", constants.OP_PRIO_LOW),
274 ("normal", constants.OP_PRIO_NORMAL),
275 ("high", constants.OP_PRIO_HIGH),
278 #: Priority dictionary for easier lookup
279 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
280 # we migrate to Python 2.6
281 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
283 # Query result status for clients
286 QR_INCOMPLETE) = range(3)
288 #: Maximum batch size for ChooseJob
292 # constants used to create InstancePolicy dictionary
293 TISPECS_GROUP_TYPES = {
294 constants.ISPECS_MIN: constants.VTYPE_INT,
295 constants.ISPECS_MAX: constants.VTYPE_INT,
298 TISPECS_CLUSTER_TYPES = {
299 constants.ISPECS_MIN: constants.VTYPE_INT,
300 constants.ISPECS_MAX: constants.VTYPE_INT,
301 constants.ISPECS_STD: constants.VTYPE_INT,
306 def __init__(self, min=0, max=None): # pylint: disable=W0622
311 return ("<%s min=%s max=%s>" %
312 (self.__class__.__name__, self.min, self.max))
315 class ArgSuggest(_Argument):
316 """Suggesting argument.
318 Value can be any of the ones passed to the constructor.
321 # pylint: disable=W0622
322 def __init__(self, min=0, max=None, choices=None):
323 _Argument.__init__(self, min=min, max=max)
324 self.choices = choices
327 return ("<%s min=%s max=%s choices=%r>" %
328 (self.__class__.__name__, self.min, self.max, self.choices))
331 class ArgChoice(ArgSuggest):
334 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
335 but value must be one of the choices.
340 class ArgUnknown(_Argument):
341 """Unknown argument to program (e.g. determined at runtime).
346 class ArgInstance(_Argument):
347 """Instances argument.
352 class ArgNode(_Argument):
358 class ArgGroup(_Argument):
359 """Node group argument.
364 class ArgJobId(_Argument):
370 class ArgFile(_Argument):
371 """File path argument.
376 class ArgCommand(_Argument):
382 class ArgHost(_Argument):
388 class ArgOs(_Argument):
# Canned positional-argument specifications for the CLI command tables.
# The "MANY" variants accept any number of names (min=0 by default),
# while the "ONE" variants require exactly one positional argument.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
405 def _ExtractTagsObject(opts, args):
406 """Extract the tag type object.
408 Note that this function will modify its args parameter.
411 if not hasattr(opts, "tag_type"):
412 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
414 if kind == constants.TAG_CLUSTER:
416 elif kind in (constants.TAG_NODEGROUP,
418 constants.TAG_INSTANCE):
420 raise errors.OpPrereqError("no arguments passed to the command",
425 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
429 def _ExtendTags(opts, args):
430 """Extend the args if a source file has been given.
432 This function will extend the tags with the contents of the file
433 passed in the 'tags_source' attribute of the opts parameter. A file
434 named '-' will be replaced by stdin.
437 fname = opts.tags_source
443 new_fh = open(fname, "r")
446 # we don't use the nice 'new_data = [line.strip() for line in fh]'
447 # because of python bug 1633941
449 line = new_fh.readline()
452 new_data.append(line.strip())
455 args.extend(new_data)
458 def ListTags(opts, args):
459 """List the tags on a given object.
461 This is a generic implementation that knows how to deal with all
462 three cases of tag objects (cluster, node, instance). The opts
463 argument is expected to contain a tag_type field denoting what
464 object type we work on.
467 kind, name = _ExtractTagsObject(opts, args)
468 cl = GetClient(query=True)
469 result = cl.QueryTags(kind, name)
470 result = list(result)
476 def AddTags(opts, args):
477 """Add tags on a given object.
479 This is a generic implementation that knows how to deal with all
480 three cases of tag objects (cluster, node, instance). The opts
481 argument is expected to contain a tag_type field denoting what
482 object type we work on.
485 kind, name = _ExtractTagsObject(opts, args)
486 _ExtendTags(opts, args)
488 raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
489 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
490 SubmitOrSend(op, opts)
493 def RemoveTags(opts, args):
494 """Remove tags from a given object.
496 This is a generic implementation that knows how to deal with all
497 three cases of tag objects (cluster, node, instance). The opts
498 argument is expected to contain a tag_type field denoting what
499 object type we work on.
502 kind, name = _ExtractTagsObject(opts, args)
503 _ExtendTags(opts, args)
505 raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
506 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
507 SubmitOrSend(op, opts)
510 def check_unit(option, opt, value): # pylint: disable=W0613
511 """OptParsers custom converter for units.
515 return utils.ParseUnit(value)
516 except errors.UnitParseError, err:
517 raise OptionValueError("option %s: %s" % (opt, err))
520 def _SplitKeyVal(opt, data):
521 """Convert a KeyVal string into a dict.
523 This function will convert a key=val[,...] string into a dict. Empty
524 values will be converted specially: keys which have the prefix 'no_'
525 will have the value=False and the prefix stripped, the others will
529 @param opt: a string holding the option name for which we process the
530 data, used in building error messages
532 @param data: a string of the format key=val,key=val,...
534 @return: {key=val, key=val}
535 @raises errors.ParameterError: if there are duplicate keys
540 for elem in utils.UnescapeAndSplit(data, sep=","):
542 key, val = elem.split("=", 1)
544 if elem.startswith(NO_PREFIX):
545 key, val = elem[len(NO_PREFIX):], False
546 elif elem.startswith(UN_PREFIX):
547 key, val = elem[len(UN_PREFIX):], None
549 key, val = elem, True
551 raise errors.ParameterError("Duplicate key '%s' in option %s" %
557 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
558 """Custom parser for ident:key=val,key=val options.
560 This will store the parsed values as a tuple (ident, {key: val}). As such,
561 multiple uses of this option via action=append is possible.
565 ident, rest = value, ""
567 ident, rest = value.split(":", 1)
569 if ident.startswith(NO_PREFIX):
571 msg = "Cannot pass options when removing parameter groups: %s" % value
572 raise errors.ParameterError(msg)
573 retval = (ident[len(NO_PREFIX):], False)
574 elif (ident.startswith(UN_PREFIX) and
575 (len(ident) <= len(UN_PREFIX) or
576 not ident[len(UN_PREFIX)][0].isdigit())):
578 msg = "Cannot pass options when removing parameter groups: %s" % value
579 raise errors.ParameterError(msg)
580 retval = (ident[len(UN_PREFIX):], None)
582 kv_dict = _SplitKeyVal(opt, rest)
583 retval = (ident, kv_dict)
587 def check_key_val(option, opt, value): # pylint: disable=W0613
588 """Custom parser class for key=val,key=val options.
590 This will store the parsed values as a dict {key: val}.
593 return _SplitKeyVal(opt, value)
596 def check_bool(option, opt, value): # pylint: disable=W0613
597 """Custom parser for yes/no options.
599 This will store the parsed value as either True or False.
602 value = value.lower()
603 if value == constants.VALUE_FALSE or value == "no":
605 elif value == constants.VALUE_TRUE or value == "yes":
608 raise errors.ParameterError("Invalid boolean value '%s'" % value)
611 def check_list(option, opt, value): # pylint: disable=W0613
612 """Custom parser for comma-separated lists.
615 # we have to make this explicit check since "".split(",") is [""],
616 # not an empty list :(
620 return utils.UnescapeAndSplit(value)
623 def check_maybefloat(option, opt, value): # pylint: disable=W0613
624 """Custom parser for float numbers which might be also defaults.
627 value = value.lower()
629 if value == constants.VALUE_DEFAULT:
635 # completion_suggest is normally a list. Using numeric values not evaluating
636 # to False for dynamic completion.
637 (OPT_COMPL_MANY_NODES,
639 OPT_COMPL_ONE_INSTANCE,
641 OPT_COMPL_ONE_IALLOCATOR,
642 OPT_COMPL_INST_ADD_NODES,
643 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
645 OPT_COMPL_ALL = frozenset([
646 OPT_COMPL_MANY_NODES,
648 OPT_COMPL_ONE_INSTANCE,
650 OPT_COMPL_ONE_IALLOCATOR,
651 OPT_COMPL_INST_ADD_NODES,
652 OPT_COMPL_ONE_NODEGROUP,
656 class CliOption(Option):
657 """Custom option class for optparse.
660 ATTRS = Option.ATTRS + [
661 "completion_suggest",
663 TYPES = Option.TYPES + (
671 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
672 TYPE_CHECKER["identkeyval"] = check_ident_key_val
673 TYPE_CHECKER["keyval"] = check_key_val
674 TYPE_CHECKER["unit"] = check_unit
675 TYPE_CHECKER["bool"] = check_bool
676 TYPE_CHECKER["list"] = check_list
677 TYPE_CHECKER["maybefloat"] = check_maybefloat
680 # optparse.py sets make_option, so we do it for our own option class, too
681 cli_option = CliOption
# Generic output-control and confirmation options shared by most commands.

DEBUG_OPT = cli_option(
  "-d", "--debug", default=0, action="count",
  help="Increase debugging level")

NOHDR_OPT = cli_option(
  "--no-headers", default=False, action="store_true", dest="no_headers",
  help="Don't display column headers")

SEP_OPT = cli_option(
  "--separator", default=None, action="store", dest="separator",
  help="Separator between output fields (defaults to one space)")

USEUNITS_OPT = cli_option(
  "--units", default=None, dest="units", choices=("h", "m", "g", "t"),
  help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option(
  "-o", "--output", dest="output", action="store", type="string",
  metavar="FIELDS", help="Comma separated list of output fields")

FORCE_OPT = cli_option(
  "-f", "--force", dest="force", action="store_true", default=False,
  help="Force the operation")

CONFIRM_OPT = cli_option(
  "--yes", dest="confirm", action="store_true", default=False,
  help="Do not require confirmation")
712 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
713 action="store_true", default=False,
714 help=("Ignore offline nodes and do as much"
717 TAG_ADD_OPT = cli_option("--tags", dest="tags",
718 default=None, help="Comma-separated list of instance"
# Tag-source and job-submission behaviour options.

TAG_SRC_OPT = cli_option(
  "--from", dest="tags_source", default=None,
  help="File with tag names")

SUBMIT_OPT = cli_option(
  "--submit", dest="submit_only", default=False, action="store_true",
  help=("Submit the job and return the job ID, but don't wait for the job"
        " to finish"))

SYNC_OPT = cli_option(
  "--sync", dest="do_locking", default=False, action="store_true",
  help=("Grab locks while doing the queries in order to ensure more"
        " consistent results"))
734 DRY_RUN_OPT = cli_option("--dry-run", default=False,
736 help=("Do not execute the operation, just run the"
737 " check steps and verify if it could be"
740 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
742 help="Increase the verbosity of the operation")
DEBUG_SIMERR_OPT = cli_option(
  "--debug-simulate-errors", default=False, action="store_true",
  dest="simulate_errors",
  help="Debugging option that makes the operation treat most runtime"
       " checks as failed")

# store_false with default=True: passing the flag *disables* waiting
NWSYNC_OPT = cli_option(
  "--no-wait-for-sync", dest="wait_for_sync", default=True,
  action="store_false", help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option(
  "--wait-for-sync", dest="wait_for_sync", default=False,
  action="store_true", help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option(
  "--online", dest="online_inst", action="store_true", default=False,
  help="Enable offline instance")

OFFLINE_INST_OPT = cli_option(
  "--offline", dest="offline_inst", action="store_true", default=False,
  help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option(
  "-t", "--disk-template", dest="disk_template",
  help="Custom disk setup (%s)" % utils.CommaJoin(constants.DISK_TEMPLATES),
  default=None, metavar="TEMPL", choices=list(constants.DISK_TEMPLATES))
771 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
772 help="Do not create any network cards for"
FILESTORE_DIR_OPT = cli_option(
  "--file-storage-dir", dest="file_storage_dir",
  help="Relative path under default cluster-wide file storage dir to"
       " store file-based disks",
  default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option(
  "--file-driver", dest="file_driver", help="Driver to use for image files",
  default="loop", metavar="<DRIVER>",
  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option(
  "-I", "--iallocator", metavar="<NAME>",
  help="Select nodes for the instance automatically using the <NAME>"
       " iallocator plugin",
  default=None, type="string",
  completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
791 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
793 help="Set the default instance"
795 default=None, type="string",
796 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
798 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
800 completion_suggest=OPT_COMPL_ONE_OS)
OSPARAMS_OPT = cli_option(
  "-O", "--os-parameters", dest="osparams", type="keyval", default={},
  help="OS parameters")

FORCE_VARIANT_OPT = cli_option(
  "--force-variant", dest="force_variant", action="store_true",
  default=False, help="Force an unknown variant")
810 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
811 action="store_true", default=False,
812 help="Do not install the OS (will"
NORUNTIME_CHGS_OPT = cli_option(
  "--no-runtime-changes", dest="allow_runtime_chgs", default=True,
  action="store_false", help="Don't allow runtime changes")

BACKEND_OPT = cli_option(
  "-B", "--backend-parameters", dest="beparams", type="keyval", default={},
  help="Backend parameters")

HVOPTS_OPT = cli_option(
  "-H", "--hypervisor-parameters", dest="hvparams", type="keyval",
  default={}, help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option(
  "-D", "--disk-parameters", dest="diskparams",
  help="Disk template parameters, in the format"
       " template:option=value,option=value,...",
  type="identkeyval", action="append", default=[])

# Instance-specs options: each takes key=value pairs with keys min/max/std.

SPECS_MEM_SIZE_OPT = cli_option(
  "--specs-mem-size", dest="ispecs_mem_size", type="keyval", default={},
  help="Memory size specs: list of key=value, where key is one of min,"
       " max, std (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option(
  "--specs-cpu-count", dest="ispecs_cpu_count", type="keyval", default={},
  help="CPU count specs: list of key=value, where key is one of min, max,"
       " std")

SPECS_DISK_COUNT_OPT = cli_option(
  "--specs-disk-count", dest="ispecs_disk_count", type="keyval", default={},
  help="Disk count specs: list of key=value, where key is one of min, max,"
       " std")

SPECS_DISK_SIZE_OPT = cli_option(
  "--specs-disk-size", dest="ispecs_disk_size", type="keyval", default={},
  help="Disk size specs: list of key=value, where key is one of min, max,"
       " std (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option(
  "--specs-nic-count", dest="ispecs_nic_count", type="keyval", default={},
  help="NIC count specs: list of key=value, where key is one of min, max,"
       " std")

IPOLICY_DISK_TEMPLATES = cli_option(
  "--ipolicy-disk-templates", dest="ipolicy_disk_templates", type="list",
  default=None, help="Comma-separated list of enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option(
  "--ipolicy-vcpu-ratio", dest="ipolicy_vcpu_ratio", type="maybefloat",
  default=None, help="The maximum allowed vcpu-to-cpu ratio")
872 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
873 dest="ipolicy_spindle_ratio",
874 type="maybefloat", default=None,
875 help=("The maximum allowed instances to"
# Single-value variant (store) of the hypervisor-parameters option
HYPERVISOR_OPT = cli_option(
  "-H", "--hypervisor-parameters", dest="hypervisor",
  help="Hypervisor and hypervisor options, in the format"
       " hypervisor:option=value,option=value,...",
  default=None, type="identkeyval")

# Repeatable variant (append) of the hypervisor-parameters option
HVLIST_OPT = cli_option(
  "-H", "--hypervisor-parameters", dest="hvparams",
  help="Hypervisor and hypervisor options, in the format"
       " hypervisor:option=value,option=value,...",
  default=[], action="append", type="identkeyval")
888 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
889 action="store_false",
890 help="Don't check that the instance's IP"
893 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
894 default=True, action="store_false",
895 help="Don't check that the instance's name"
NET_OPT = cli_option(
  "--net", help="NIC parameters", default=[], dest="nics",
  action="append", type="identkeyval")

DISK_OPT = cli_option(
  "--disk", help="Disk parameters", default=[], dest="disks",
  action="append", type="identkeyval")

DISKIDX_OPT = cli_option(
  "--disks", dest="disks", default=None,
  help="Comma-separated list of disks indices to act on (e.g. 0,2)"
       " (optional, defaults to all disks)")

OS_SIZE_OPT = cli_option(
  "-s", "--os-size", dest="sd_size",
  help="Enforces a single-disk configuration using the given disk size,"
       " in MiB unless a suffix is used",
  default=None, type="unit", metavar="<size>")
915 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
916 dest="ignore_consistency",
917 action="store_true", default=False,
918 help="Ignore the consistency of the disks on"
921 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
922 dest="allow_failover",
923 action="store_true", default=False,
924 help="If migration is not possible fallback to"
# store_false: "--non-live" flips dest "live" from its True default
NONLIVE_OPT = cli_option(
  "--non-live", dest="live", default=True, action="store_false",
  help="Do a non-live migration (this usually means freeze the instance,"
       " save the state, transfer and only then resume running on the"
       " secondary node)")
933 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
935 choices=list(constants.HT_MIGRATION_MODES),
936 help="Override default migration mode (choose"
937 " either live or non-live")
NODE_PLACEMENT_OPT = cli_option(
  "-n", "--node", dest="node",
  help="Target node and optional secondary node",
  metavar="<pnode>[:<snode>]",
  completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option(
  "-n", "--node", dest="nodes", default=[], action="append",
  metavar="<node>",
  help="Use only this node (can be used multiple times, if not given"
       " defaults to all nodes)",
  completion_suggest=OPT_COMPL_ONE_NODE)

#: Flag name for the node group option
NODEGROUP_OPT_NAME = "--node-group"
951 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
953 help="Node group (name or uuid)",
954 metavar="<nodegroup>",
955 default=None, type="string",
956 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
958 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
960 completion_suggest=OPT_COMPL_ONE_NODE)
NOSTART_OPT = cli_option(
  "--no-start", dest="start", default=True, action="store_false",
  help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option(
  "--show-cmd", dest="show_command", action="store_true", default=False,
  help="Show command instead of executing it")
970 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
971 default=False, action="store_true",
972 help="Instead of performing the migration, try to"
973 " recover from a failed cleanup. This is safe"
974 " to run even if the instance is healthy, but it"
975 " will create extra replication traffic and "
976 " disrupt briefly the replication (like during the"
STATIC_OPT = cli_option(
  "-s", "--static", dest="static", action="store_true", default=False,
  help="Only show configuration data, not runtime data")

ALL_OPT = cli_option(
  "--all", dest="show_all", default=False, action="store_true",
  help="Show info on all instances on the cluster. This can take a long"
       " time to run, use wisely")

SELECT_OS_OPT = cli_option(
  "--select-os", dest="select_os", action="store_true", default=False,
  help="Interactive OS reinstall, lists available OS templates for"
       " selection")

IGNORE_FAILURES_OPT = cli_option(
  "--ignore-failures", dest="ignore_failures", action="store_true",
  default=False,
  help="Remove the instance from the cluster configuration even if there"
       " are failures during the removal process")
999 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1000 dest="ignore_remove_failures",
1001 action="store_true", default=False,
1002 help="Remove the instance from the"
1003 " cluster configuration even if there"
1004 " are failures during the removal"
REMOVE_INSTANCE_OPT = cli_option(
  "--remove-instance", dest="remove_instance", action="store_true",
  default=False, help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option(
  "-n", "--target-node", dest="dst_node",
  help="Specifies the new node for the instance", metavar="NODE",
  default=None, completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option(
  "-n", "--new-secondary", dest="dst_node",
  help="Specifies the new secondary node", metavar="NODE", default=None,
  completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option(
  "-p", "--on-primary", dest="on_primary", default=False,
  action="store_true",
  help="Replace the disk(s) on the primary node (applies only to"
       " internally mirrored disk templates, e.g. %s)" %
       utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option(
  "-s", "--on-secondary", dest="on_secondary", default=False,
  action="store_true",
  help="Replace the disk(s) on the secondary node (applies only to"
       " internally mirrored disk templates, e.g. %s)" %
       utils.CommaJoin(constants.DTS_INT_MIRROR))
1035 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1036 default=False, action="store_true",
1037 help="Lock all nodes and auto-promote as needed"
AUTO_REPLACE_OPT = cli_option(
  "-a", "--auto", dest="auto", default=False, action="store_true",
  help="Automatically replace faulty disks (applies only to internally"
       " mirrored disk templates, e.g. %s)" %
       utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option(
  "--ignore-size", dest="ignore_size", default=False, action="store_true",
  help="Ignore current recorded size (useful for forcing activation when"
       " the recorded size is wrong)")
1053 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1055 completion_suggest=OPT_COMPL_ONE_NODE)
1057 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
SECONDARY_IP_OPT = cli_option(
  "-s", "--secondary-ip", dest="secondary_ip",
  help="Specify the secondary ip for the node", metavar="ADDRESS",
  default=None)

READD_OPT = cli_option(
  "--readd", dest="readd", default=False, action="store_true",
  help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option(
  "--no-ssh-key-check", dest="ssh_key_check", default=True,
  action="store_false", help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option(
  "--force-join", dest="force_join", default=False, action="store_true",
  help="Force the joining of a node")

MC_OPT = cli_option(
  "-C", "--master-candidate", dest="master_candidate", type="bool",
  default=None, metavar=_YORNO,
  help="Set the master_candidate flag on the node")
1080 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1081 type="bool", default=None,
1082 help=("Set the offline flag on the node"
1083 " (cluster does not communicate with offline"
# Tri-state (yes/no/unset) node and volume flag options

DRAINED_OPT = cli_option(
  "-D", "--drained", dest="drained", metavar=_YORNO, type="bool",
  default=None,
  help=("Set the drained flag on the node"
        " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option(
  "--master-capable", dest="master_capable", type="bool", default=None,
  metavar=_YORNO, help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option(
  "--vm-capable", dest="vm_capable", type="bool", default=None,
  metavar=_YORNO, help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option(
  "--allocatable", dest="allocatable", type="bool", default=None,
  metavar=_YORNO, help="Set the allocatable flag on a volume")
1103 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1104 help="Disable support for lvm based instances"
1106 action="store_false", default=True)
ENABLED_HV_OPT = cli_option(
  "--enabled-hypervisors", dest="enabled_hypervisors",
  help="Comma-separated list of hypervisors", type="string", default=None)

NIC_PARAMS_OPT = cli_option(
  "-N", "--nic-parameters", dest="nicparams", type="keyval", default={},
  help="NIC parameters")

CP_SIZE_OPT = cli_option(
  "-C", "--candidate-pool-size", default=None, dest="candidate_pool_size",
  type="int", help="Set the candidate pool size")

VG_NAME_OPT = cli_option(
  "--vg-name", dest="vg_name", metavar="VG", default=None,
  help=("Enables LVM and specifies the volume group name (cluster-wide)"
        " for disk allocation [%s]" % constants.DEFAULT_VG))

YES_DOIT_OPT = cli_option(
  "--yes-do-it", "--ya-rly", dest="yes_do_it", help="Destroy cluster",
  action="store_true")

NOVOTING_OPT = cli_option(
  "--no-voting", dest="no_voting",
  help="Skip node agreement check (dangerous)", action="store_true",
  default=False)
1134 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1135 help="Specify the mac prefix for the instance IP"
1136 " addresses, in the format XX:XX:XX",
1140 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1141 help="Specify the node interface (cluster-wide)"
1142 " on which the master IP address will be added"
1143 " (cluster init default: %s)" %
1144 constants.DEFAULT_BRIDGE,
1148 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1149 help="Specify the netmask of the master IP",
USE_EXTERNAL_MIP_SCRIPT = cli_option(
  "--use-external-mip-script", dest="use_external_mip_script",
  help="Specify whether to run a user-provided script for the master IP"
       " address turnup and turndown operations",
  type="bool", metavar=_YORNO, default=None)
1161 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1162 help="Specify the default directory (cluster-"
1163 "wide) for storing the file-based disks [%s]" %
1164 pathutils.DEFAULT_FILE_STORAGE_DIR,
1166 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir", dest="shared_file_storage_dir",
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  help="Specify the default directory (cluster-wide) for storing the"
       " shared file-based disks [%s]" %
       pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option(
  "--no-etc-hosts", dest="modify_etc_hosts",
  help="Don't modify %s" % pathutils.ETC_HOSTS, action="store_false",
  default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option(
  "--no-ssh-init", dest="modify_ssh_setup",
  help="Don't initialize SSH keys", action="store_false", default=True)

ERROR_CODES_OPT = cli_option(
  "--error-codes", dest="error_codes",
  help="Enable parseable error messages", action="store_true",
  default=False)

NONPLUS1_OPT = cli_option(
  "--no-nplus1-mem", dest="skip_nplusone_mem",
  help="Skip N+1 memory redundancy tests", action="store_true",
  default=False)
1192 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1193 help="Type of reboot: soft/hard/full",
1194 default=constants.INSTANCE_REBOOT_HARD,
1196 choices=list(constants.REBOOT_TYPES))
IGNORE_SECONDARIES_OPT = cli_option(
  "--ignore-secondaries", dest="ignore_secondaries", default=False,
  action="store_true", help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option(
  "--noshutdown", dest="shutdown", action="store_false", default=True,
  help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option(
  "--timeout", dest="timeout", type="int",
  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
  help="Maximum time to wait")
1211 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1212 dest="shutdown_timeout", type="int",
1213 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1214 help="Maximum time to wait for instance"
1217 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1219 help=("Number of seconds between repetions of the"
1222 EARLY_RELEASE_OPT = cli_option("--early-release",
1223 dest="early_release", default=False,
1224 action="store_true",
1225 help="Release the locks on the secondary"
NEW_CLUSTER_CERT_OPT = cli_option(
  "--new-cluster-certificate", dest="new_cluster_cert", default=False,
  action="store_true", help="Generate a new cluster certificate")
1233 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1235 help="File containing new RAPI certificate")
1237 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1238 default=None, action="store_true",
1239 help=("Generate a new self-signed RAPI"
1242 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1244 help="File containing new SPICE certificate")
1246 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1248 help="File containing the certificate of the CA"
1249 " which signed the SPICE certificate")
1251 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1252 dest="new_spice_cert", default=None,
1253 action="store_true",
1254 help=("Generate a new self-signed SPICE"
1257 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1258 dest="new_confd_hmac_key",
1259 default=False, action="store_true",
1260 help=("Create a new HMAC key for %s" %
1263 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1264 dest="cluster_domain_secret",
1266 help=("Load new new cluster domain"
1267 " secret from file"))
1269 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1270 dest="new_cluster_domain_secret",
1271 default=False, action="store_true",
1272 help=("Create a new cluster domain"
1275 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1276 dest="use_replication_network",
1277 help="Whether to use the replication network"
1278 " for talking to the nodes",
1279 action="store_true", default=False)
1281 MAINTAIN_NODE_HEALTH_OPT = \
1282 cli_option("--maintain-node-health", dest="maintain_node_health",
1283 metavar=_YORNO, default=None, type="bool",
1284 help="Configure the cluster to automatically maintain node"
1285 " health, by shutting down unknown instances, shutting down"
1286 " unknown DRBD devices, etc.")
1288 IDENTIFY_DEFAULTS_OPT = \
1289 cli_option("--identify-defaults", dest="identify_defaults",
1290 default=False, action="store_true",
1291 help="Identify which saved instance parameters are equal to"
1292 " the current cluster defaults and set them as such, instead"
1293 " of marking them as overridden")
1295 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1296 action="store", dest="uid_pool",
1297 help=("A list of user-ids or user-id"
1298 " ranges separated by commas"))
1300 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1301 action="store", dest="add_uids",
1302 help=("A list of user-ids or user-id"
1303 " ranges separated by commas, to be"
1304 " added to the user-id pool"))
1306 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1307 action="store", dest="remove_uids",
1308 help=("A list of user-ids or user-id"
1309 " ranges separated by commas, to be"
1310 " removed from the user-id pool"))
1312 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1313 action="store", dest="reserved_lvs",
1314 help=("A comma-separated list of reserved"
1315 " logical volumes names, that will be"
1316 " ignored by cluster verify"))
1318 ROMAN_OPT = cli_option("--roman",
1319 dest="roman_integers", default=False,
1320 action="store_true",
1321 help="Use roman numbers for positive integers")
1323 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1324 action="store", default=None,
1325 help="Specifies usermode helper for DRBD")
1327 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1328 action="store_false", default=True,
1329 help="Disable support for DRBD")
1331 PRIMARY_IP_VERSION_OPT = \
1332 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1333 action="store", dest="primary_ip_version",
1334 metavar="%d|%d" % (constants.IP4_VERSION,
1335 constants.IP6_VERSION),
1336 help="Cluster-wide IP version for primary IP")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name given on the command line into
  its numeric value (via C{_PRIONAME_TO_VALUE}) and stores it on the
  parsed options object under the option's destination name.

  """
  value = _PRIONAME_TO_VALUE[value]
  setattr(parser.values, option.dest, value)
# Option definitions: opcode priority, OS flags, node parameters/power,
# out-of-band control, evacuation selection and disk/hypervisor state.
# NOTE(review): a few truncated statements below were minimally
# re-terminated -- verify wording against upstream.

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            help=("Specify disk state information in the"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            action="append")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          action="append")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# options
# NOTE(review): this list arrived truncated in this copy; entries other than
# the ones below may be missing -- verify against upstream.
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
  ]

# common instance policy options
# NOTE(review): list closed here; further entries may have been lost in
# truncation.
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_SPINDLE_RATIO,
  ]
class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  Raised when the usage description should be printed instead of
  running a command.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error
class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  Raised when the version information should be printed instead of
  running a command.

  """
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  # NOTE(review): control-flow glue around lines lost in this truncated copy
  # has been restored -- verify against upstream.
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))
    cmd = aliases[cmd]

  if cmd in env_override:
    # default arguments may be injected via an environment variable named
    # after the binary and the command, e.g. GNT_INSTANCE_LIST
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  This is a generator yielding one output line at a time.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    # wrap the help text so every entry fits within 79 columns
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s %s" % (mlen, "", line)
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  @return: True if the arguments are valid, False otherwise (errors are
      reported via L{ToStderr})

  """
  # NOTE(review): counter initialization and early returns lost in this
  # truncated copy have been restored -- verify against upstream.
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = False

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      # only the last argument may accept an unlimited number of values
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: the value passed to "--node"
  @return: a pair (primary, secondary); when ":" is present the result is
      the two-element list produced by C{str.split}, otherwise the tuple
      C{(value, None)}

  """
  if value and ":" in value:
    return value.split(":", 1)

  return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # without the guard the expression below would crash for a variant-less
  # OS (os_variants=None), which the docstring explicitly allows
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @return: the default list when nothing was selected; the default list
      extended by the "+"-prefixed selection; otherwise the
      comma-separated selection itself

  """
  # no selection given: fall back to the defaults (this return was lost in
  # the truncated copy, leaving the "is None" test without a body)
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
#: Alias for L{rpc.RunWithRPC}; see the rpc module for its semantics
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list)

  """
  # NOTE(review): the tty-handling glue (try/finally and the prompt loop)
  # was lost in this truncated copy and has been restored -- verify
  # against upstream.
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last entry doubles as the fallback answer when no tty is available
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # print the description of every choice and ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the ID of the submitted job

  """
  # create a client on demand when none was passed (this default-client
  # creation and the return were lost in the truncated copy)
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the C{opresult} list of the job when it ends successfully
  @raise errors.JobLost: If the job can no longer be found
  @raise errors.OpExecError: If the job failed or was canceled

  """
  # NOTE(review): loop scaffolding (while/break and flag initialization)
  # was lost in this truncated copy and has been restored -- verify
  # against upstream.
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)
      # wait again for a change
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # remember the highest serial seen so messages aren't repeated
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        # some opcodes succeeded before this one failed
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses must implement the data-retrieval methods below.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses must implement the reporting methods below.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """Job-polling data callbacks backed by a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    # the luxi client all calls are delegated to (this assignment was
    # lost in the truncated copy)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # intentionally ignored: the feedback function only receives messages
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print to standard output/error.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # flags so each waiting state is reported only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: the message type (one of the C{constants.ELOG_*} values)
  @param log_msg: the message payload
  @return: the message, stringified for non-ELOG_MESSAGE types, passed
      through L{utils.SafeEncode}

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: if given, messages are forwarded to it (mutually
      exclusive with C{reporter})
  @param reporter: L{JobPollReportCbBase} instance to use for reporting

  """
  # client and reporter defaulting glue restored from the truncated copy
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @param cl: luxi client to use; created on demand when None
  @param feedback_fn: optional feedback function, passed to L{PollJob}
  @param opts: command line options used by L{SetGenericOpcodeOpts}
  @param reporter: optional reporting callbacks, passed to L{PollJob}
  @return: the result of the single submitted opcode

  """
  # default-client creation restored from the truncated copy
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: when C{opts.submit_only} is set; carries
      the submitted job's ID

  """
  if opts and opts.submit_only:
    # submit-only mode: send the job and signal the ID via the exception
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # the docstring allows options=None; without this guard the attribute
  # accesses below would crash in that case
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
def GetClient(query=False):
  """Connects to the a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket
  @raise errors.OpPrereqError: if the cluster is not initialized or this
      is not the master node

  """
  # NOTE(review): branch/return glue lost in this truncated copy has been
  # restored -- verify against upstream.
  if query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    address = None
  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
  return client
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: the exception instance to describe
  @rtype: tuple
  @return: (exit code, error description)

  """
  # NOTE(review): header initialization and a few branch bodies lost in
  # this truncated copy have been restored -- verify against upstream.
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: the job was submitted, report its ID and exit cleanly
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
2240 def GenericMain(commands, override=None, aliases=None,
2241 env_override=frozenset()):
2242 """Generic main function for all the gnt-* commands.
2244 @param commands: a dictionary with a special structure, see the design doc
2245 for command line handling.
2246 @param override: if not None, we expect a dictionary with keys that will
2247 override command line options; this can be used to pass
2248 options from the scripts to generic functions
2249 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2250 @param env_override: list of environment names which are allowed to submit
2251 default args for commands
2254 # save the program name and the entire command line for later logging
2256 binary = os.path.basename(sys.argv[0])
2258 binary = sys.argv[0]
2260 if len(sys.argv) >= 2:
2261 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2265 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2267 binary = "<unknown program>"
2268 cmdline = "<unknown>"
2274 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2276 except _ShowVersion:
2277 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2278 constants.RELEASE_VERSION)
2279 return constants.EXIT_SUCCESS
2280 except _ShowUsage, err:
2281 for line in _FormatUsage(binary, commands):
2285 return constants.EXIT_FAILURE
2287 return constants.EXIT_SUCCESS
2288 except errors.ParameterError, err:
2289 result, err_msg = FormatError(err)
2293 if func is None: # parse error
2296 if override is not None:
2297 for key, val in override.iteritems():
2298 setattr(options, key, val)
2300 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2301 stderr_logging=True)
2303 logging.info("Command line: %s", cmdline)
2306 result = func(options, args)
2307 except (errors.GenericError, luxi.ProtocolError,
2308 JobSubmittedException), err:
2309 result, err_msg = FormatError(err)
2310 logging.exception("Error during command processing")
2312 except KeyboardInterrupt:
2313 result = constants.EXIT_FAILURE
2314 ToStderr("Aborted. Note that if the operation created any jobs, they"
2315 " might have been submitted and"
2316 " will continue to run in the background.")
2317 except IOError, err:
2318 if err.errno == errno.EPIPE:
2319 # our terminal went away, we'll exit
2320 sys.exit(constants.EXIT_FAILURE)
2327 def ParseNicOption(optvalue):
2328 """Parses the value of the --net option(s).
2332 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2333 except (TypeError, ValueError), err:
2334 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2337 nics = [{}] * nic_max
2338 for nidx, ndict in optvalue:
2341 if not isinstance(ndict, dict):
2342 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2343 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2345 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError as err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      # One independent dict per slot (avoid the "[{}] * n" aliasing trap)
      disks = [{} for _ in range(disk_max)]
    else:
      disks = []

    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError as err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception as err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # Avoid trailing whitespace on left-aligned last column
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2723 def _FormatBool(value):
2724 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary, as the unit is
    # only known at call time
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status to the caller-supplied callback
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    match

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
3052 def _GetColFormatString(width, align_right):
3053 """Returns the format string for a field.
3061 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # Not a (seconds, microseconds) pair; nothing sensible to format
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3253 def _ToStream(stream, txt, *args):
3254 """Write a message to a stream, bypassing the logging system
3256 @type stream: file object
3257 @param stream: the file to which we should write
3259 @param txt: the message
3265 stream.write(txt % args)
3270 except IOError, err:
3271 if err.errno == errno.EPIPE:
3272 # our terminal went away, we'll exit
3273 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Monotonic counter used to keep results in submission order
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((next(self._counter), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((next(self._counter), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)

      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level

  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      # Nested parameter group: recurse with one more indent level
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      # Plain value: show the explicitly-set value, or mark the default
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # With many affected items, let the user ask for the list explicitly
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  @type elements: dict
  @param elements: mapping of key to value, where values may carry a unit
      suffix or be L{constants.VALUE_DEFAULT}
  @rtype: dict
  @return: same keys; values parsed via L{utils.ParseUnit}, except the
      "default" marker which is passed through unchanged

  """
  parsed = dict()
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
3533 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3534 ispecs_cpu_count=None,
3535 ispecs_disk_count=None,
3536 ispecs_disk_size=None,
3537 ispecs_nic_count=None,
3538 ipolicy_disk_templates=None,
3539 ipolicy_vcpu_ratio=None,
3540 ipolicy_spindle_ratio=None,
3541 group_ipolicy=False,
3542 allowed_values=None,
3544 """Creation of instance policy based on command line options.
3546 @param fill_all: whether for cluster policies we should ensure that
3547 all values are filled
3553 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3554 if ispecs_disk_size:
3555 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3556 except (TypeError, ValueError, errors.UnitParseError), err:
3557 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3559 (ispecs_disk_size, ispecs_mem_size, err),
3562 # prepare ipolicy dict
3563 ipolicy_transposed = {
3564 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3565 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3566 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3567 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3568 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3571 # first, check that the values given are correct
3573 forced_type = TISPECS_GROUP_TYPES
3575 forced_type = TISPECS_CLUSTER_TYPES
3577 for specs in ipolicy_transposed.values():
3578 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3581 ipolicy_out = objects.MakeEmptyIPolicy()
3582 for name, specs in ipolicy_transposed.iteritems():
3583 assert name in constants.ISPECS_PARAMETERS
3584 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3585 ipolicy_out[key][name] = val
3587 # no filldict for non-dicts
3588 if not group_ipolicy and fill_all:
3589 if ipolicy_disk_templates is None:
3590 ipolicy_disk_templates = constants.DISK_TEMPLATES
3591 if ipolicy_vcpu_ratio is None:
3592 ipolicy_vcpu_ratio = \
3593 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3594 if ipolicy_spindle_ratio is None:
3595 ipolicy_spindle_ratio = \
3596 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
3597 if ipolicy_disk_templates is not None:
3598 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3599 if ipolicy_vcpu_ratio is not None:
3600 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3601 if ipolicy_spindle_ratio is not None:
3602 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3604 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)