4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
import sys

from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)
# Command line options
# NOTE(review): the lines below are the surviving entries of the module's
# __all__ export list; the opening '__all__ = [' line, the closing ']' and
# many entries are missing from this copy of the file.  Restore the full
# list from upstream before relying on 'from ganeti.cli import *'.
"CLUSTER_DOMAIN_SECRET_OPT",
"FILESTORE_DRIVER_OPT",
"GLOBAL_SHARED_FILEDIR_OPT",
"DEFAULT_IALLOCATOR_OPT",
"IDENTIFY_DEFAULTS_OPT",
"IGNORE_FAILURES_OPT",
"IGNORE_OFFLINE_OPT",
"IGNORE_REMOVE_FAILURES_OPT",
"IGNORE_SECONDARIES_OPT",
"MAINTAIN_NODE_HEALTH_OPT",
"MASTER_NETMASK_OPT",
"MIGRATION_MODE_OPT",
"NEW_CLUSTER_CERT_OPT",
"NEW_CLUSTER_DOMAIN_SECRET_OPT",
"NEW_CONFD_HMAC_KEY_OPT",
"NEW_SPICE_CERT_OPT",
"NODE_FORCE_JOIN_OPT",
"NODE_PLACEMENT_OPT",
"NODRBD_STORAGE_OPT",
"NOMODIFY_ETCHOSTS_OPT",
"NOMODIFY_SSH_SETUP_OPT",
"NORUNTIME_CHGS_OPT",
"NOSSH_KEYCHECK_OPT",
"PREALLOC_WIPE_DISKS_OPT",
"PRIMARY_IP_VERSION_OPT",
"REMOVE_INSTANCE_OPT",
"SECONDARY_ONLY_OPT",
"SHUTDOWN_TIMEOUT_OPT",
"SPECS_CPU_COUNT_OPT",
"SPECS_DISK_COUNT_OPT",
"SPECS_DISK_SIZE_OPT",
"SPECS_MEM_SIZE_OPT",
"SPECS_NIC_COUNT_OPT",
"IPOLICY_DISK_TEMPLATES",
"IPOLICY_VCPU_RATIO",
"STARTUP_PAUSED_OPT",
"USE_EXTERNAL_MIP_SCRIPT",
"IGNORE_IPOLICY_OPT",
"INSTANCE_POLICY_OPTS",
# Generic functions for CLI programs
"CreateIPolicyFromOpts",
"GenericInstanceCreate",
"JobSubmittedException",
"RunWhileClusterStopped",
# Formatting functions
"ToStderr", "ToStdout",
"FormatParameterDict",
# command line options support infrastructure
"ARGS_MANY_INSTANCES",
"OPT_COMPL_INST_ADD_NODES",
"OPT_COMPL_MANY_NODES",
"OPT_COMPL_ONE_IALLOCATOR",
"OPT_COMPL_ONE_INSTANCE",
"OPT_COMPL_ONE_NODE",
"OPT_COMPL_ONE_NODEGROUP",
"COMMON_CREATE_OPTS",
#: Priorities (sorted)
# NOTE(review): the '_PRIORITY_NAMES = compat.UniqueFrozenset((' opening and
# the closing parentheses are missing from this copy; the tuples below map a
# user-visible priority name to its opcode priority constant.
("low", constants.OP_PRIO_LOW),
("normal", constants.OP_PRIO_NORMAL),
("high", constants.OP_PRIO_HIGH),
#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients
# NOTE(review): the '(QR_NORMAL, QR_UNKNOWN,' line of this tuple unpacking is
# missing here -- three query-result states share range(3).
QR_INCOMPLETE) = range(3)
#: Maximum batch size for ChooseJob
# constants used to create InstancePolicy dictionary
# Maps an ispec key to the value type it must be converted to; the group
# variant has no "std" values, the cluster variant does.
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  # NOTE(review): interior of the _Argument base class; the 'class' header,
  # the attribute assignments inside __init__ and the 'def __repr__(self):'
  # line are missing from this copy -- restore from upstream before use.
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    # min/max deliberately shadow the builtins (hence the pylint disable);
    # presumably they bound how many arguments of this kind are accepted --
    # TODO confirm against the upstream class body.
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices are offered as completion suggestions, not enforced
    self.choices = choices

  def __repr__(self):
    # Include the suggested choices so debug output identifies the argument
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
353 class ArgNode(_Argument):
class ArgGroup(_Argument):
  """Node group argument.

  """
365 class ArgJobId(_Argument):
class ArgFile(_Argument):
  """File path argument.

  """
377 class ArgCommand(_Argument):
383 class ArgHost(_Argument):
389 class ArgOs(_Argument):
# Canned positional-argument specifications shared by the gnt-* commands:
# "MANY" variants accept any number of arguments (min defaults to 0),
# "ONE" variants require exactly one (min=1, max=1).
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type}
      attribute denoting the object kind we operate on
  @param args: remaining positional arguments; for non-cluster tag
      types the first element is consumed as the object name
  @return: a C{(kind, name)} tuple (name is the empty string for the
      cluster, which is a singleton)
  @raise errors.OpPrereqError: if a name is required but args is empty

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name needed
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    # consume the object name from the argument list (documented side effect)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command line options (only C{tags_source} is read)
  @param args: list of tags, extended in place with one tag per
      stripped line of the source file

  """
  fname = opts.tags_source
  if fname is None:
    # no --from option given, nothing to do
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # always release the file handle, even on read errors
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # tags are read via the query client, no job submission needed
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  # sort for stable, user-friendly output
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @raise errors.OpPrereqError: if no tags remain after extracting the
      object name and merging the optional tags file

  """
  kind, name = _ExtractTagsObject(opts, args)
  # merge in tags read from --from (file or stdin), if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @raise errors.OpPrereqError: if no tags remain after extracting the
      object name and merging the optional tags file

  """
  kind, name = _ExtractTagsObject(opts, args)
  # merge in tags read from --from (file or stdin), if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
511 def check_unit(option, opt, value): # pylint: disable=W0613
512 """OptParsers custom converter for units.
516 return utils.ParseUnit(value)
517 except errors.UnitParseError, err:
518 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the
  prefix 'un_' get value=None, the others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      key, val = elem.split("=", 1)
    else:
      # valueless element: interpret the prefix as the boolean/None value
      if elem.startswith(NO_PREFIX):
        key, val = elem[len(NO_PREFIX):], False
      elif elem.startswith(UN_PREFIX):
        key, val = elem[len(UN_PREFIX):], None
      else:
        key, val = elem, True
    if key in kv_dict:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  An ident prefixed with 'no_' (or 'un_' not followed by a digit) marks
  removal of a whole parameter group, in which case no key=val part may
  be present and the stored value is (ident, False) resp. (ident, None).

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    # "un_..." only means removal when not followed by a digit, so that
    # identifiers which legitimately start with "un" plus a number survive
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raises errors.ParameterError: if the value is neither a recognised
      true nor false spelling

  """
  # comparison is case-insensitive
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  An empty string yields an empty list (not [""]).

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  The literal "default" (case-insensitive) is passed through unchanged;
  anything else is converted with float() (a ValueError propagates to
  optparse for invalid input).

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
# NOTE(review): range(100, 107) yields seven values but only five names are
# visible in the unpacking below -- two lines (presumably OPT_COMPL_ONE_NODE
# and OPT_COMPL_ONE_OS) are missing from this copy; restore from upstream,
# otherwise the unpacking raises ValueError at import time.
(OPT_COMPL_MANY_NODES,
OPT_COMPL_ONE_INSTANCE,
OPT_COMPL_ONE_IALLOCATOR,
OPT_COMPL_INST_ADD_NODES,
OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
# NOTE(review): the closing '])' of this frozenset is also missing here.
OPT_COMPL_ALL = frozenset([
OPT_COMPL_MANY_NODES,
OPT_COMPL_ONE_INSTANCE,
OPT_COMPL_ONE_IALLOCATOR,
OPT_COMPL_INST_ADD_NODES,
OPT_COMPL_ONE_NODEGROUP,
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's Option with a "completion_suggest" attribute (used
  by the bash completion machinery) and with the custom value types
  registered in TYPE_CHECKER below.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # every name listed here must have a converter in TYPE_CHECKER
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
# (all the *_OPT definitions below go through this alias, so the custom
# types and the completion_suggest attribute are available everywhere)
cli_option = CliOption
# ---------------------------------------------------------------------------
# Generic output/behaviour options shared by most gnt-* commands.
# NOTE(review): several cli_option() calls in this section are missing their
# closing lines in this copy of the file (e.g. IGNORE_OFFLINE_OPT,
# TAG_ADD_OPT, DRY_RUN_OPT, VERBOSE_OPT, NONICS_OPT, DEFAULT_IALLOCATOR_OPT,
# OS_OPT, NO_INSTALL_OPT); the surviving lines are left byte-for-byte
# untouched -- restore the missing lines from upstream.
# ---------------------------------------------------------------------------
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
help="Increase debugging level")
NOHDR_OPT = cli_option("--no-headers", default=False,
action="store_true", dest="no_headers",
help="Don't display column headers")
SEP_OPT = cli_option("--separator", default=None,
action="store", dest="separator",
help=("Separator between output fields"
" (defaults to one space)"))
USEUNITS_OPT = cli_option("--units", default=None,
dest="units", choices=("h", "m", "g", "t"),
help="Specify units for output (one of h/m/g/t)")
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
type="string", metavar="FIELDS",
help="Comma separated list of output fields")
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
default=False, help="Force the operation")
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
default=False, help="Do not require confirmation")
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
action="store_true", default=False,
help=("Ignore offline nodes and do as much"
TAG_ADD_OPT = cli_option("--tags", dest="tags",
default=None, help="Comma-separated list of instance"
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
default=None, help="File with tag names")
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
default=False, action="store_true",
help=("Submit the job and return the job ID, but"
" don't wait for the job to finish"))
SYNC_OPT = cli_option("--sync", dest="do_locking",
default=False, action="store_true",
help=("Grab locks while doing the queries"
" in order to ensure more consistent results"))
DRY_RUN_OPT = cli_option("--dry-run", default=False,
help=("Do not execute the operation, just run the"
" check steps and verify if it could be"
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
help="Increase the verbosity of the operation")
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
action="store_true", dest="simulate_errors",
help="Debugging option that makes the operation"
" treat most runtime checks as failed")
# NWSYNC/WFSYNC share dest="wait_for_sync" with opposite defaults; they are
# used by different commands, never together.
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
default=True, action="store_false",
help="Don't wait for sync (DANGEROUS!)")
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
default=False, action="store_true",
help="Wait for disks to sync")
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
action="store_true", default=False,
help="Enable offline instance")
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
action="store_true", default=False,
help="Disable down instance")
# ---------------------------------------------------------------------------
# Instance creation / OS related options.
# ---------------------------------------------------------------------------
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
help=("Custom disk setup (%s)" %
utils.CommaJoin(constants.DISK_TEMPLATES)),
default=None, metavar="TEMPL",
choices=list(constants.DISK_TEMPLATES))
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
help="Do not create any network cards for"
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
help="Relative path under default cluster-wide"
" file storage dir to store file-based disks",
default=None, metavar="<DIR>")
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
help="Driver to use for image files",
default="loop", metavar="<DRIVER>",
choices=list(constants.FILE_DRIVER))
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
help="Select nodes for the instance automatically"
" using the <NAME> iallocator plugin",
default=None, type="string",
completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
help="Set the default instance"
default=None, type="string",
completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
completion_suggest=OPT_COMPL_ONE_OS)
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
type="keyval", default={},
help="OS parameters")
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
action="store_true", default=False,
help="Force an unknown variant")
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
action="store_true", default=False,
help="Do not install the OS (will"
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
dest="allow_runtime_chgs",
default=True, action="store_false",
help="Don't allow runtime changes")
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
type="keyval", default={},
help="Backend parameters")
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
default={}, dest="hvparams",
help="Hypervisor parameters")
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
help="Disk template parameters, in the format"
" template:option=value,option=value,...",
type="identkeyval", action="append", default=[])
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
type="keyval", default={},
help="Memory size specs: list of key=value,"
" where key is one of min, max, std"
" (in MB or using a unit)")
# ---------------------------------------------------------------------------
# Instance-policy spec options and node/instance placement options.
# NOTE(review): some calls in this section are missing closing lines in this
# copy (e.g. IPOLICY_SPINDLE_RATIO, NOIPCHECK_OPT, NONAMECHECK_OPT,
# IGNORE_CONSIST_OPT, ALLOW_FAILOVER_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT);
# restore them from upstream.  Code lines are left byte-for-byte untouched.
# ---------------------------------------------------------------------------
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
type="keyval", default={},
help="CPU count specs: list of key=value,"
" where key is one of min, max, std")
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
dest="ispecs_disk_count",
type="keyval", default={},
help="Disk count specs: list of key=value,"
" where key is one of min, max, std")
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
type="keyval", default={},
help="Disk size specs: list of key=value,"
" where key is one of min, max, std"
" (in MB or using a unit)")
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
type="keyval", default={},
help="NIC count specs: list of key=value,"
" where key is one of min, max, std")
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
dest="ipolicy_disk_templates",
type="list", default=None,
help="Comma-separated list of"
" enabled disk templates")
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
dest="ipolicy_vcpu_ratio",
type="maybefloat", default=None,
help="The maximum allowed vcpu-to-cpu ratio")
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
dest="ipolicy_spindle_ratio",
type="maybefloat", default=None,
help=("The maximum allowed instances to"
# HYPERVISOR_OPT and HVLIST_OPT share the "-H" switch with HVOPTS_OPT above;
# presumably each command registers only one of them -- confirm per command.
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
help="Hypervisor and hypervisor options, in the"
" format hypervisor:option=value,option=value,...",
default=None, type="identkeyval")
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
help="Hypervisor and hypervisor options, in the"
" format hypervisor:option=value,option=value,...",
default=[], action="append", type="identkeyval")
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
action="store_false",
help="Don't check that the instance's IP"
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
default=True, action="store_false",
help="Don't check that the instance's name"
NET_OPT = cli_option("--net",
help="NIC parameters", default=[],
dest="nics", action="append", type="identkeyval")
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
dest="disks", action="append", type="identkeyval")
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
help="Comma-separated list of disks"
" indices to act on (e.g. 0,2) (optional,"
" defaults to all disks)")
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
help="Enforces a single-disk configuration using the"
" given disk size, in MiB unless a suffix is used",
default=None, type="unit", metavar="<size>")
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
dest="ignore_consistency",
action="store_true", default=False,
help="Ignore the consistency of the disks on"
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
dest="allow_failover",
action="store_true", default=False,
help="If migration is not possible fallback to"
NONLIVE_OPT = cli_option("--non-live", dest="live",
default=True, action="store_false",
help="Do a non-live migration (this usually means"
" freeze the instance, save the state, transfer and"
" only then resume running on the secondary node)")
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
choices=list(constants.HT_MIGRATION_MODES),
help="Override default migration mode (choose"
" either live or non-live")
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
help="Target node and optional secondary node",
metavar="<pnode>[:<snode>]",
completion_suggest=OPT_COMPL_INST_ADD_NODES)
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
action="append", metavar="<node>",
help="Use only this node (can be used multiple"
" times, if not given defaults to all nodes)",
completion_suggest=OPT_COMPL_ONE_NODE)
NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
help="Node group (name or uuid)",
metavar="<nodegroup>",
default=None, type="string",
completion_suggest=OPT_COMPL_ONE_NODEGROUP)
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
completion_suggest=OPT_COMPL_ONE_NODE)
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
action="store_false",
help="Don't start the instance after creation")
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
action="store_true", default=False,
help="Show command instead of executing it")
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
default=False, action="store_true",
help="Instead of performing the migration, try to"
" recover from a failed cleanup. This is safe"
" to run even if the instance is healthy, but it"
" will create extra replication traffic and "
" disrupt briefly the replication (like during the"
STATIC_OPT = cli_option("-s", "--static", dest="static",
action="store_true", default=False,
help="Only show configuration data, not runtime data")
ALL_OPT = cli_option("--all", dest="show_all",
default=False, action="store_true",
help="Show info on all instances on the cluster."
" This can take a long time to run, use wisely")
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
action="store_true", default=False,
help="Interactive OS reinstall, lists available"
" OS templates for selection")
# ---------------------------------------------------------------------------
# Instance removal, disk replacement, node and cluster flag options.
# NOTE(review): some calls in this section are missing closing lines in this
# copy (e.g. IGNORE_REMOVE_FAILURES_OPT, AUTO_PROMOTE_OPT, SRC_NODE_OPT,
# SRC_DIR_OPT, OFFLINE_OPT, NOLVM_STORAGE_OPT, MAC_PREFIX_OPT,
# MASTER_NETDEV_OPT, MASTER_NETMASK_OPT); restore from upstream.  Code
# lines are left byte-for-byte untouched.
# ---------------------------------------------------------------------------
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
action="store_true", default=False,
help="Remove the instance from the cluster"
" configuration even if there are failures"
" during the removal process")
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
dest="ignore_remove_failures",
action="store_true", default=False,
help="Remove the instance from the"
" cluster configuration even if there"
" are failures during the removal"
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
action="store_true", default=False,
help="Remove the instance from the cluster")
# DST_NODE_OPT and NEW_SECONDARY_OPT share dest="dst_node"; they belong to
# different commands (export vs. replace-disks).
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
help="Specifies the new node for the instance",
metavar="NODE", default=None,
completion_suggest=OPT_COMPL_ONE_NODE)
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
help="Specifies the new secondary node",
metavar="NODE", default=None,
completion_suggest=OPT_COMPL_ONE_NODE)
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
default=False, action="store_true",
help="Replace the disk(s) on the primary"
" node (applies only to internally mirrored"
" disk templates, e.g. %s)" %
utils.CommaJoin(constants.DTS_INT_MIRROR))
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
default=False, action="store_true",
help="Replace the disk(s) on the secondary"
" node (applies only to internally mirrored"
" disk templates, e.g. %s)" %
utils.CommaJoin(constants.DTS_INT_MIRROR))
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
default=False, action="store_true",
help="Lock all nodes and auto-promote as needed"
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
default=False, action="store_true",
help="Automatically replace faulty disks"
" (applies only to internally mirrored"
" disk templates, e.g. %s)" %
utils.CommaJoin(constants.DTS_INT_MIRROR))
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
default=False, action="store_true",
help="Ignore current recorded size"
" (useful for forcing activation when"
" the recorded size is wrong)")
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
completion_suggest=OPT_COMPL_ONE_NODE)
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
help="Specify the secondary ip for the node",
metavar="ADDRESS", default=None)
READD_OPT = cli_option("--readd", dest="readd",
default=False, action="store_true",
help="Readd old node after replacing it")
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
default=True, action="store_false",
help="Disable SSH key fingerprint checking")
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
default=False, action="store_true",
help="Force the joining of a node")
# The following node-flag options all use type="bool" (yes/no via check_bool)
# with default=None meaning "leave the flag unchanged".
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
type="bool", default=None, metavar=_YORNO,
help="Set the master_candidate flag on the node")
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
type="bool", default=None,
help=("Set the offline flag on the node"
" (cluster does not communicate with offline"
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
type="bool", default=None,
help=("Set the drained flag on the node"
" (excluded from allocation operations)"))
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
type="bool", default=None, metavar=_YORNO,
help="Set the master_capable flag on the node")
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
type="bool", default=None, metavar=_YORNO,
help="Set the vm_capable flag on the node")
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
type="bool", default=None, metavar=_YORNO,
help="Set the allocatable flag on a volume")
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
help="Disable support for lvm based instances"
action="store_false", default=True)
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
dest="enabled_hypervisors",
help="Comma-separated list of hypervisors",
type="string", default=None)
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
type="keyval", default={},
help="NIC parameters")
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
dest="candidate_pool_size", type="int",
help="Set the candidate pool size")
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
help=("Enables LVM and specifies the volume group"
" name (cluster-wide) for disk allocation"
" [%s]" % constants.DEFAULT_VG),
metavar="VG", default=None)
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
help="Destroy cluster", action="store_true")
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
help="Skip node agreement check (dangerous)",
action="store_true", default=False)
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
help="Specify the mac prefix for the instance IP"
" addresses, in the format XX:XX:XX",
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
help="Specify the node interface (cluster-wide)"
" on which the master IP address will be added"
" (cluster init default: %s)" %
constants.DEFAULT_BRIDGE,
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
help="Specify the netmask of the master IP",
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
dest="use_external_mip_script",
help="Specify whether to run a"
" user-provided script for the master"
" IP address turnup and"
" turndown operations",
type="bool", metavar=_YORNO, default=None)
1162 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1163 help="Specify the default directory (cluster-"
1164 "wide) for storing the file-based disks [%s]" %
1165 pathutils.DEFAULT_FILE_STORAGE_DIR,
1167 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1169 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1170 "--shared-file-storage-dir",
1171 dest="shared_file_storage_dir",
1172 help="Specify the default directory (cluster-wide) for storing the"
1173 " shared file-based disks [%s]" %
1174 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1175 metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1177 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1178 help="Don't modify %s" % pathutils.ETC_HOSTS,
1179 action="store_false", default=True)
1181 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1182 help="Don't initialize SSH keys",
1183 action="store_false", default=True)
1185 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1186 help="Enable parseable error messages",
1187 action="store_true", default=False)
1189 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1190 help="Skip N+1 memory redundancy tests",
1191 action="store_true", default=False)
1193 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1194 help="Type of reboot: soft/hard/full",
1195 default=constants.INSTANCE_REBOOT_HARD,
1197 choices=list(constants.REBOOT_TYPES))
1199 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1200 dest="ignore_secondaries",
1201 default=False, action="store_true",
1202 help="Ignore errors from secondaries")
1204 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1205 action="store_false", default=True,
1206 help="Don't shutdown the instance (unsafe)")
1208 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1209 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1210 help="Maximum time to wait")
1212 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1213 dest="shutdown_timeout", type="int",
1214 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1215 help="Maximum time to wait for instance"
1218 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1220 help=("Number of seconds between repetions of the"
1223 EARLY_RELEASE_OPT = cli_option("--early-release",
1224 dest="early_release", default=False,
1225 action="store_true",
1226 help="Release the locks on the secondary"
1229 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1230 dest="new_cluster_cert",
1231 default=False, action="store_true",
1232 help="Generate a new cluster certificate")
1234 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1236 help="File containing new RAPI certificate")
1238 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1239 default=None, action="store_true",
1240 help=("Generate a new self-signed RAPI"
1243 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1245 help="File containing new SPICE certificate")
1247 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1249 help="File containing the certificate of the CA"
1250 " which signed the SPICE certificate")
1252 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1253 dest="new_spice_cert", default=None,
1254 action="store_true",
1255 help=("Generate a new self-signed SPICE"
1258 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1259 dest="new_confd_hmac_key",
1260 default=False, action="store_true",
1261 help=("Create a new HMAC key for %s" %
# Option for loading the cluster domain secret from a file
# (fixes duplicated word "new new" in the help string)
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1270 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1271 dest="new_cluster_domain_secret",
1272 default=False, action="store_true",
1273 help=("Create a new cluster domain"
1276 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1277 dest="use_replication_network",
1278 help="Whether to use the replication network"
1279 " for talking to the nodes",
1280 action="store_true", default=False)
1282 MAINTAIN_NODE_HEALTH_OPT = \
1283 cli_option("--maintain-node-health", dest="maintain_node_health",
1284 metavar=_YORNO, default=None, type="bool",
1285 help="Configure the cluster to automatically maintain node"
1286 " health, by shutting down unknown instances, shutting down"
1287 " unknown DRBD devices, etc.")
1289 IDENTIFY_DEFAULTS_OPT = \
1290 cli_option("--identify-defaults", dest="identify_defaults",
1291 default=False, action="store_true",
1292 help="Identify which saved instance parameters are equal to"
1293 " the current cluster defaults and set them as such, instead"
1294 " of marking them as overridden")
1296 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1297 action="store", dest="uid_pool",
1298 help=("A list of user-ids or user-id"
1299 " ranges separated by commas"))
1301 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1302 action="store", dest="add_uids",
1303 help=("A list of user-ids or user-id"
1304 " ranges separated by commas, to be"
1305 " added to the user-id pool"))
1307 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1308 action="store", dest="remove_uids",
1309 help=("A list of user-ids or user-id"
1310 " ranges separated by commas, to be"
1311 " removed from the user-id pool"))
1313 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1314 action="store", dest="reserved_lvs",
1315 help=("A comma-separated list of reserved"
1316 " logical volumes names, that will be"
1317 " ignored by cluster verify"))
1319 ROMAN_OPT = cli_option("--roman",
1320 dest="roman_integers", default=False,
1321 action="store_true",
1322 help="Use roman numbers for positive integers")
1324 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1325 action="store", default=None,
1326 help="Specifies usermode helper for DRBD")
1328 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1329 action="store_false", default=True,
1330 help="Disable support for DRBD")
1332 PRIMARY_IP_VERSION_OPT = \
1333 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1334 action="store", dest="primary_ip_version",
1335 metavar="%d|%d" % (constants.IP4_VERSION,
1336 constants.IP6_VERSION),
1337 help="Cluster-wide IP version for primary IP")
1339 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1340 action="store_true",
1341 help="Show machine name for every line in output")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name chosen by the user into its
  numeric value and stores it on the parser's values object.

  @param option: The option instance being processed
  @param value: Symbolic priority name (a key of L{_PRIONAME_TO_VALUE})
  @param parser: The option parser

  """
  # "choices" on the option guarantees the key exists
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)
1353 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1354 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1355 choices=_PRIONAME_TO_VALUE.keys(),
1356 action="callback", type="choice",
1357 callback=_PriorityOptionCb,
1358 help="Priority for opcode processing")
1360 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1361 type="bool", default=None, metavar=_YORNO,
1362 help="Sets the hidden flag on the OS")
1364 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1365 type="bool", default=None, metavar=_YORNO,
1366 help="Sets the blacklisted flag on the OS")
1368 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1369 type="bool", metavar=_YORNO,
1370 dest="prealloc_wipe_disks",
1371 help=("Wipe disks prior to instance"
1374 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1375 type="keyval", default=None,
1376 help="Node parameters")
1378 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1379 action="store", metavar="POLICY", default=None,
1380 help="Allocation policy for the node group")
1382 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1383 type="bool", metavar=_YORNO,
1384 dest="node_powered",
1385 help="Specify if the SoR for node is powered")
1387 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1388 default=constants.OOB_TIMEOUT,
1389 help="Maximum time to wait for out-of-band helper")
1391 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1392 default=constants.OOB_POWER_DELAY,
1393 help="Time in seconds to wait between power-ons")
1395 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1396 action="store_true", default=False,
1397 help=("Whether command argument should be treated"
1400 NO_REMEMBER_OPT = cli_option("--no-remember",
1402 action="store_true", default=False,
1403 help="Perform but do not record the change"
1404 " in the configuration")
1406 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1407 default=False, action="store_true",
1408 help="Evacuate primary instances only")
1410 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1411 default=False, action="store_true",
1412 help="Evacuate secondary instances only"
1413 " (applies only to internally mirrored"
1414 " disk templates, e.g. %s)" %
1415 utils.CommaJoin(constants.DTS_INT_MIRROR))
1417 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1418 action="store_true", default=False,
1419 help="Pause instance at startup")
1421 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1422 help="Destination node group (name or uuid)",
1423 default=None, action="append",
1424 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1426 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1427 action="append", dest="ignore_errors",
1428 choices=list(constants.CV_ALL_ECODES_STRINGS),
1429 help="Error code to be ignored")
1431 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1433 help=("Specify disk state information in the"
1435 " storage_type/identifier:option=value,...;"
1436 " note this is unused for now"),
1439 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1441 help=("Specify hypervisor state information in the"
1442 " format hypervisor:option=value,...;"
1443 " note this is unused for now"),
1446 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1447 action="store_true", default=False,
1448 help="Ignore instance policy violations")
1450 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1451 help="Sets the instance's runtime memory,"
1452 " ballooning it up or down to the new value",
1453 default=None, type="unit", metavar="<size>")
1455 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1456 action="store_true", default=False,
1457 help="Marks the grow as absolute instead of the"
1458 " (default) relative mode")
1460 #: Options provided by all commands
1461 COMMON_OPTS = [DEBUG_OPT]
1463 # common options for creating instances. add and import then add their own
1465 COMMON_CREATE_OPTS = [
1470 FILESTORE_DRIVER_OPT,
1487 # common instance policy options
1488 INSTANCE_POLICY_OPTS = [
1489 SPECS_CPU_COUNT_OPT,
1490 SPECS_DISK_COUNT_OPT,
1491 SPECS_DISK_SIZE_OPT,
1493 SPECS_NIC_COUNT_OPT,
1494 IPOLICY_DISK_TEMPLATES,
1496 IPOLICY_SPINDLE_RATIO,
class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  Raised to signal that the usage text should be printed before exiting.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error
class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  Raised to signal that the program version should be printed before
  exiting.

  """
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @return: (function, options, args) tuple, or (None, None, None) when
      the arguments failed validation
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))
    cmd = aliases[cmd]

  if cmd in env_override:
    # e.g. "gnt-instance list" looks at GNT_INSTANCE_LIST for default args
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1580 def _FormatUsage(binary, commands):
1581 """Generates a nice description of all commands.
1583 @param binary: Script name
1584 @param commands: Dictionary containing command definitions
1587 # compute the max line length for cmd + usage
1588 mlen = min(60, max(map(len, commands)))
1590 yield "Usage: %s {command} [options...] [argument...]" % binary
1591 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1595 # and format a nice command list
1596 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1597 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1598 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1599 for line in help_lines:
1600 yield " %-*s %s" % (mlen, "", line)
1605 def _CheckArguments(cmd, args_def, args):
1606 """Verifies the arguments using the argument definition.
1610 1. Abort with error if values specified by user but none expected.
1612 1. For each argument in definition
1614 1. Keep running count of minimum number of values (min_count)
1615 1. Keep running count of maximum number of values (max_count)
1616 1. If it has an unlimited number of values
1618 1. Abort with error if it's not the last argument in the definition
1620 1. If last argument has limited number of values
1622 1. Abort with error if number of values doesn't match or is too large
1624 1. Abort with error if user didn't pass enough values (min_count)
1627 if args and not args_def:
1628 ToStderr("Error: Command %s expects no arguments", cmd)
1635 last_idx = len(args_def) - 1
1637 for idx, arg in enumerate(args_def):
1638 if min_count is None:
1640 elif arg.min is not None:
1641 min_count += arg.min
1643 if max_count is None:
1645 elif arg.max is not None:
1646 max_count += arg.max
1649 check_max = (arg.max is not None)
1651 elif arg.max is None:
1652 raise errors.ProgrammerError("Only the last argument can have max=None")
1655 # Command with exact number of arguments
1656 if (min_count is not None and max_count is not None and
1657 min_count == max_count and len(args) != min_count):
1658 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1661 # Command with limited number of arguments
1662 if max_count is not None and len(args) > max_count:
1663 ToStderr("Error: Command %s expects only %d argument(s)",
1667 # Command with some required arguments
1668 if min_count is not None and len(args) < min_count:
1669 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @param value: "pnode" or "pnode:snode" string given on the command line
  @return: a two-element list when a colon is present, otherwise the
      tuple (value, None)

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    # no variants: the base name is the only valid name
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @return: the default list when nothing was selected; the default list
      extended when the selection starts with "+"; otherwise the
      explicitly selected fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    # "+a,b" means "default fields plus a and b"
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias for rpc.RunWithRPC — presumably runs the wrapped
# function with the RPC layer set up; see the rpc module for details.
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer: the last choice (used when no tty is available)
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    # not running on a terminal, return the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # print the description of each choice as inline help
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the job ID assigned by the master daemon

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the opresult list of a successful job
  @raise errors.JobLost: If the job can no longer be found
  @raise errors.OpExecError: If the job failed or was canceled

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # Python 2 allows comparing None with an int, so the initial
        # None value of prev_logmsg_serial is fine here
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """Data callbacks that forward everything to a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that pass log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: Callable receiving (timestamp, log_type, log_msg)
        tuples

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print to standard output/error.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # remember what was already reported, to print each notice only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: Log message type (e.g. C{constants.ELOG_MESSAGE})
  @param log_msg: The message payload
  @return: Safely-encoded string representation of the message

  """
  # non-plain-message payloads are stringified first
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: Feedback function; mutually exclusive with C{reporter}
  @param reporter: Reporting callbacks; mutually exclusive with
      C{feedback_fn}

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @param cl: luxi client to use; a new one is created when None
  @param feedback_fn: feedback function for job-log messages
  @param opts: command line options used to set generic opcode options
  @param reporter: reporting callbacks for L{PollJob}
  @return: the result of the first (and only) opcode of the job

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: When only submitting; carries the job ID

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
def GetClient(query=False):
  """Connects to the a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  if query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    # None makes luxi.Client use its default (masterd) socket
    address = None
  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    # on the master itself there is a genuine "no master daemon" problem
    raise
  return client
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: exception instance to format
  @rtype: tuple of (int, string)

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: report the job ID and exit successfully
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
2245 def GenericMain(commands, override=None, aliases=None,
2246 env_override=frozenset()):
2247 """Generic main function for all the gnt-* commands.
2249 @param commands: a dictionary with a special structure, see the design doc
2250 for command line handling.
2251 @param override: if not None, we expect a dictionary with keys that will
2252 override command line options; this can be used to pass
2253 options from the scripts to generic functions
2254 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2255 @param env_override: list of environment names which are allowed to submit
2256 default args for commands
2259 # save the program name and the entire command line for later logging
2261 binary = os.path.basename(sys.argv[0])
2263 binary = sys.argv[0]
2265 if len(sys.argv) >= 2:
2266 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2270 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2272 binary = "<unknown program>"
2273 cmdline = "<unknown>"
2279 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2281 except _ShowVersion:
2282 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2283 constants.RELEASE_VERSION)
2284 return constants.EXIT_SUCCESS
2285 except _ShowUsage, err:
2286 for line in _FormatUsage(binary, commands):
2290 return constants.EXIT_FAILURE
2292 return constants.EXIT_SUCCESS
2293 except errors.ParameterError, err:
2294 result, err_msg = FormatError(err)
2298 if func is None: # parse error
2301 if override is not None:
2302 for key, val in override.iteritems():
2303 setattr(options, key, val)
2305 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2306 stderr_logging=True)
2308 logging.info("Command line: %s", cmdline)
2311 result = func(options, args)
2312 except (errors.GenericError, luxi.ProtocolError,
2313 JobSubmittedException), err:
2314 result, err_msg = FormatError(err)
2315 logging.exception("Error during command processing")
2317 except KeyboardInterrupt:
2318 result = constants.EXIT_FAILURE
2319 ToStderr("Aborted. Note that if the operation created any jobs, they"
2320 " might have been submitted and"
2321 " will continue to run in the background.")
2322 except IOError, err:
2323 if err.errno == errno.EPIPE:
2324 # our terminal went away, we'll exit
2325 sys.exit(constants.EXIT_FAILURE)
2332 def ParseNicOption(optvalue):
2333 """Parses the value of the --net option(s).
2337 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2338 except (TypeError, ValueError), err:
2339 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2342 nics = [{}] * nic_max
2343 for nidx, ndict in optvalue:
2346 if not isinstance(ndict, dict):
2347 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2348 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2350 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  # NOTE(review): elided control flow in the provided snapshot restored
  # below — verify against upstream before merging
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      # Legacy "-s <size>" option becomes a single-disk specification
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError as err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      # Independent dicts per slot (avoid shared-dict aliasing of "[{}] * n")
      disks = [{} for _ in range(disk_max)]
    else:
      disks = []

    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError as err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception as err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # NOTE(review): elided branches in the provided snapshot restored below
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # Last column is left-aligned: no padding needed at end of line
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
def _FormatBool(value):
  """Formats a boolean value as a string.

  @type value: bool
  @param value: the value to format
  @rtype: string
  @return: C{"Y"} for true values, C{"N"} otherwise

  """
  if value:
    return "Y"
  return "N"
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side
  @raise NotImplementedError: if the field type has no known formatter

  """
  # Per-field overrides take precedence over defaults
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report field status to the caller-supplied callback
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status
  @raise NotImplementedError: if the status has no description

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple
  @return: (overall status, list of formatted table lines)

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  # Count occurrences of each result status
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list of L{objects.QueryFieldDefinition}
  @return: the subset of C{fdefs} whose kind is C{constants.QFT_UNKNOWN}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    match the numeric type
  @rtype: int
  @return: an exit code suitable for returning from a command

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  # Combine the name-based filter with any caller-supplied filter
  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: an exit code suitable for returning from a command

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  @type width: number
  @param width: Column width
  @type align_right: bool
  @param align_right: Whether to align values on the right-hand side
  @rtype: string
  @return: printf-style format string for one table cell

  """
  # "%<w>s" right-aligns, "%-<w>s" left-aligns
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted table lines

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or C{"?"} if the input
      is not a valid two-element timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: if the value is empty or not parseable

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)

  # Seconds-per-unit for each recognized suffix
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  # Build the list of filter clauses to AND together
  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each row is [(status, name), (status, offline), (status, sip)]
  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError as err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Monotonic counter used to restore submission order in GetResults()
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level

  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      # Nested parameter group: recurse with increased indentation
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      # Values not explicitly set are shown as inherited defaults
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # Long lists are not shown by default; offer a "view" choice instead
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  @type elements: dict
  @param elements: mapping of keys to size values, possibly with unit
      suffixes; the special value C{constants.VALUE_DEFAULT} is kept as-is
  @rtype: dict
  @return: the same mapping with all non-default values parsed via
      L{utils.ParseUnit}

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
3539 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3540 ispecs_cpu_count=None,
3541 ispecs_disk_count=None,
3542 ispecs_disk_size=None,
3543 ispecs_nic_count=None,
3544 ipolicy_disk_templates=None,
3545 ipolicy_vcpu_ratio=None,
3546 ipolicy_spindle_ratio=None,
3547 group_ipolicy=False,
3548 allowed_values=None,
3550 """Creation of instance policy based on command line options.
3552 @param fill_all: whether for cluster policies we should ensure that
3553 all values are filled
3559 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3560 if ispecs_disk_size:
3561 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3562 except (TypeError, ValueError, errors.UnitParseError), err:
3563 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3565 (ispecs_disk_size, ispecs_mem_size, err),
3568 # prepare ipolicy dict
3569 ipolicy_transposed = {
3570 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3571 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3572 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3573 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3574 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3577 # first, check that the values given are correct
3579 forced_type = TISPECS_GROUP_TYPES
3581 forced_type = TISPECS_CLUSTER_TYPES
3583 for specs in ipolicy_transposed.values():
3584 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3587 ipolicy_out = objects.MakeEmptyIPolicy()
3588 for name, specs in ipolicy_transposed.iteritems():
3589 assert name in constants.ISPECS_PARAMETERS
3590 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3591 ipolicy_out[key][name] = val
3593 # no filldict for non-dicts
3594 if not group_ipolicy and fill_all:
3595 if ipolicy_disk_templates is None:
3596 ipolicy_disk_templates = constants.DISK_TEMPLATES
3597 if ipolicy_vcpu_ratio is None:
3598 ipolicy_vcpu_ratio = \
3599 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3600 if ipolicy_spindle_ratio is None:
3601 ipolicy_spindle_ratio = \
3602 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
3603 if ipolicy_disk_templates is not None:
3604 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3605 if ipolicy_vcpu_ratio is not None:
3606 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3607 if ipolicy_spindle_ratio is not None:
3608 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3610 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)