4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
48 from optparse import (OptionParser, TitledHelpFormatter,
49 Option, OptionValueError)
# Exported public names of this module (entries of the module's __all__ list):
# option constants, generic CLI helper functions, formatting helpers and the
# argument/completion support infrastructure. NOTE(review): this is an
# excerpted view — additional entries exist between the lines shown.
53 # Command line options
67 "CLUSTER_DOMAIN_SECRET_OPT",
85 "FILESTORE_DRIVER_OPT",
91 "GLOBAL_SHARED_FILEDIR_OPT",
96 "DEFAULT_IALLOCATOR_OPT",
97 "IDENTIFY_DEFAULTS_OPT",
100 "IGNORE_FAILURES_OPT",
101 "IGNORE_OFFLINE_OPT",
102 "IGNORE_REMOVE_FAILURES_OPT",
103 "IGNORE_SECONDARIES_OPT",
107 "MAINTAIN_NODE_HEALTH_OPT",
109 "MASTER_NETMASK_OPT",
111 "MIGRATION_MODE_OPT",
113 "NEW_CLUSTER_CERT_OPT",
114 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
115 "NEW_CONFD_HMAC_KEY_OPT",
118 "NEW_SPICE_CERT_OPT",
120 "NODE_FORCE_JOIN_OPT",
122 "NODE_PLACEMENT_OPT",
126 "NODRBD_STORAGE_OPT",
132 "NOMODIFY_ETCHOSTS_OPT",
133 "NOMODIFY_SSH_SETUP_OPT",
137 "NORUNTIME_CHGS_OPT",
140 "NOSSH_KEYCHECK_OPT",
154 "PREALLOC_WIPE_DISKS_OPT",
155 "PRIMARY_IP_VERSION_OPT",
161 "REMOVE_INSTANCE_OPT",
167 "SECONDARY_ONLY_OPT",
171 "SHUTDOWN_TIMEOUT_OPT",
173 "SPECS_CPU_COUNT_OPT",
174 "SPECS_DISK_COUNT_OPT",
175 "SPECS_DISK_SIZE_OPT",
176 "SPECS_MEM_SIZE_OPT",
177 "SPECS_NIC_COUNT_OPT",
178 "IPOLICY_DISK_TEMPLATES",
179 "IPOLICY_VCPU_RATIO",
185 "STARTUP_PAUSED_OPT",
194 "USE_EXTERNAL_MIP_SCRIPT",
202 "IGNORE_IPOLICY_OPT",
203 "INSTANCE_POLICY_OPTS",
204 # Generic functions for CLI programs
206 "CreateIPolicyFromOpts",
208 "GenericInstanceCreate",
214 "JobSubmittedException",
216 "RunWhileClusterStopped",
220 # Formatting functions
221 "ToStderr", "ToStdout",
224 "FormatParameterDict",
233 # command line options support infrastructure
234 "ARGS_MANY_INSTANCES",
253 "OPT_COMPL_INST_ADD_NODES",
254 "OPT_COMPL_MANY_NODES",
255 "OPT_COMPL_ONE_IALLOCATOR",
256 "OPT_COMPL_ONE_INSTANCE",
257 "OPT_COMPL_ONE_NODE",
258 "OPT_COMPL_ONE_NODEGROUP",
264 "COMMON_CREATE_OPTS",
# Priority name/value pairs (sorted), folded into _PRIONAME_TO_VALUE for
# name -> constants.OP_PRIO_* lookup.
270 #: Priorities (sorted)
272 ("low", constants.OP_PRIO_LOW),
273 ("normal", constants.OP_PRIO_NORMAL),
274 ("high", constants.OP_PRIO_HIGH),
277 #: Priority dictionary for easier lookup
278 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
279 # we migrate to Python 2.6
280 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
282 # Query result status for clients
# Three query-result states; QR_INCOMPLETE is the last of the range(3) tuple.
285 QR_INCOMPLETE) = range(3)
287 #: Maximum batch size for ChooseJob
# Type maps used when building an InstancePolicy dict: group-level specs have
# only min/max bounds; cluster-level specs additionally carry a std default.
291 # constants used to create InstancePolicy dictionary
292 TISPECS_GROUP_TYPES = {
293 constants.ISPECS_MIN: constants.VTYPE_INT,
294 constants.ISPECS_MAX: constants.VTYPE_INT,
297 TISPECS_CLUSTER_TYPES = {
298 constants.ISPECS_MIN: constants.VTYPE_INT,
299 constants.ISPECS_MAX: constants.VTYPE_INT,
300 constants.ISPECS_STD: constants.VTYPE_INT,
# Base argument class internals: min/max bound the allowed number of
# command-line arguments (min=0, max=None means "any number").
# `min`/`max` deliberately shadow builtins, hence the pylint W0622 disables.
305 def __init__(self, min=0, max=None): # pylint: disable=W0622
310 return ("<%s min=%s max=%s>" %
311 (self.__class__.__name__, self.min, self.max))
# ArgSuggest: argument whose value is suggested (for shell completion) from
# the `choices` iterable; unlike ArgChoice the value is not restricted to it.
314 class ArgSuggest(_Argument):
315 """Suggesting argument.
317 Value can be any of the ones passed to the constructor.
320 # pylint: disable=W0622
321 def __init__(self, min=0, max=None, choices=None):
322 _Argument.__init__(self, min=min, max=max)
323 self.choices = choices
326 return ("<%s min=%s max=%s choices=%r>" %
327 (self.__class__.__name__, self.min, self.max, self.choices))
# ArgChoice: same constructor as ArgSuggest, but the value MUST be one of
# the given choices.
330 class ArgChoice(ArgSuggest):
333 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
334 but value must be one of the choices.
# The following thin subclasses only tag the argument kind so completion and
# validation code can dispatch on the class; they add no behavior of their own.
339 class ArgUnknown(_Argument):
340 """Unknown argument to program (e.g. determined at runtime).
345 class ArgInstance(_Argument):
346 """Instances argument.
351 class ArgNode(_Argument):
357 class ArgGroup(_Argument):
358 """Node group argument.
363 class ArgJobId(_Argument):
369 class ArgFile(_Argument):
370 """File path argument.
375 class ArgCommand(_Argument):
381 class ArgHost(_Argument):
387 class ArgOs(_Argument):
# Pre-built argument-list shorthands used in command tables:
# "MANY" variants accept zero or more arguments, "ONE" variants require
# exactly one (min=1, max=1).
394 ARGS_MANY_INSTANCES = [ArgInstance()]
395 ARGS_MANY_NODES = [ArgNode()]
396 ARGS_MANY_GROUPS = [ArgGroup()]
397 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
398 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
400 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
401 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
# Resolve (kind, name) of the tag target from opts.tag_type and args.
# Raises ProgrammerError when the caller forgot to set tag_type or passed an
# unknown kind, and OpPrereqError when the user gave no target argument.
# NOTE(review): modifies `args` in place (pops the target name) — presumably;
# the popping lines are elided in this view, confirm against full source.
404 def _ExtractTagsObject(opts, args):
405 """Extract the tag type object.
407 Note that this function will modify its args parameter.
410 if not hasattr(opts, "tag_type"):
411 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
# Cluster tags need no name; nodegroup/node/instance tags take the target
# name from the (required) positional arguments.
413 if kind == constants.TAG_CLUSTER:
415 elif kind in (constants.TAG_NODEGROUP,
417 constants.TAG_INSTANCE):
419 raise errors.OpPrereqError("no arguments passed to the command")
423 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
# Append tags read from opts.tags_source (a file path, '-' for stdin) to the
# args list in place. No-op when no source file was given.
427 def _ExtendTags(opts, args):
428 """Extend the args if a source file has been given.
430 This function will extend the tags with the contents of the file
431 passed in the 'tags_source' attribute of the opts parameter. A file
432 named '-' will be replaced by stdin.
435 fname = opts.tags_source
441 new_fh = open(fname, "r")
# Read line-by-line via readline() instead of iterating the file object:
444 # we don't use the nice 'new_data = [line.strip() for line in fh]'
445 # because of python bug 1633941
447 line = new_fh.readline()
450 new_data.append(line.strip())
453 args.extend(new_data)
# CLI entry point: query and print the tags of one object (cluster, node,
# nodegroup or instance), the kind being taken from opts.tag_type.
456 def ListTags(opts, args):
457 """List the tags on a given object.
459 This is a generic implementation that knows how to deal with all
460 three cases of tag objects (cluster, node, instance). The opts
461 argument is expected to contain a tag_type field denoting what
462 object type we work on.
465 kind, name = _ExtractTagsObject(opts, args)
# `cl` is the luxi client (creation line elided in this view); QueryTags
# returns an iterable which is materialized before output.
467 result = cl.QueryTags(kind, name)
468 result = list(result)
# CLI entry point: add the tags given on the command line (plus any read from
# --from via _ExtendTags) to the selected object, then submit OpTagsSet.
474 def AddTags(opts, args):
475 """Add tags on a given object.
477 This is a generic implementation that knows how to deal with all
478 three cases of tag objects (cluster, node, instance). The opts
479 argument is expected to contain a tag_type field denoting what
480 object type we work on.
483 kind, name = _ExtractTagsObject(opts, args)
484 _ExtendTags(opts, args)
# Adding zero tags is a user error, not a silent no-op.
486 raise errors.OpPrereqError("No tags to be added")
487 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
488 SubmitOrSend(op, opts)
# CLI entry point: mirror image of AddTags — removes the listed tags from the
# selected object via OpTagsDel.
491 def RemoveTags(opts, args):
492 """Remove tags from a given object.
494 This is a generic implementation that knows how to deal with all
495 three cases of tag objects (cluster, node, instance). The opts
496 argument is expected to contain a tag_type field denoting what
497 object type we work on.
500 kind, name = _ExtractTagsObject(opts, args)
501 _ExtendTags(opts, args)
# Removing zero tags is a user error, not a silent no-op.
503 raise errors.OpPrereqError("No tags to be removed")
504 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
505 SubmitOrSend(op, opts)
# optparse type-checker: parse a size value with an optional unit suffix via
# utils.ParseUnit, translating parse failures into OptionValueError so
# optparse reports them as a normal usage error.
508 def check_unit(option, opt, value): # pylint: disable=W0613
509 """OptParsers custom converter for units.
513 return utils.ParseUnit(value)
514 except errors.UnitParseError, err:
515 raise OptionValueError("option %s: %s" % (opt, err))
# Parse "key=val,key=val,..." into a dict. Bare keys are special-cased:
# "no_key" -> {key: False}, "-key" (UN_PREFIX) -> {key: None}, plain "key"
# -> {key: True}. Commas inside values must be escaped (UnescapeAndSplit).
518 def _SplitKeyVal(opt, data):
519 """Convert a KeyVal string into a dict.
521 This function will convert a key=val[,...] string into a dict. Empty
522 values will be converted specially: keys which have the prefix 'no_'
523 will have the value=False and the prefix stripped, the others will
527 @param opt: a string holding the option name for which we process the
528 data, used in building error messages
530 @param data: a string of the format key=val,key=val,...
532 @return: {key=val, key=val}
533 @raises errors.ParameterError: if there are duplicate keys
538 for elem in utils.UnescapeAndSplit(data, sep=","):
540 key, val = elem.split("=", 1)
# No '=' present: interpret the prefix (no_/UN_) or default to True.
542 if elem.startswith(NO_PREFIX):
543 key, val = elem[len(NO_PREFIX):], False
544 elif elem.startswith(UN_PREFIX):
545 key, val = elem[len(UN_PREFIX):], None
547 key, val = elem, True
549 raise errors.ParameterError("Duplicate key '%s' in option %s" %
# optparse type-checker for "ident:key=val,key=val" options; result is the
# tuple (ident, {key: val}). Special idents: "no_ident" -> (ident, False),
# "-ident" (UN_PREFIX, unless followed by a digit) -> (ident, None); in both
# cases passing key=val data alongside is an error.
555 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
556 """Custom parser for ident:key=val,key=val options.
558 This will store the parsed values as a tuple (ident, {key: val}). As such,
559 multiple uses of this option via action=append is possible.
# Without a ':' the whole value is the ident and there is no key=val part.
563 ident, rest = value, ""
565 ident, rest = value.split(":", 1)
567 if ident.startswith(NO_PREFIX):
569 msg = "Cannot pass options when removing parameter groups: %s" % value
570 raise errors.ParameterError(msg)
571 retval = (ident[len(NO_PREFIX):], False)
# The digit check keeps negative numeric idents (e.g. "-1") usable as-is.
572 elif (ident.startswith(UN_PREFIX) and
573 (len(ident) <= len(UN_PREFIX) or
574 not ident[len(UN_PREFIX)][0].isdigit())):
576 msg = "Cannot pass options when removing parameter groups: %s" % value
577 raise errors.ParameterError(msg)
578 retval = (ident[len(UN_PREFIX):], None)
580 kv_dict = _SplitKeyVal(opt, rest)
581 retval = (ident, kv_dict)
# optparse type-checker for plain "key=val,key=val" options; thin wrapper
# around _SplitKeyVal returning the resulting dict.
585 def check_key_val(option, opt, value): # pylint: disable=W0613
586 """Custom parser class for key=val,key=val options.
588 This will store the parsed values as a dict {key: val}.
591 return _SplitKeyVal(opt, value)
# optparse type-checker for boolean options: case-insensitively accepts
# constants.VALUE_FALSE/"no" and constants.VALUE_TRUE/"yes"; anything else
# raises ParameterError.
594 def check_bool(option, opt, value): # pylint: disable=W0613
595 """Custom parser for yes/no options.
597 This will store the parsed value as either True or False.
600 value = value.lower()
601 if value == constants.VALUE_FALSE or value == "no":
603 elif value == constants.VALUE_TRUE or value == "yes":
606 raise errors.ParameterError("Invalid boolean value '%s'" % value)
# optparse type-checker for comma-separated lists; empty input must map to []
# explicitly, since "".split(",") would give [""].
609 def check_list(option, opt, value): # pylint: disable=W0613
610 """Custom parser for comma-separated lists.
613 # we have to make this explicit check since "".split(",") is [""],
614 # not an empty list :(
618 return utils.UnescapeAndSplit(value)
# optparse type-checker for float values where the literal "default"
# (constants.VALUE_DEFAULT) is also accepted, case-insensitively.
621 def check_maybefloat(option, opt, value): # pylint: disable=W0613
622 """Custom parser for float numbers which might be also defaults.
625 value = value.lower()
627 if value == constants.VALUE_DEFAULT:
# Shell-completion marker constants: small integers starting at 100 so that
# they are truthy (a plain list of suggestions is also truthy), allowing
# "if completion_suggest:" style checks. OPT_COMPL_ALL collects every marker.
633 # completion_suggestion is normally a list. Using numeric values not evaluating
634 # to False for dynamic completion.
635 (OPT_COMPL_MANY_NODES,
637 OPT_COMPL_ONE_INSTANCE,
639 OPT_COMPL_ONE_IALLOCATOR,
640 OPT_COMPL_INST_ADD_NODES,
641 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
643 OPT_COMPL_ALL = frozenset([
644 OPT_COMPL_MANY_NODES,
646 OPT_COMPL_ONE_INSTANCE,
648 OPT_COMPL_ONE_IALLOCATOR,
649 OPT_COMPL_INST_ADD_NODES,
650 OPT_COMPL_ONE_NODEGROUP,
# CliOption extends optparse.Option with a "completion_suggest" attribute and
# registers the custom value types defined above (identkeyval, keyval, unit,
# bool, list, maybefloat) in TYPE_CHECKER.
654 class CliOption(Option):
655 """Custom option class for optparse.
658 ATTRS = Option.ATTRS + [
659 "completion_suggest",
661 TYPES = Option.TYPES + (
669 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
670 TYPE_CHECKER["identkeyval"] = check_ident_key_val
671 TYPE_CHECKER["keyval"] = check_key_val
672 TYPE_CHECKER["unit"] = check_unit
673 TYPE_CHECKER["bool"] = check_bool
674 TYPE_CHECKER["list"] = check_list
675 TYPE_CHECKER["maybefloat"] = check_maybefloat
# Module-wide factory alias; all *_OPT constants below are built with it.
678 # optparse.py sets make_option, so we do it for our own option class, too
679 cli_option = CliOption
# --- Generic output/confirmation options shared by most CLI commands ---
684 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
685 help="Increase debugging level")
687 NOHDR_OPT = cli_option("--no-headers", default=False,
688 action="store_true", dest="no_headers",
689 help="Don't display column headers")
691 SEP_OPT = cli_option("--separator", default=None,
692 action="store", dest="separator",
693 help=("Separator between output fields"
694 " (defaults to one space)"))
696 USEUNITS_OPT = cli_option("--units", default=None,
697 dest="units", choices=("h", "m", "g", "t"),
698 help="Specify units for output (one of h/m/g/t)")
700 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
701 type="string", metavar="FIELDS",
702 help="Comma separated list of output fields")
704 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
705 default=False, help="Force the operation")
707 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
708 default=False, help="Do not require confirmation")
710 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
711 action="store_true", default=False,
712 help=("Ignore offline nodes and do as much"
# Tag-related options; --from accepts a file name ('-' meaning stdin, see
# _ExtendTags above).
715 TAG_ADD_OPT = cli_option("--tags", dest="tags",
716 default=None, help="Comma-separated list of instance"
719 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
720 default=None, help="File with tag names")
721 SUBMIT_OPT related: --submit returns immediately with the job ID.
722 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
723 default=False, action="store_true",
724 help=("Submit the job and return the job ID, but"
725 " don't wait for the job to finish"))
727 SYNC_OPT = cli_option("--sync", dest="do_locking",
728 default=False, action="store_true",
729 help=("Grab locks while doing the queries"
730 " in order to ensure more consistent results"))
732 DRY_RUN_OPT = cli_option("--dry-run", default=False,
734 help=("Do not execute the operation, just run the"
735 " check steps and verify it it could be"
# --- Verbosity, sync behavior and instance/disk definition options ---
738 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
740 help="Increase the verbosity of the operation")
742 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
743 action="store_true", dest="simulate_errors",
744 help="Debugging option that makes the operation"
745 " treat most runtime checks as failed")
# NWSYNC/WFSYNC share dest="wait_for_sync" with opposite defaults/actions;
# commands use whichever polarity matches their own default behavior.
747 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
748 default=True, action="store_false",
749 help="Don't wait for sync (DANGEROUS!)")
751 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
752 default=False, action="store_true",
753 help="Wait for disks to sync")
755 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
756 action="store_true", default=False,
757 help="Enable offline instance")
759 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
760 action="store_true", default=False,
761 help="Disable down instance")
763 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
764 help=("Custom disk setup (%s)" %
765 utils.CommaJoin(constants.DISK_TEMPLATES)),
766 default=None, metavar="TEMPL",
767 choices=list(constants.DISK_TEMPLATES))
769 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
770 help="Do not create any network cards for"
773 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
774 help="Relative path under default cluster-wide"
775 " file storage dir to store file-based disks",
776 default=None, metavar="<DIR>")
778 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
779 help="Driver to use for image files",
780 default="loop", metavar="<DRIVER>",
781 choices=list(constants.FILE_DRIVER))
# Both iallocator options reuse "-I"; they belong to different commands.
783 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
784 help="Select nodes for the instance automatically"
785 " using the <NAME> iallocator plugin",
786 default=None, type="string",
787 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
789 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
791 help="Set the default instance allocator plugin",
792 default=None, type="string",
793 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
795 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
797 completion_suggest=OPT_COMPL_ONE_OS)
799 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
800 type="keyval", default={},
801 help="OS parameters")
803 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
804 action="store_true", default=False,
805 help="Force an unknown variant")
807 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
808 action="store_true", default=False,
809 help="Do not install the OS (will"
812 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
813 dest="allow_runtime_chgs",
814 default=True, action="store_false",
815 help="Don't allow runtime changes")
817 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
818 type="keyval", default={},
819 help="Backend parameters")
821 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
822 default={}, dest="hvparams",
823 help="Hypervisor parameters")
825 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
826 help="Disk template parameters, in the format"
827 " template:option=value,option=value,...",
828 type="identkeyval", action="append", default=[])
# --- Instance-policy spec options (min/max/std per resource) ---
830 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
831 type="keyval", default={},
832 help="Memory size specs: list of key=value,"
833 " where key is one of min, max, std"
834 " (in MB or using a unit)")
836 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
837 type="keyval", default={},
838 help="CPU count specs: list of key=value,"
839 " where key is one of min, max, std")
841 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
842 dest="ispecs_disk_count",
843 type="keyval", default={},
844 help="Disk count specs: list of key=value,"
845 " where key is one of min, max, std")
847 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
848 type="keyval", default={},
849 help="Disk size specs: list of key=value,"
850 " where key is one of min, max, std"
851 " (in MB or using a unit)")
853 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
854 type="keyval", default={},
855 help="NIC count specs: list of key=value,"
856 " where key is one of min, max, std")
858 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
859 dest="ipolicy_disk_templates",
860 type="list", default=None,
861 help="Comma-separated list of"
862 " enabled disk templates")
864 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
865 dest="ipolicy_vcpu_ratio",
866 type="maybefloat", default=None,
867 help="The maximum allowed vcpu-to-cpu ratio")
869 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
870 dest="ipolicy_spindle_ratio",
871 type="maybefloat", default=None,
872 help=("The maximum allowed instances to"
# HYPERVISOR_OPT (single value) and HVLIST_OPT (append) share the same
# command-line switch but different dest/semantics across commands.
875 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
876 help="Hypervisor and hypervisor options, in the"
877 " format hypervisor:option=value,option=value,...",
878 default=None, type="identkeyval")
880 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
881 help="Hypervisor and hypervisor options, in the"
882 " format hypervisor:option=value,option=value,...",
883 default=[], action="append", type="identkeyval")
885 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
886 action="store_false",
887 help="Don't check that the instance's IP"
890 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
891 default=True, action="store_false",
892 help="Don't check that the instance's name"
895 NET_OPT = cli_option("--net",
896 help="NIC parameters", default=[],
897 dest="nics", action="append", type="identkeyval")
899 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
900 dest="disks", action="append", type="identkeyval")
902 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
903 help="Comma-separated list of disks"
904 " indices to act on (e.g. 0,2) (optional,"
905 " defaults to all disks)")
907 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
908 help="Enforces a single-disk configuration using the"
909 " given disk size, in MiB unless a suffix is used",
910 default=None, type="unit", metavar="<size>")
912 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
913 dest="ignore_consistency",
914 action="store_true", default=False,
915 help="Ignore the consistency of the disks on"
918 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
919 dest="allow_failover",
920 action="store_true", default=False,
921 help="If migration is not possible fallback to"
924 NONLIVE_OPT = cli_option("--non-live", dest="live",
925 default=True, action="store_false",
926 help="Do a non-live migration (this usually means"
927 " freeze the instance, save the state, transfer and"
928 " only then resume running on the secondary node)")
930 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
932 choices=list(constants.HT_MIGRATION_MODES),
933 help="Override default migration mode (choose"
934 " either live or non-live")
# --- Node selection options (several commands reuse "-n" with different
# dest/semantics: single node, node list, or pnode[:snode] placement) ---
936 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
937 help="Target node and optional secondary node",
938 metavar="<pnode>[:<snode>]",
939 completion_suggest=OPT_COMPL_INST_ADD_NODES)
941 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
942 action="append", metavar="<node>",
943 help="Use only this node (can be used multiple"
944 " times, if not given defaults to all nodes)",
945 completion_suggest=OPT_COMPL_ONE_NODE)
# The long-option string is a named constant so other code can refer to it.
947 NODEGROUP_OPT_NAME = "--node-group"
948 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
950 help="Node group (name or uuid)",
951 metavar="<nodegroup>",
952 default=None, type="string",
953 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
955 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
957 completion_suggest=OPT_COMPL_ONE_NODE)
959 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
960 action="store_false",
961 help="Don't start the instance after creation")
963 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
964 action="store_true", default=False,
965 help="Show command instead of executing it")
967 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
968 default=False, action="store_true",
969 help="Instead of performing the migration, try to"
970 " recover from a failed cleanup. This is safe"
971 " to run even if the instance is healthy, but it"
972 " will create extra replication traffic and "
973 " disrupt briefly the replication (like during the"
# --- Instance info/removal, disk replacement, node flags and cluster-wide
# configuration options ---
976 STATIC_OPT = cli_option("-s", "--static", dest="static",
977 action="store_true", default=False,
978 help="Only show configuration data, not runtime data")
980 ALL_OPT = cli_option("--all", dest="show_all",
981 default=False, action="store_true",
982 help="Show info on all instances on the cluster."
983 " This can take a long time to run, use wisely")
985 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
986 action="store_true", default=False,
987 help="Interactive OS reinstall, lists available"
988 " OS templates for selection")
990 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
991 action="store_true", default=False,
992 help="Remove the instance from the cluster"
993 " configuration even if there are failures"
994 " during the removal process")
996 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
997 dest="ignore_remove_failures",
998 action="store_true", default=False,
999 help="Remove the instance from the"
1000 " cluster configuration even if there"
1001 " are failures during the removal"
1004 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1005 action="store_true", default=False,
1006 help="Remove the instance from the cluster")
# DST_NODE_OPT and NEW_SECONDARY_OPT share dest="dst_node" (used by
# different commands).
1008 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1009 help="Specifies the new node for the instance",
1010 metavar="NODE", default=None,
1011 completion_suggest=OPT_COMPL_ONE_NODE)
1013 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1014 help="Specifies the new secondary node",
1015 metavar="NODE", default=None,
1016 completion_suggest=OPT_COMPL_ONE_NODE)
1018 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1019 default=False, action="store_true",
1020 help="Replace the disk(s) on the primary"
1021 " node (applies only to internally mirrored"
1022 " disk templates, e.g. %s)" %
1023 utils.CommaJoin(constants.DTS_INT_MIRROR))
1025 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1026 default=False, action="store_true",
1027 help="Replace the disk(s) on the secondary"
1028 " node (applies only to internally mirrored"
1029 " disk templates, e.g. %s)" %
1030 utils.CommaJoin(constants.DTS_INT_MIRROR))
1032 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1033 default=False, action="store_true",
1034 help="Lock all nodes and auto-promote as needed"
1037 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1038 default=False, action="store_true",
1039 help="Automatically replace faulty disks"
1040 " (applies only to internally mirrored"
1041 " disk templates, e.g. %s)" %
1042 utils.CommaJoin(constants.DTS_INT_MIRROR))
1044 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1045 default=False, action="store_true",
1046 help="Ignore current recorded size"
1047 " (useful for forcing activation when"
1048 " the recorded size is wrong)")
1050 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1052 completion_suggest=OPT_COMPL_ONE_NODE)
1054 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1057 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1058 help="Specify the secondary ip for the node",
1059 metavar="ADDRESS", default=None)
1061 READD_OPT = cli_option("--readd", dest="readd",
1062 default=False, action="store_true",
1063 help="Readd old node after replacing it")
1065 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1066 default=True, action="store_false",
1067 help="Disable SSH key fingerprint checking")
1069 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1070 default=False, action="store_true",
1071 help="Force the joining of a node")
# Node flag options use the tri-state type="bool" with default=None so an
# unspecified flag means "leave unchanged".
1073 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1074 type="bool", default=None, metavar=_YORNO,
1075 help="Set the master_candidate flag on the node")
1077 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1078 type="bool", default=None,
1079 help=("Set the offline flag on the node"
1080 " (cluster does not communicate with offline"
1083 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1084 type="bool", default=None,
1085 help=("Set the drained flag on the node"
1086 " (excluded from allocation operations)"))
1088 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1089 type="bool", default=None, metavar=_YORNO,
1090 help="Set the master_capable flag on the node")
1092 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1093 type="bool", default=None, metavar=_YORNO,
1094 help="Set the vm_capable flag on the node")
1096 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1097 type="bool", default=None, metavar=_YORNO,
1098 help="Set the allocatable flag on a volume")
1100 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1101 help="Disable support for lvm based instances"
1103 action="store_false", default=True)
1105 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1106 dest="enabled_hypervisors",
1107 help="Comma-separated list of hypervisors",
1108 type="string", default=None)
1110 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1111 type="keyval", default={},
1112 help="NIC parameters")
1114 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1115 dest="candidate_pool_size", type="int",
1116 help="Set the candidate pool size")
1118 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1119 help=("Enables LVM and specifies the volume group"
1120 " name (cluster-wide) for disk allocation"
1121 " [%s]" % constants.DEFAULT_VG),
1122 metavar="VG", default=None)
1124 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1125 help="Destroy cluster", action="store_true")
1127 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1128 help="Skip node agreement check (dangerous)",
1129 action="store_true", default=False)
1131 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1132 help="Specify the mac prefix for the instance IP"
1133 " addresses, in the format XX:XX:XX",
1137 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1138 help="Specify the node interface (cluster-wide)"
1139 " on which the master IP address will be added"
1140 " (cluster init default: %s)" %
1141 constants.DEFAULT_BRIDGE,
1145 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1146 help="Specify the netmask of the master IP",
1150 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1151 dest="use_external_mip_script",
1152 help="Specify whether to run a user-provided"
1153 " script for the master IP address turnup and"
1154 " turndown operations",
1155 type="bool", metavar=_YORNO, default=None)
1157 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1158 help="Specify the default directory (cluster-"
1159 "wide) for storing the file-based disks [%s]" %
1160 constants.DEFAULT_FILE_STORAGE_DIR,
1162 default=constants.DEFAULT_FILE_STORAGE_DIR)
1164 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1165 dest="shared_file_storage_dir",
1166 help="Specify the default directory (cluster-"
1167 "wide) for storing the shared file-based"
1169 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1170 metavar="SHAREDDIR",
1171 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1173 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1174 help="Don't modify /etc/hosts",
1175 action="store_false", default=True)
1177 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1178 help="Don't initialize SSH keys",
1179 action="store_false", default=True)
1181 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1182 help="Enable parseable error messages",
1183 action="store_true", default=False)
1185 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1186 help="Skip N+1 memory redundancy tests",
1187 action="store_true", default=False)
1189 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1190 help="Type of reboot: soft/hard/full",
1191 default=constants.INSTANCE_REBOOT_HARD,
1193 choices=list(constants.REBOOT_TYPES))
1195 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1196 dest="ignore_secondaries",
1197 default=False, action="store_true",
1198 help="Ignore errors from secondaries")
1200 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1201 action="store_false", default=True,
1202 help="Don't shutdown the instance (unsafe)")
1204 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1205 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1206 help="Maximum time to wait")
1208 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1209 dest="shutdown_timeout", type="int",
1210 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1211 help="Maximum time to wait for instance shutdown")
1213 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1215 help=("Number of seconds between repetions of the"
# --- Lock-release and certificate/key renewal options ---
1218 EARLY_RELEASE_OPT = cli_option("--early-release",
1219 dest="early_release", default=False,
1220 action="store_true",
1221 help="Release the locks on the secondary"
1224 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1225 dest="new_cluster_cert",
1226 default=False, action="store_true",
1227 help="Generate a new cluster certificate")
1229 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1231 help="File containing new RAPI certificate")
# NOTE(review): default=None (not False) with action="store_true" here —
# presumably intentional tri-state; confirm before "fixing".
1233 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1234 default=None, action="store_true",
1235 help=("Generate a new self-signed RAPI"
1238 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1240 help="File containing new SPICE certificate")
1242 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1244 help="File containing the certificate of the CA"
1245 " which signed the SPICE certificate")
1247 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1248 dest="new_spice_cert", default=None,
1249 action="store_true",
1250 help=("Generate a new self-signed SPICE"
1253 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1254 dest="new_confd_hmac_key",
1255 default=False, action="store_true",
1256 help=("Create a new HMAC key for %s" %
# Loads a replacement cluster domain secret from a file.
# Fixes the duplicated word ("new new") in the user-visible help text;
# the elided default (None, i.e. keep current secret) is restored.
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
# Remaining option declarations, followed by the shared option lists.
# NOTE(review): some statements and list literals below are visibly
# truncated in this extract — restore from the full file.
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
  cli_option("--maintain-node-health", dest="maintain_node_health",
             metavar=_YORNO, default=None, type="bool",
             help="Configure the cluster to automatically maintain node"
             " health, by shutting down unknown instances, shutting down"
             " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
  cli_option("--identify-defaults", dest="identify_defaults",
             default=False, action="store_true",
             help="Identify which saved instance parameters are equal to"
             " the current cluster defaults and set them as such, instead"
             " of marking them as overridden")

# User-id pool management (--uid-pool replaces, --add-uids/--remove-uids
# edit the existing pool).
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", default=constants.IP4_VERSION,
             action="store", dest="primary_ip_version",
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

# Valid choices come from the module-level priority name/value maps.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            help=("Specify disk state information in the"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,

# common instance policy options
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_SPINDLE_RATIO,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of env variables allowed for default args
  @return: a tuple of (function, options, args); all three are None when
      the arguments could not be parsed (usage was printed instead)

  """
  # An env override must correspond to a real command
  assert not (env_override - set(commands))

  # NOTE(review): several lines appear elided in this extract (e.g. the
  # guard for an empty argv before the split below) — confirm against
  # the full file.
  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...
    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # Help text is the fifth element of each command definition
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)
    return None, None, None

  # get command, unalias it, and look it up in commands
  raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))
  # Allow per-command default arguments via GNT_<BINARY>_<CMD> env vars
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

    1. Keep running count of minimum number of values (min_count)
    1. Keep running count of maximum number of values (max_count)
    1. If it has an unlimited number of values

      1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

    1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  """
  # NOTE(review): the early returns and the min/max initializations
  # appear elided in this extract — confirm against the full file.
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

      # An unbounded argument is only valid in the last position
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: the value passed to --node, optionally of the form
      "primary:secondary"
  @return: a two-element sequence (primary, secondary); a list when a
      colon was present (result of C{str.split}), otherwise the tuple
      (value, None)

  """
  # Only split on the first colon, so secondary names may contain colons
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names; "name+variant" for each variant, or just
      [name] when there are no variants

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    # No variants (None or empty list): the bare OS name is the only name
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  @rtype: list
  @return: C{default} when nothing was selected; default plus the extra
      fields when C{selected} starts with "+"; otherwise the
      comma-separated fields from C{selected}

  """
  if selected is None:
    return default

  # A leading "+" means "append to the default fields"
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
#: Decorator that sets up and tears down RPC around the wrapped function
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  # NOTE(review): the tty-failure handling, prompt loop and f.close()
  # appear elided in this extract — confirm against the full file.
  choices = [("y", True, "Perform the operation"),
             ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # Default answer: the last choice's return value
  answer = choices[-1][1]
  # Re-wrap the question text to 70 columns, keeping existing line breaks
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  # Talk to the controlling terminal directly, not stdin/stdout
  f = file("/dev/tty", "a+")
  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])
  f.write("/".join(chars))
  line = f.readline(2).strip().lower()
  # "?" help output: list each choice with its description
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the job ID assigned by the master

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the opcode results of the job on success
  @raise errors.JobLost: if the job disappears while polling
  @raise errors.OpExecError: if the job or one of its opcodes failed

  """
  # NOTE(review): the polling while-loop and several branch headers
  # appear elided in this extract — confirm against the full file.
  prev_job_info = None
  prev_logmsg_serial = None

  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # Remember the highest serial seen, so log messages aren't repeated
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # Job finished: fetch the final status and per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # Report the first failed opcode, re-raising encoded errors if possible
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      raise errors.OpExecError("partial failure (opcode %d): %s" %
      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  # NOTE(review): the "def __init__(self):" header appears to have been
  # lost in this extract — the stray docstring below belongs to it.
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # NOTE(review): the "def __init__(self):" header appears to have been
  # lost in this extract — the stray docstring below belongs to it.
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation that queries a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all job queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callback that forwards log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with each log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    # Forward as a (timestamp, type, message) tuple
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
class StdioJobPollReportCb(JobPollReportCbBase):
  # NOTE(review): the "def __init__(self):" header appears to have been
  # lost in this extract — the docstring below belongs to it.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Flags to print each waiting-state notice only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: one of the C{constants.ELOG_*} values
  @param log_msg: the message payload
  @rtype: string
  @return: the message, safely encoded for terminal output

  """
  # Non-plain-message payloads (e.g. structured data) are stringified first
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: if given, log messages are passed to this callable;
      mutually exclusive with C{reporter}
  @type reporter: L{JobPollReportCbBase}
  @param reporter: if given, the reporting callbacks to use
  @raise errors.ProgrammerError: if both C{feedback_fn} and C{reporter}
      are given

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      # No reporting preference: print to stdout/stderr
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @param cl: luxi client to use; created on demand when None
  @param feedback_fn: optional callable for log messages
  @param opts: command line options, used to set generic opcode options
  @param reporter: optional L{JobPollReportCbBase} instance
  @return: the result of the first (and only) opcode of the job

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  # Wait for the job and return the single opcode's result
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @param op: the opcode to submit
  @param opts: command line options; C{opts.submit_only} selects the
      fire-and-forget path
  @raise JobSubmittedException: in submit-only mode, carrying the job ID

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # Not an error: structured way for the client to print the ID and exit
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    # dry_run/priority are only set when the command defines those options
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
2083 # TODO: Cache object?
2085 client = luxi.Client()
2086 except luxi.NoMasterError:
2087 ss = ssconf.SimpleStore()
2089 # Try to read ssconf file
2092 except errors.ConfigurationError:
2093 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2094 " not part of a cluster")
2096 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2097 if master != myself:
2098 raise errors.OpPrereqError("This is not the master node, please connect"
2099 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): the retcode/obuf/msg initializations and several else
  # branches appear elided in this extract — confirm against the full
  # file.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      obuf.write(" node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Distinguish failing to resolve ourselves from other hosts
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two args means (message, error class)
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: print the submitted job's ID
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands

  """
  # NOTE(review): the try/except headers and several assignments appear
  # elided in this extract — confirm against the full file.
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0])
  binary = sys.argv[0]

  if len(sys.argv) >= 2:
    logname = utils.ShellQuoteArgs([binary, sys.argv[1]])

  cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  binary = "<unknown program>"
  cmdline = "<unknown>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # Apply caller-supplied option overrides on top of the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
2267 def ParseNicOption(optvalue):
2268 """Parses the value of the --net option(s).
2272 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2273 except (TypeError, ValueError), err:
2274 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2276 nics = [{}] * nic_max
2277 for nidx, ndict in optvalue:
2280 if not isinstance(ndict, dict):
2281 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2282 " got %s" % (nidx, ndict))
2284 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  # NOTE(review): many guard lines, try headers and else branches appear
  # elided in this extract — confirm against the full file.
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  if opts.disk_template == constants.DT_DISKLESS:
    # Diskless template: no disk information may be given at all
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    # --disk and the single-disk -s option are mutually exclusive
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max

    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        # "size" and "adopt" are mutually exclusive per disk
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        # Adopted disks keep their current size
        ddict[constants.IDISK_SIZE] = 0
        raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # Mode-specific parameters (create vs import)
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
# Helper for RunWhileClusterStopped: blocks the watcher, stops all Ganeti
# daemons cluster-wide (master daemons first), runs a caller-supplied
# function, then restarts daemons on every node with the master node last.
# NOTE(review): this view is sampled — some original lines are missing
# between the visible ones (e.g. the try:/finally: around Call's body).
2428 class _RunWhileClusterStoppedHelper:
2429 """Helper class for L{RunWhileClusterStopped} to simplify state management
2432 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2433 """Initializes this class.
2435 @type feedback_fn: callable
2436 @param feedback_fn: Feedback function
2437 @type cluster_name: string
2438 @param cluster_name: Cluster name
2439 @type master_node: string
2440 @param master_node Master node name
2441 @type online_nodes: list
2442 @param online_nodes: List of names of online nodes
2445 self.feedback_fn = feedback_fn
2446 self.cluster_name = cluster_name
2447 self.master_node = master_node
2448 self.online_nodes = online_nodes
# SSH runner used to reach non-master nodes; commands on the master run
# locally instead (see _RunCmd).
2450 self.ssh = ssh.SshRunner(self.cluster_name)
# Pre-compute the list of nodes that must be reached via SSH.
2452 self.nonmaster_nodes = [name for name in online_nodes
2453 if name != master_node]
2455 assert self.master_node not in self.nonmaster_nodes
2457 def _RunCmd(self, node_name, cmd):
2458 """Runs a command on the local or a remote machine.
2460 @type node_name: string
2461 @param node_name: Machine name
# cmd is a list of command arguments; it is shell-quoted before being
# passed over SSH. Raises errors.OpExecError on command failure.
2466 if node_name is None or node_name == self.master_node:
2467 # No need to use SSH
2468 result = utils.RunCmd(cmd)
2470 result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
# Build a readable error message including node, exit code and output.
2473 errmsg = ["Failed to run command %s" % result.cmd]
2475 errmsg.append("on node %s" % node_name)
2476 errmsg.append(": exitcode %s and error %s" %
2477 (result.exit_code, result.output))
2478 raise errors.OpExecError(" ".join(errmsg))
2480 def Call(self, fn, *args):
2481 """Call function while all daemons are stopped.
2484 @param fn: Function to be called
# fn receives this helper instance as its first argument (see the
# "return fn(self, *args)" below), so it can use _RunCmd itself.
2487 # Pause watcher by acquiring an exclusive lock on watcher state file
2488 self.feedback_fn("Blocking watcher")
2489 watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2491 # TODO: Currently, this just blocks. There's no timeout.
2492 # TODO: Should it be a shared lock?
2493 watcher_block.Exclusive(blocking=True)
2495 # Stop master daemons, so that no new jobs can come in and all running
2497 self.feedback_fn("Stopping master daemons")
2498 self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2500 # Stop daemons on all nodes
2501 for node_name in self.online_nodes:
2502 self.feedback_fn("Stopping daemons on %s" % node_name)
2503 self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2505 # All daemons are shut down now
2507 return fn(self, *args)
2508 except Exception, err:
# Any failure of fn is reported via feedback_fn; daemons are still
# restarted below before the lock is released.
2509 _, errmsg = FormatError(err)
2510 logging.exception("Caught exception")
2511 self.feedback_fn(errmsg)
2514 # Start cluster again, master node last
2515 for node_name in self.nonmaster_nodes + [self.master_node]:
2516 self.feedback_fn("Starting daemons on %s" % node_name)
2517 self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
# Resume the watcher by releasing the lock.
2520 watcher_block.Close()
# Public entry point: gather cluster topology, then delegate to
# _RunWhileClusterStoppedHelper to run fn with all daemons stopped.
2523 def RunWhileClusterStopped(feedback_fn, fn, *args):
2524 """Calls a function while all cluster daemons are stopped.
2526 @type feedback_fn: callable
2527 @param feedback_fn: Feedback function
2529 @param fn: Function to be called when daemons are stopped
2532 feedback_fn("Gathering cluster information")
2534 # This ensures we're running on the master daemon
# NOTE(review): "cl" is assigned on a line not visible in this sampled
# view (presumably a luxi client creation) — confirm against full source.
2537 (cluster_name, master_node) = \
2538 cl.QueryConfigValues(["cluster_name", "master_node"])
2540 online_nodes = GetOnlineNodes([], cl=cl)
2542 # Don't keep a reference to the client. The master daemon will go away.
2545 assert master_node in online_nodes
2547 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2548 online_nodes).Call(fn, *args)
# Renders rows of data as text lines, either separator-joined or padded to
# computed column widths ("smart" mode when separator is None).
2551 def GenerateTable(headers, fields, separator, data,
2552 numfields=None, unitfields=None,
2554 """Prints a table with headers and different fields.
2557 @param headers: dictionary mapping field names to headers for
2560 @param fields: the field names corresponding to each row in
2562 @param separator: the separator to be used; if this is None,
2563 the default 'smart' algorithm is used which computes optimal
2564 field width, otherwise just the separator is used between
2567 @param data: a list of lists, each sublist being one row to be output
2568 @type numfields: list
2569 @param numfields: a list with the fields that hold numeric
2570 values and thus should be right-aligned
2571 @type unitfields: list
2572 @param unitfields: a list with the fields that hold numeric
2573 values that should be formatted with the units field
2574 @type units: string or None
2575 @param units: the units we should use for formatting, or None for
2576 automatic choice (human-readable for non-separator usage, otherwise
2577 megabytes); this is a one-letter string
# Normalize the (possibly None) field lists into FieldSet matchers, which
# support wildcard matching of field names.
2586 if numfields is None:
2588 if unitfields is None:
2591 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2592 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
# Build one %-format specifier per field: plain "%s" with a separator,
# "%*s" (right-aligned, runtime width) for numeric fields, "%-*s" otherwise.
2595 for field in fields:
2596 if headers and field not in headers:
2597 # TODO: handle better unknown fields (either revert to old
2598 # style of raising exception, or deal more intelligently with
2600 headers[field] = field
2601 if separator is not None:
2602 format_fields.append("%s")
2603 elif numfields.Matches(field):
2604 format_fields.append("%*s")
2606 format_fields.append("%-*s")
2608 if separator is None:
# mlens tracks the maximum observed width per column for smart layout.
2609 mlens = [0 for name in fields]
2610 format_str = " ".join(format_fields)
# Escape literal "%" in the separator so the final %-formatting is safe.
2612 format_str = separator.replace("%", "%%").join(format_fields)
# First pass over the data: stringify values (unit-formatting where
# requested) and record column widths.
2617 for idx, val in enumerate(row):
2618 if unitfields.Matches(fields[idx]):
2621 except (TypeError, ValueError):
2624 val = row[idx] = utils.FormatUnit(val, units)
2625 val = row[idx] = str(val)
2626 if separator is None:
2627 mlens[idx] = max(mlens[idx], len(val))
# Emit the header row, widening columns to fit header text if needed.
2632 for idx, name in enumerate(fields):
2634 if separator is None:
2635 mlens[idx] = max(mlens[idx], len(hdr))
2636 args.append(mlens[idx])
2638 result.append(format_str % tuple(args))
2640 if separator is None:
2641 assert len(mlens) == len(fields)
2643 if fields and not numfields.Matches(fields[-1]):
# Emit the data rows; in smart mode each value is paired with its width.
2649 line = ["-" for _ in fields]
2650 for idx in range(len(fields)):
2651 if separator is None:
2652 args.append(mlens[idx])
2653 args.append(line[idx])
2654 result.append(format_str % tuple(args))
# Boolean-to-string formatter used by _DEFAULT_FORMAT_QUERY below.
# NOTE(review): the function body is not visible in this sampled view.
2659 def _FormatBool(value):
2660 """Formats a boolean value as a string.
# Maps query field types (constants.QFT_*) to a (formatter, align_right)
# pair; QFT_UNIT is deliberately absent and handled dynamically in
# _GetColumnFormatter because it depends on the requested unit.
2668 #: Default formatting for query results; (callback, align right)
2669 _DEFAULT_FORMAT_QUERY = {
2670 constants.QFT_TEXT: (str, False),
2671 constants.QFT_BOOL: (_FormatBool, False),
2672 constants.QFT_NUMBER: (str, True),
2673 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2674 constants.QFT_OTHER: (str, False),
2675 constants.QFT_UNKNOWN: (str, False),
# Resolves the formatting callback for one query field: explicit override
# first, then unit handling, then the static default table.
2679 def _GetColumnFormatter(fdef, override, unit):
2680 """Returns formatting function for a field.
2682 @type fdef: L{objects.QueryFieldDefinition}
2683 @type override: dict
2684 @param override: Dictionary for overriding field formatting functions,
2685 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2687 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2688 @rtype: tuple; (callable, bool)
2689 @return: Returns the function to format a value (takes one parameter) and a
2690 boolean for aligning the value on the right-hand side
# Per-field override wins over any type-based default.
2693 fmt = override.get(fdef.name, None)
2697 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2699 if fdef.kind == constants.QFT_UNIT:
2700 # Can't keep this information in the static dictionary
# Closure captures the caller-supplied unit; unit columns are
# right-aligned like other numeric columns.
2701 return (lambda value: utils.FormatUnit(value, unit), True)
2703 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
# Unrecognized field kind: fail loudly rather than guess a format.
2707 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# Callable wrapper around a field formatter: unpacks (status, value) pairs
# from query results, records the status, and formats either the value or
# a textual error for abnormal statuses.
2710 class _QueryColumnFormatter:
2711 """Callable class for formatting fields of a query.
2714 def __init__(self, fn, status_fn, verbose):
2715 """Initializes this class.
2718 @param fn: Formatting function
2719 @type status_fn: callable
2720 @param status_fn: Function to report fields' status
2721 @type verbose: boolean
2722 @param verbose: whether to use verbose field descriptions or not
2726 self._status_fn = status_fn
2727 self._verbose = verbose
2729 def __call__(self, data):
2730 """Returns a field's string representation.
# data is a (result status, value) tuple as produced by a query.
2733 (status, value) = data
# Report status first so callers can collect statistics (see
# FormatQueryResult's _RecordStatus).
2736 self._status_fn(status)
2738 if status == constants.RS_NORMAL:
2739 return self._fn(value)
2741 assert value is None, \
2742 "Found value %r for abnormal status %s" % (value, status)
2744 return FormatResultError(status, self._verbose)
# Translates an abnormal result status into its human-readable text,
# choosing between the verbose and terse variants from RSS_DESCRIPTION.
2747 def FormatResultError(status, verbose):
2748 """Formats result status other than L{constants.RS_NORMAL}.
2750 @param status: The result status
2751 @type verbose: boolean
2752 @param verbose: Whether to return the verbose text
2753 @return: Text of result status
2756 assert status != constants.RS_NORMAL, \
2757 "FormatResultError called with status equal to constants.RS_NORMAL"
# RSS_DESCRIPTION maps each status to a (verbose, normal) text pair.
2759 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
# Status not present in the description table: programming error.
2761 raise NotImplementedError("Unknown status %s" % status)
# Turns a QueryResponse into table lines plus an overall status (QR_NORMAL
# / QR_INCOMPLETE / unknown-fields), based on per-field result statuses
# collected while formatting.
2768 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2769 header=False, verbose=False):
2770 """Formats data in L{objects.QueryResponse}.
2772 @type result: L{objects.QueryResponse}
2773 @param result: result of query operation
2775 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2776 see L{utils.text.FormatUnit}
2777 @type format_override: dict
2778 @param format_override: Dictionary for overriding field formatting functions,
2779 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2780 @type separator: string or None
2781 @param separator: String used to separate fields
2783 @param header: Whether to output header row
2784 @type verbose: boolean
2785 @param verbose: whether to use verbose field descriptions or not
2794 if format_override is None:
2795 format_override = {}
# Count occurrences of every possible result status; _RecordStatus is
# invoked by _QueryColumnFormatter for each formatted cell.
2797 stats = dict.fromkeys(constants.RS_ALL, 0)
2799 def _RecordStatus(status):
# Build one TableColumn per field definition returned by the query.
2804 for fdef in result.fields:
2805 assert fdef.title and fdef.name
2806 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2807 columns.append(TableColumn(fdef.title,
2808 _QueryColumnFormatter(fn, _RecordStatus,
# Formatting the table populates the statistics as a side effect.
2812 table = FormatTable(result.data, columns, header, separator)
2814 # Collect statistics
2815 assert len(stats) == len(constants.RS_ALL)
2816 assert compat.all(count >= 0 for count in stats.values())
2818 # Determine overall status. If there was no data, unknown fields must be
2819 # detected via the field definitions.
2820 if (stats[constants.RS_UNKNOWN] or
2821 (not result.data and _GetUnknownFields(result.fields))):
2823 elif compat.any(count > 0 for key, count in stats.items()
2824 if key != constants.RS_NORMAL):
2825 status = QR_INCOMPLETE
2829 return (status, table)
# Filters a list of field definitions down to those the server reported
# as unknown (kind == QFT_UNKNOWN).
2832 def _GetUnknownFields(fdefs):
2833 """Returns list of unknown fields included in C{fdefs}.
2835 @type fdefs: list of L{objects.QueryFieldDefinition}
2838 return [fdef for fdef in fdefs
2839 if fdef.kind == constants.QFT_UNKNOWN]
# Emits a stderr warning listing any unknown fields; the (sampled-out)
# return value is presumably the unknown list/flag — confirm in full source.
2842 def _WarnUnknownFields(fdefs):
2843 """Prints a warning to stderr if a query included unknown fields.
2845 @type fdefs: list of L{objects.QueryFieldDefinition}
2848 unknown = _GetUnknownFields(fdefs)
2850 ToStderr("Warning: Queried for unknown fields %s",
2851 utils.CommaJoin(fdef.name for fdef in unknown))
# Generic "list resources" command implementation: builds a query filter
# from names (plus optional extra qfilter), runs the query, formats the
# output and returns an exit code.
2857 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2858 format_override=None, verbose=False, force_filter=False,
2859 namefield=None, qfilter=None):
2860 """Generic implementation for listing all items of a resource.
2862 @param resource: One of L{constants.QR_VIA_LUXI}
2863 @type fields: list of strings
2864 @param fields: List of fields to query for
2865 @type names: list of strings
2866 @param names: Names of items to query for
2867 @type unit: string or None
2868 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2869 None for automatic choice (human-readable for non-separator usage,
2870 otherwise megabytes); this is a one-letter string
2871 @type separator: string or None
2872 @param separator: String used to separate fields
2874 @param header: Whether to show header row
2875 @type force_filter: bool
2876 @param force_filter: Whether to always treat names as filter
2877 @type format_override: dict
2878 @param format_override: Dictionary for overriding field formatting functions,
2879 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2880 @type verbose: boolean
2881 @param verbose: whether to use verbose field descriptions or not
2882 @type namefield: string
2883 @param namefield: Name of field to use for simple filters (see
2884 L{qlang.MakeFilter} for details)
2885 @type qfilter: list or None
2886 @param qfilter: Query filter (in addition to names)
# Combine the name-based filter with any caller-supplied filter via AND.
2892 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield)
2895 qfilter = namefilter
2896 elif namefilter is not None:
2897 qfilter = [qlang.OP_AND, namefilter, qfilter]
2902 response = cl.Query(resource, fields, qfilter)
2904 found_unknown = _WarnUnknownFields(response.fields)
2906 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2908 format_override=format_override,
# Sanity check: unknown-field warning and overall status must agree.
2914 assert ((found_unknown and status == QR_UNKNOWN) or
2915 (not found_unknown and status != QR_UNKNOWN))
2917 if status == QR_UNKNOWN:
2918 return constants.EXIT_UNKNOWN_FIELD
2920 # TODO: Should the list command fail if not all data could be collected?
2921 return constants.EXIT_SUCCESS
# Generic "list-fields" command implementation: queries field definitions
# for a resource and prints a Name/Title/Description table.
2924 def GenericListFields(resource, fields, separator, header, cl=None):
2925 """Generic implementation for listing fields for a resource.
2927 @param resource: One of L{constants.QR_VIA_LUXI}
2928 @type fields: list of strings
2929 @param fields: List of fields to query for
2930 @type separator: string or None
2931 @param separator: String used to separate fields
2933 @param header: Whether to show header row
2942 response = cl.QueryFields(resource, fields)
2944 found_unknown = _WarnUnknownFields(response.fields)
# Fixed three-column layout; all values are plain strings, left-aligned.
2947 TableColumn("Name", str, False),
2948 TableColumn("Title", str, False),
2949 TableColumn("Description", str, False),
2952 rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2954 for line in FormatTable(rows, columns, header, separator):
# Exit code mirrors GenericList: unknown fields are a distinct failure.
2958 return constants.EXIT_UNKNOWN_FIELD
2960 return constants.EXIT_SUCCESS
# Interior of the TableColumn class (the "class TableColumn" header line is
# not visible in this sampled view): holds a title, a per-value formatting
# callable and an alignment flag, consumed by FormatTable.
2964 """Describes a column for L{FormatTable}.
2967 def __init__(self, title, fn, align_right):
2968 """Initializes this class.
2971 @param title: Column title
2973 @param fn: Formatting function
2974 @type align_right: bool
2975 @param align_right: Whether to align values on the right-hand side
# title and fn assignments are sampled out of view here.
2980 self.align_right = align_right
# Builds a %-style format specifier of a fixed width; "sign" (assigned on a
# sampled-out line) selects left ("-") vs right alignment.
2983 def _GetColFormatString(width, align_right):
2984 """Returns the format string for a field.
2992 return "%%%s%ss" % (sign, width)
# Formats rows into text lines using TableColumn descriptors: either joins
# cells with a separator, or pads them to per-column widths.
2995 def FormatTable(rows, columns, header, separator):
2996 """Formats data as a table.
2998 @type rows: list of lists
2999 @param rows: Row data, one list per row
3000 @type columns: list of L{TableColumn}
3001 @param columns: Column descriptions
3003 @param header: Whether to show header row
3004 @type separator: string or None
3005 @param separator: String used to separate columns
# With a header, start from the title row and seed widths with title
# lengths; otherwise widths start at zero.
3009 data = [[col.title for col in columns]]
3010 colwidth = [len(col.title) for col in columns]
3013 colwidth = [0 for _ in columns]
3017 assert len(row) == len(columns)
# Apply each column's formatting callable to its cell.
3019 formatted = [col.format(value) for value, col in zip(row, columns)]
3021 if separator is None:
3022 # Update column widths
3023 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3024 # Modifying a list's items while iterating is fine
3025 colwidth[idx] = max(oldwidth, len(value))
3027 data.append(formatted)
3029 if separator is not None:
3030 # Return early if a separator is used
3031 return [separator.join(row) for row in data]
3033 if columns and not columns[-1].align_right:
3034 # Avoid unnecessary spaces at end of line
3037 # Build format string
3038 fmt = " ".join([_GetColFormatString(width, col.align_right)
3039 for col, width in zip(columns, colwidth)])
3041 return [fmt % tuple(row) for row in data]
# Formats a (seconds, microseconds) pair via utils.FormatTime; the
# fallback for malformed input is on a sampled-out line.
3044 def FormatTimestamp(ts):
3045 """Formats a given timestamp.
3048 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3051 @return: a string with the formatted timestamp
3054 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3058 return utils.FormatTime(sec, usecs=usecs)
# Parses "<number>[suffix]" time specifications into seconds using a
# suffix-to-multiplier map (suffix_map, assigned on a sampled-out line).
3061 def ParseTimespec(value):
3062 """Parse a time specification.
3064 The following suffixed will be recognized:
3072 Without any suffix, the value will be taken to be in seconds.
3077 raise errors.OpPrereqError("Empty time specification passed")
3085 if value[-1] not in suffix_map:
3088 except (TypeError, ValueError):
3089 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
# Known suffix: strip it and scale by its multiplier.
3091 multiplier = suffix_map[value[-1]]
3093 if not value: # no data left after stripping the suffix
3094 raise errors.OpPrereqError("Invalid time specification (only"
3097 value = int(value) * multiplier
3098 except (TypeError, ValueError):
3099 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
# Queries node names (or secondary IPs), filtering out offline nodes and
# optionally the master node and/or nodes outside a given node group.
3103 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3104 filter_master=False, nodegroup=None):
3105 """Returns the names of online nodes.
3107 This function will also log a warning on stderr with the names of
3110 @param nodes: if not empty, use only this subset of nodes (minus the
3112 @param cl: if not None, luxi client to use
3113 @type nowarn: boolean
3114 @param nowarn: by default, this function will output a note with the
3115 offline nodes that are skipped; if this parameter is True the
3116 note is not displayed
3117 @type secondary_ips: boolean
3118 @param secondary_ips: if True, return the secondary IPs instead of the
3119 names, useful for doing network traffic over the replication interface
3121 @type filter_master: boolean
3122 @param filter_master: if True, do not return the master node in the list
3123 (useful in coordination with secondary_ips where we cannot check our
3124 node name against the list)
3125 @type nodegroup: string
3126 @param nodegroup: If set, only return nodes in this node group
# Build up a list of sub-filters, then AND them together below.
3135 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3137 if nodegroup is not None:
# Accept the node group by either its name or its UUID.
3138 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3139 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3142 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3145 if len(qfilter) > 1:
3146 final_filter = [qlang.OP_AND] + qfilter
3148 assert len(qfilter) == 1
3149 final_filter = qfilter[0]
3153 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# Row accessors: each cell is a (status, value) pair, hence the nested
# tuple unpacking below.
3155 def _IsOffline(row):
3156 (_, (_, offline), _) = row
3160 ((_, name), _, _) = row
3164 (_, _, (_, sip)) = row
3167 (offline, online) = compat.partition(result.data, _IsOffline)
3169 if offline and not nowarn:
3170 ToStderr("Note: skipping offline node(s): %s" %
3171 utils.CommaJoin(map(_GetName, offline)))
# fn (name vs secondary-IP accessor) is chosen on a sampled-out line.
3178 return map(fn, online)
# Low-level message writer shared by ToStdout/ToStderr; treats a broken
# pipe (closed terminal) as a normal program exit.
3181 def _ToStream(stream, txt, *args):
3182 """Write a message to a stream, bypassing the logging system
3184 @type stream: file object
3185 @param stream: the file to which we should write
3187 @param txt: the message
# txt is %-formatted with args when args are given.
3193 stream.write(txt % args)
3198 except IOError, err:
3199 if err.errno == errno.EPIPE:
3200 # our terminal went away, we'll exit
3201 sys.exit(constants.EXIT_FAILURE)
# Convenience wrapper: write a %-formatted message to stdout.
3206 def ToStdout(txt, *args):
3207 """Write a message to stdout only, bypassing the logging system
3209 This is just a wrapper over _ToStream.
3212 @param txt: the message
3215 _ToStream(sys.stdout, txt, *args)
# Convenience wrapper: write a %-formatted message to stderr.
3218 def ToStderr(txt, *args):
3219 """Write a message to stderr only, bypassing the logging system
3221 This is just a wrapper over _ToStream.
3224 @param txt: the message
3227 _ToStream(sys.stderr, txt, *args)
# Queues opcodes as jobs, submits them (individually or in bulk), polls
# them to completion and reports per-job results in submission order.
3230 class JobExecutor(object):
3231 """Class which manages the submission and execution of multiple jobs.
3233 Note that instances of this class should not be reused between
3237 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3242 self.verbose = verbose
3245 self.feedback_fn = feedback_fn
# Monotonic counter used to preserve submission order across queues.
3246 self._counter = itertools.count()
# NOTE(review): presumably a @staticmethod — decorator line is sampled out.
3249 def _IfName(name, fmt):
3250 """Helper function for formatting name.
3258 def QueueJob(self, name, *ops):
3259 """Record a job for later submit.
3262 @param name: a description of the job, will be used in WaitJobSet
# Generic opcode options (priority, dry-run, ...) are applied before
# the ops are queued.
3265 SetGenericOpcodeOpts(ops, self.opts)
3266 self.queue.append((self._counter.next(), name, ops))
3268 def AddJobId(self, name, status, job_id):
3269 """Adds a job ID to the internal queue.
3272 self.jobs.append((self._counter.next(), status, job_id, name))
3274 def SubmitPending(self, each=False):
3275 """Submit all pending jobs.
# each=True submits jobs one by one; otherwise a single bulk
# SubmitManyJobs call is used.
3280 for (_, _, ops) in self.queue:
3281 # SubmitJob will remove the success status, but raise an exception if
3282 # the submission fails, so we'll notice that anyway.
3283 results.append([True, self.cl.SubmitJob(ops)[0]])
3285 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3286 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3287 self.jobs.append((idx, status, data, name))
3289 def _ChooseJob(self):
3290 """Choose a non-waiting/queued job to poll next.
3293 assert self.jobs, "_ChooseJob called with empty job list"
# Query status of (a batch of) pending jobs; i[2] is the job ID.
3295 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3299 for job_data, status in zip(self.jobs, result):
3300 if (isinstance(status, list) and status and
3301 status[0] in (constants.JOB_STATUS_QUEUED,
3302 constants.JOB_STATUS_WAITING,
3303 constants.JOB_STATUS_CANCELING)):
3304 # job is still present and waiting
3306 # good candidate found (either running job or lost job)
3307 self.jobs.remove(job_data)
# Nothing running yet: fall back to the oldest job.
3311 return self.jobs.pop(0)
3313 def GetResults(self):
3314 """Wait for and return the results of all jobs.
3317 @return: list of tuples (success, job results), in the same order
3318 as the submitted jobs; if a job has failed, instead of the result
3319 there will be the error message
3323 self.SubmitPending()
3326 ok_jobs = [row[2] for row in self.jobs if row[1]]
3328 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3330 # first, remove any non-submitted jobs
3331 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3332 for idx, _, jid, name in failures:
3333 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3334 results.append((idx, False, jid))
# Poll remaining jobs one at a time, preferring non-waiting ones.
3337 (idx, _, jid, name) = self._ChooseJob()
3338 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3340 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3342 except errors.JobLost, err:
3343 _, job_result = FormatError(err)
3344 ToStderr("Job %s%s has been archived, cannot check its result",
3345 jid, self._IfName(name, " for %s"))
3347 except (errors.GenericError, luxi.ProtocolError), err:
3348 _, job_result = FormatError(err)
3350 # the error message will always be shown, verbose or not
3351 ToStderr("Job %s%s has failed: %s",
3352 jid, self._IfName(name, " for %s"), job_result)
3354 results.append((idx, success, job_result))
3356 # sort based on the index, then drop it
3358 results = [i[1:] for i in results]
3362 def WaitOrShow(self, wait):
3363 """Wait for job results or only print the job IDs.
3366 @param wait: whether to wait or not
3370 return self.GetResults()
3373 self.SubmitPending()
3374 for _, status, result, name in self.jobs:
3376 ToStdout("%s: %s", result, name)
3378 ToStderr("Failure for %s: %s", name, result)
3379 return [row[1:3] for row in self.jobs]
# Recursively pretty-prints a parameter dict into buf, indenting by level
# and marking values inherited from defaults as "default (...)".
3382 def FormatParameterDict(buf, param_dict, actual, level=1):
3383 """Formats a parameter dictionary.
3385 @type buf: L{StringIO}
3386 @param buf: the buffer into which to write
3387 @type param_dict: dict
3388 @param param_dict: the own parameters
3390 @param actual: the current parameter set (including defaults)
3391 @param level: Level of indent
3394 indent = " " * level
# Iterate over the effective ("actual") keys so defaults also show up.
3396 for key in sorted(actual):
3398 buf.write("%s- %s:" % (indent, key))
3400 if isinstance(data, dict) and data:
# Nested dict: recurse one level deeper.
3402 FormatParameterDict(buf, param_dict.get(key, {}), data,
3405 val = param_dict.get(key, "default (%s)" % data)
3406 buf.write(" %s\n" % val)
# Interactive confirmation prompt for bulk operations; offers a "view the
# affected items" choice when the list is long, then asks yes/no.
3409 def ConfirmOperation(names, list_type, text, extra=""):
3410 """Ask the user to confirm an operation on a list of list_type.
3412 This function is used to request confirmation for doing an operation
3413 on a given list of list_type.
3416 @param names: the list of names that we display when
3417 we ask for confirmation
3418 @type list_type: str
3419 @param list_type: Human readable name for elements in the list (e.g. nodes)
3421 @param text: the operation that the user should confirm
3423 @return: True or False depending on user's confirmation.
3427 msg = ("The %s will operate on %d %s.\n%s"
3428 "Do you want to continue?" % (text, count, list_type, extra))
3429 affected = (("\nAffected %s:\n" % list_type) +
3430 "\n".join([" %s" % name for name in names]))
3432 choices = [("y", True, "Yes, execute the %s" % text),
3433 ("n", False, "No, abort the %s" % text)]
# A long list gets a "view" option instead of being shown inline.
3436 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3439 question = msg + affected
3441 choice = AskUser(question, choices)
# After viewing ("v"), re-ask the question including the full list.
3444 choice = AskUser(msg + affected, choices)
# Parses each value of a dict through utils.ParseUnit, leaving
# VALUE_DEFAULT entries alone; used by CreateIPolicyFromOpts below.
3448 def _MaybeParseUnit(elements):
3449 """Parses and returns an array of potential values with units.
3453 for k, v in elements.items():
3454 if v == constants.VALUE_DEFAULT:
3457 parsed[k] = utils.ParseUnit(v)
3461 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3462 ispecs_cpu_count=None,
3463 ispecs_disk_count=None,
3464 ispecs_disk_size=None,
3465 ispecs_nic_count=None,
3466 ipolicy_disk_templates=None,
3467 ipolicy_vcpu_ratio=None,
3468 ipolicy_spindle_ratio=None,
3469 group_ipolicy=False,
3470 allowed_values=None,
3472 """Creation of instance policy based on command line options.
3474 @param fill_all: whether for cluster policies we should ensure that
3475 all values are filled
3481 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3482 if ispecs_disk_size:
3483 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3484 except (TypeError, ValueError, errors.UnitParseError), err:
3485 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3487 (ispecs_disk_size, ispecs_mem_size, err),
3490 # prepare ipolicy dict
3491 ipolicy_transposed = {
3492 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3493 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3494 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3495 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3496 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3499 # first, check that the values given are correct
3501 forced_type = TISPECS_GROUP_TYPES
3503 forced_type = TISPECS_CLUSTER_TYPES
3505 for specs in ipolicy_transposed.values():
3506 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3509 ipolicy_out = objects.MakeEmptyIPolicy()
3510 for name, specs in ipolicy_transposed.iteritems():
3511 assert name in constants.ISPECS_PARAMETERS
3512 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3513 ipolicy_out[key][name] = val
3515 # no filldict for non-dicts
3516 if not group_ipolicy and fill_all:
3517 if ipolicy_disk_templates is None:
3518 ipolicy_disk_templates = constants.DISK_TEMPLATES
3519 if ipolicy_vcpu_ratio is None:
3520 ipolicy_vcpu_ratio = \
3521 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3522 if ipolicy_spindle_ratio is None:
3523 ipolicy_spindle_ratio = \
3524 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
3525 if ipolicy_disk_templates is not None:
3526 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3527 if ipolicy_vcpu_ratio is not None:
3528 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3529 if ipolicy_spindle_ratio is not None:
3530 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3532 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)