4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
48 from optparse import (OptionParser, TitledHelpFormatter,
49 Option, OptionValueError)
# Command line options
# NOTE(review): the string literals below are entries of this module's
# "__all__" export list; the "__all__ = [" opener, the closing "]" and many
# entries are missing from this excerpt -- restore against the full file.
"CLUSTER_DOMAIN_SECRET_OPT",
"FILESTORE_DRIVER_OPT",
"GLOBAL_SHARED_FILEDIR_OPT",
"DEFAULT_IALLOCATOR_OPT",
"IDENTIFY_DEFAULTS_OPT",
"IGNORE_FAILURES_OPT",
"IGNORE_OFFLINE_OPT",
"IGNORE_REMOVE_FAILURES_OPT",
"IGNORE_SECONDARIES_OPT",
"MAINTAIN_NODE_HEALTH_OPT",
"MASTER_NETMASK_OPT",
"MIGRATION_MODE_OPT",
"NEW_CLUSTER_CERT_OPT",
"NEW_CLUSTER_DOMAIN_SECRET_OPT",
"NEW_CONFD_HMAC_KEY_OPT",
"NEW_SPICE_CERT_OPT",
"NODE_FORCE_JOIN_OPT",
"NODE_PLACEMENT_OPT",
"NODRBD_STORAGE_OPT",
"NOMODIFY_ETCHOSTS_OPT",
"NOMODIFY_SSH_SETUP_OPT",
"NORUNTIME_CHGS_OPT",
"NOSSH_KEYCHECK_OPT",
"PREALLOC_WIPE_DISKS_OPT",
"PRIMARY_IP_VERSION_OPT",
"REMOVE_INSTANCE_OPT",
"SECONDARY_ONLY_OPT",
"SHUTDOWN_TIMEOUT_OPT",
"SPECS_CPU_COUNT_OPT",
"SPECS_DISK_COUNT_OPT",
"SPECS_DISK_SIZE_OPT",
"SPECS_MEM_SIZE_OPT",
"SPECS_NIC_COUNT_OPT",
"IPOLICY_DISK_TEMPLATES",
"IPOLICY_VCPU_RATIO",
"STARTUP_PAUSED_OPT",
"USE_EXTERNAL_MIP_SCRIPT",
"IGNORE_IPOLICY_OPT",
"INSTANCE_POLICY_OPTS",
# Generic functions for CLI programs
"CreateIPolicyFromOpts",
"GenericInstanceCreate",
"JobSubmittedException",
"RunWhileClusterStopped",
# Formatting functions
"ToStderr", "ToStdout",
"FormatParameterDict",
# command line options support infrastructure
"ARGS_MANY_INSTANCES",
"OPT_COMPL_INST_ADD_NODES",
"OPT_COMPL_MANY_NODES",
"OPT_COMPL_ONE_IALLOCATOR",
"OPT_COMPL_ONE_INSTANCE",
"OPT_COMPL_ONE_NODE",
"OPT_COMPL_ONE_NODEGROUP",
"COMMON_CREATE_OPTS",
#: Priorities (sorted)
# NOTE(review): the "_PRIORITY_NAMES = [" opener and the list closer are
# missing from this excerpt; the (name, value) pairs below feed
# _PRIONAME_TO_VALUE a few lines down.
("low", constants.OP_PRIO_LOW),
("normal", constants.OP_PRIO_NORMAL),
("high", constants.OP_PRIO_HIGH),

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
# NOTE(review): the opening names of this tuple assignment (presumably the
# other two QR_* constants) are missing from this excerpt.
QR_INCOMPLETE) = range(3)
#: Maximum batch size for ChooseJob
# NOTE(review): the assignment this comment documents is missing from this
# excerpt.

# constants used to create InstancePolicy dictionary
# Value-type maps for validating min/max(/std) instance-spec bounds.
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
# NOTE(review): the closing "}" of the dict above is missing from this
# excerpt.

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
# NOTE(review): the closing "}" of the dict above is missing from this
# excerpt.
class _Argument(object):
  """Base class for positional-argument descriptors.

  NOTE(review): the class header and the __init__ body were missing from
  the reviewed excerpt; they are reconstructed here from the __repr__
  format and from ArgSuggest's call to _Argument.__init__(min=..., max=...).
  Confirm against the full file.

  """
  def __init__(self, min=0, max=None):  # pylint: disable=W0622
    # min/max bound how many times the argument may occur (max=None means
    # unbounded), cf. ARGS_ONE_* below using min=1, max=1
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices: iterable of suggested values (used for shell completion)
    self.choices = choices

  def __repr__(self):
    # NOTE(review): the "def __repr__(self):" line was missing from the
    # reviewed excerpt and has been restored; confirm against the full file.
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
350 class ArgNode(_Argument):
class ArgGroup(_Argument):
  """Node group argument.

  """
362 class ArgJobId(_Argument):
class ArgFile(_Argument):
  """File path argument.

  """
374 class ArgCommand(_Argument):
380 class ArgHost(_Argument):
386 class ArgOs(_Argument):
# Argument-specification shortcuts shared by the per-command definitions;
# the *_ONE_* variants require exactly one occurrence (min=1, max=1).
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# NOTE(review): at least one ARGS_* definition between the lines above and
# below appears to be missing from this excerpt.
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a "tag_type"
      attribute (checked below)
  @param args: remaining positional arguments; presumably consumed to name
      the tagged object -- TODO confirm against the full file

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  # NOTE(review): the assignment of "kind" (presumably from opts.tag_type)
  # and the bodies/return values of the branches below are missing from
  # this excerpt; the code as shown is not runnable.
  if kind == constants.TAG_CLUSTER:
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_INSTANCE):
    raise errors.OpPrereqError("no arguments passed to the command")
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  # NOTE(review): the guard for fname being unset, the stdin ('-') handling
  # mentioned in the docstring, the "new_data" initialisation and the
  # read-loop control lines are missing from this excerpt.
  new_fh = open(fname, "r")
  # we don't use the nice 'new_data = [line.strip() for line in fh]'
  # because of python bug 1633941
  line = new_fh.readline()
  new_data.append(line.strip())
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # NOTE(review): the construction of the client "cl" (presumably a luxi
  # client) and the sorting/printing of "result" are missing from this
  # excerpt.
  result = cl.QueryTags(kind, name)
  result = list(result)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # merge in tags read from opts.tags_source, if any
  _ExtendTags(opts, args)
  # NOTE(review): the guard in front of this raise (presumably
  # "if not args:") is missing from this excerpt.
  raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # merge in tags read from opts.tags_source, if any
  _ExtendTags(opts, args)
  # NOTE(review): the guard in front of this raise (presumably
  # "if not args:") is missing from this excerpt.
  raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
507 def check_unit(option, opt, value): # pylint: disable=W0613
508 """OptParsers custom converter for units.
512 return utils.ParseUnit(value)
513 except errors.UnitParseError, err:
514 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @param data: a string of the format key=val,key=val,...
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  # NOTE(review): the accumulator initialisation ("kv_dict = ..."), the
  # branch structure separating the "key=val" case from the bare-key cases,
  # the duplicate-key condition, the dict insertion and the final return
  # are missing from this excerpt; the code as shown is not runnable.
  for elem in utils.UnescapeAndSplit(data, sep=","):
    key, val = elem.split("=", 1)
    if elem.startswith(NO_PREFIX):
      # "no_foo" means foo=False with the prefix stripped
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      # "-foo" (UN_PREFIX) means foo=None, i.e. reset to default
      key, val = elem[len(UN_PREFIX):], None
    key, val = elem, True
    raise errors.ParameterError("Duplicate key '%s' in option %s" %
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  # NOTE(review): the condition choosing between the two assignments below
  # (presence of ":" in the value), the "if rest:" guards around the raises,
  # the "else:" structure and the final "return retval" are missing from
  # this excerpt; the code as shown is not runnable.
  ident, rest = value, ""
  ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    msg = "Cannot pass options when removing parameter groups: %s" % value
    raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    msg = "Cannot pass options when removing parameter groups: %s" % value
    raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  kv_dict = _SplitKeyVal(opt, rest)
  retval = (ident, kv_dict)
def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @param option: the optparse Option instance (unused, required signature)
  @param opt: the option name, forwarded for error messages
  @param value: the raw "key=val,key=val" string

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value):  # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raise errors.ParameterError: if the value is neither a recognised
      true value nor a recognised false value

  """
  value = value.lower()
  # NOTE(review): the two return statements were missing from the reviewed
  # excerpt and have been restored from the docstring contract.
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value):  # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  @return: the list of unescaped, comma-separated elements; an empty
      string yields an empty list

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  # NOTE(review): this guard was missing from the reviewed excerpt and has
  # been restored per the comment above.
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value):  # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  @return: the "default" marker string unchanged, or the value parsed as
      a float

  """
  value = value.lower()

  # NOTE(review): the return statements were missing from the reviewed
  # excerpt and have been restored; confirm against the full file.
  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
# NOTE(review): two tuple members (between MANY_NODES/ONE_INSTANCE and
# ONE_INSTANCE/ONE_IALLOCATOR, to account for range(100, 107) = 7 values),
# the matching frozenset entries and the closing "])" of the frozenset are
# missing from this excerpt.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
class CliOption(Option):
  """Custom option class for optparse.

  Adds the "completion_suggest" attribute and registers the custom value
  types implemented by the check_* converters in this module.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # NOTE(review): the TYPES contents and the ATTRS closer were missing from
  # the reviewed excerpt; reconstructed from the TYPE_CHECKER keys below.
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


# NOTE(review): in this and the following option sections, several
# cli_option() calls are missing continuation lines (trailing help-string
# fragments or keyword arguments) in this excerpt; such statements are
# syntactically incomplete here and must be restored against the full file.

# Generic output/confirmation options shared by most client scripts.
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
    help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
    action="store_true", dest="no_headers",
    help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
    action="store", dest="separator",
    help=("Separator between output fields"
          " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
    dest="units", choices=("h", "m", "g", "t"),
    help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
    type="string", metavar="FIELDS",
    help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
    default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
    default=False, help="Do not require confirmation")

# incomplete in this excerpt (help string unterminated)
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
    action="store_true", default=False,
    help=("Ignore offline nodes and do as much"

# incomplete in this excerpt (help string unterminated)
TAG_ADD_OPT = cli_option("--tags", dest="tags",
    default=None, help="Comma-separated list of instance"

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
    default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
    default=False, action="store_true",
    help=("Submit the job and return the job ID, but"
          " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
    default=False, action="store_true",
    help=("Grab locks while doing the queries"
          " in order to ensure more consistent results"))
# NOTE(review): several statements below are missing continuation lines or
# keyword arguments in this excerpt (marked individually).

# incomplete in this excerpt (dest/action line and help tail missing)
DRY_RUN_OPT = cli_option("--dry-run", default=False,
    help=("Do not execute the operation, just run the"
          " check steps and verify if it could be"

# incomplete in this excerpt (action line missing)
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
    help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
    action="store_true", dest="simulate_errors",
    help="Debugging option that makes the operation"
         " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
    default=True, action="store_false",
    help="Don't wait for sync (DANGEROUS!)")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
    action="store_true", default=False,
    help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
    action="store_true", default=False,
    help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
    help=("Custom disk setup (%s)" %
          utils.CommaJoin(constants.DISK_TEMPLATES)),
    default=None, metavar="TEMPL",
    choices=list(constants.DISK_TEMPLATES))

# incomplete in this excerpt (help tail missing)
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
    help="Do not create any network cards for"

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
    help="Relative path under default cluster-wide"
         " file storage dir to store file-based disks",
    default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
    help="Driver to use for image files",
    default="loop", metavar="<DRIVER>",
    choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
    help="Select nodes for the instance automatically"
         " using the <NAME> iallocator plugin",
    default=None, type="string",
    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

# incomplete in this excerpt (metavar line missing)
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
    help="Set the default instance allocator plugin",
    default=None, type="string",
    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
# OS / parameter options.
# NOTE(review): several statements below are missing continuation lines or
# keyword arguments in this excerpt (marked individually).

# incomplete in this excerpt (default/metavar line missing)
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
    type="keyval", default={},
    help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
    action="store_true", default=False,
    help="Force an unknown variant")

# incomplete in this excerpt (help tail missing)
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
    action="store_true", default=False,
    help="Do not install the OS (will"

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
    dest="allow_runtime_chgs",
    default=True, action="store_false",
    help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
    type="keyval", default={},
    help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
    default={}, dest="hvparams",
    help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
    help="Disk template parameters, in the format"
         " template:option=value,option=value,...",
    type="identkeyval", action="append", default=[])

# Instance-specs options feeding the instance policy (ipolicy).
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
    type="keyval", default={},
    help="Memory size specs: list of key=value,"
         " where key is one of min, max, std"
         " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
    type="keyval", default={},
    help="CPU count specs: list of key=value,"
         " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
    dest="ispecs_disk_count",
    type="keyval", default={},
    help="Disk count specs: list of key=value,"
         " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
    type="keyval", default={},
    help="Disk size specs: list of key=value,"
         " where key is one of min, max, std"
         " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
    type="keyval", default={},
    help="NIC count specs: list of key=value,"
         " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
    dest="ipolicy_disk_templates",
    type="list", default=None,
    help="Comma-separated list of"
         " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
    dest="ipolicy_vcpu_ratio",
    type="maybefloat", default=None,
    help="The maximum allowed vcpu-to-cpu ratio")

# incomplete in this excerpt (help tail missing)
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
    dest="ipolicy_spindle_ratio",
    type="maybefloat", default=None,
    help=("The maximum allowed instances to"
# Hypervisor / NIC / disk / node-placement options.
# NOTE(review): several statements below are missing continuation lines or
# keyword arguments in this excerpt (marked individually).

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
    help="Hypervisor and hypervisor options, in the"
         " format hypervisor:option=value,option=value,...",
    default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
    help="Hypervisor and hypervisor options, in the"
         " format hypervisor:option=value,option=value,...",
    default=[], action="append", type="identkeyval")

# incomplete in this excerpt (help tail missing)
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
    action="store_false",
    help="Don't check that the instance's IP"

# incomplete in this excerpt (help tail missing)
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
    default=True, action="store_false",
    help="Don't check that the instance's name"

NET_OPT = cli_option("--net",
    help="NIC parameters", default=[],
    dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
    dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
    help="Comma-separated list of disks"
         " indices to act on (e.g. 0,2) (optional,"
         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
    help="Enforces a single-disk configuration using the"
         " given disk size, in MiB unless a suffix is used",
    default=None, type="unit", metavar="<size>")

# incomplete in this excerpt (help tail missing)
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
    dest="ignore_consistency",
    action="store_true", default=False,
    help="Ignore the consistency of the disks on"

# incomplete in this excerpt (help tail missing)
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
    dest="allow_failover",
    action="store_true", default=False,
    help="If migration is not possible fallback to"

NONLIVE_OPT = cli_option("--non-live", dest="live",
    default=True, action="store_false",
    help="Do a non-live migration (this usually means"
         " freeze the instance, save the state, transfer and"
         " only then resume running on the secondary node)")

# incomplete in this excerpt (default line missing)
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
    choices=list(constants.HT_MIGRATION_MODES),
    help="Override default migration mode (choose"
         " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
    help="Target node and optional secondary node",
    metavar="<pnode>[:<snode>]",
    completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
    action="append", metavar="<node>",
    help="Use only this node (can be used multiple"
         " times, if not given defaults to all nodes)",
    completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
# incomplete in this excerpt (dest line missing)
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
    help="Node group (name or uuid)",
    metavar="<nodegroup>",
    default=None, type="string",
    completion_suggest=OPT_COMPL_ONE_NODEGROUP)

# incomplete in this excerpt (metavar line missing)
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
    completion_suggest=OPT_COMPL_ONE_NODE)
# Instance lifecycle / removal / disk-replacement options.
# NOTE(review): several statements below are missing continuation lines in
# this excerpt (marked individually).

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
    action="store_false",
    help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
    action="store_true", default=False,
    help="Show command instead of executing it")

# incomplete in this excerpt (help tail missing)
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
    default=False, action="store_true",
    help="Instead of performing the migration, try to"
         " recover from a failed cleanup. This is safe"
         " to run even if the instance is healthy, but it"
         " will create extra replication traffic and "
         " disrupt briefly the replication (like during the"

STATIC_OPT = cli_option("-s", "--static", dest="static",
    action="store_true", default=False,
    help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
    default=False, action="store_true",
    help="Show info on all instances on the cluster."
         " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
    action="store_true", default=False,
    help="Interactive OS reinstall, lists available"
         " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
    action="store_true", default=False,
    help="Remove the instance from the cluster"
         " configuration even if there are failures"
         " during the removal process")

# incomplete in this excerpt (help tail missing)
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
    dest="ignore_remove_failures",
    action="store_true", default=False,
    help="Remove the instance from the"
         " cluster configuration even if there"
         " are failures during the removal"

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
    action="store_true", default=False,
    help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
    help="Specifies the new node for the instance",
    metavar="NODE", default=None,
    completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
    help="Specifies the new secondary node",
    metavar="NODE", default=None,
    completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
    default=False, action="store_true",
    help="Replace the disk(s) on the primary"
         " node (applies only to internally mirrored"
         " disk templates, e.g. %s)" %
         utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
    default=False, action="store_true",
    help="Replace the disk(s) on the secondary"
         " node (applies only to internally mirrored"
         " disk templates, e.g. %s)" %
         utils.CommaJoin(constants.DTS_INT_MIRROR))

# incomplete in this excerpt (help tail missing)
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
    default=False, action="store_true",
    help="Lock all nodes and auto-promote as needed"

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
    default=False, action="store_true",
    help="Automatically replace faulty disks"
         " (applies only to internally mirrored"
         " disk templates, e.g. %s)" %
         utils.CommaJoin(constants.DTS_INT_MIRROR))
# Node management / cluster-configuration options.
# NOTE(review): several statements below are missing continuation lines or
# keyword arguments in this excerpt (marked individually). _YORNO is
# presumably a "yes|no" metavar constant defined earlier in the full file.

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
    default=False, action="store_true",
    help="Ignore current recorded size"
         " (useful for forcing activation when"
         " the recorded size is wrong)")

# incomplete in this excerpt (metavar line missing)
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
    completion_suggest=OPT_COMPL_ONE_NODE)

# incomplete in this excerpt (remaining arguments and closer missing)
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
    help="Specify the secondary ip for the node",
    metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
    default=False, action="store_true",
    help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
    default=True, action="store_false",
    help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
    default=False, action="store_true",
    help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
    type="bool", default=None, metavar=_YORNO,
    help="Set the master_candidate flag on the node")

# incomplete in this excerpt (help tail missing)
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
    type="bool", default=None,
    help=("Set the offline flag on the node"
          " (cluster does not communicate with offline"

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
    type="bool", default=None,
    help=("Set the drained flag on the node"
          " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
    type="bool", default=None, metavar=_YORNO,
    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
    type="bool", default=None, metavar=_YORNO,
    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
    type="bool", default=None, metavar=_YORNO,
    help="Set the allocatable flag on a volume")

# incomplete in this excerpt (a help continuation line is missing)
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
    help="Disable support for lvm based instances"
    action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
    dest="enabled_hypervisors",
    help="Comma-separated list of hypervisors",
    type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
    type="keyval", default={},
    help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
    dest="candidate_pool_size", type="int",
    help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
    help=("Enables LVM and specifies the volume group"
          " name (cluster-wide) for disk allocation"
          " [%s]" % constants.DEFAULT_VG),
    metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
    help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
    help="Skip node agreement check (dangerous)",
    action="store_true", default=False)
# Cluster-init / verification / reboot-shutdown options.
# NOTE(review): several statements below are missing continuation lines or
# keyword arguments in this excerpt (marked individually).

# incomplete in this excerpt (metavar/default lines and closer missing)
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
    help="Specify the mac prefix for the instance IP"
         " addresses, in the format XX:XX:XX",

# incomplete in this excerpt (metavar/default lines and closer missing)
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
    help="Specify the node interface (cluster-wide)"
         " on which the master IP address will be added"
         " (cluster init default: %s)" %
         constants.DEFAULT_BRIDGE,

# incomplete in this excerpt (type/default lines and closer missing)
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
    help="Specify the netmask of the master IP",

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
    dest="use_external_mip_script",
    help="Specify whether to run a user-provided"
         " script for the master IP address turnup and"
         " turndown operations",
    type="bool", metavar=_YORNO, default=None)

# incomplete in this excerpt (metavar line missing)
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
    help="Specify the default directory (cluster-"
         "wide) for storing the file-based disks [%s]" %
         constants.DEFAULT_FILE_STORAGE_DIR,
    default=constants.DEFAULT_FILE_STORAGE_DIR)

# incomplete in this excerpt (a help continuation line between "file-based"
# and the constant is missing, leaving the "%" formatting broken)
GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
    dest="shared_file_storage_dir",
    help="Specify the default directory (cluster-"
         "wide) for storing the shared file-based"
         constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
    metavar="SHAREDDIR",
    default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
    help="Don't modify /etc/hosts",
    action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
    help="Don't initialize SSH keys",
    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
    help="Enable parseable error messages",
    action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
    help="Skip N+1 memory redundancy tests",
    action="store_true", default=False)

# incomplete in this excerpt (metavar line missing)
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
    help="Type of reboot: soft/hard/full",
    default=constants.INSTANCE_REBOOT_HARD,
    choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
    dest="ignore_secondaries",
    default=False, action="store_true",
    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
    action="store_false", default=True,
    help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
    default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
    help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
    dest="shutdown_timeout", type="int",
    default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
    help="Maximum time to wait for instance shutdown")
1208 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1210 help=("Number of seconds between repetions of the"
1213 EARLY_RELEASE_OPT = cli_option("--early-release",
1214 dest="early_release", default=False,
1215 action="store_true",
1216 help="Release the locks on the secondary"
1219 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1220 dest="new_cluster_cert",
1221 default=False, action="store_true",
1222 help="Generate a new cluster certificate")
1224 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1226 help="File containing new RAPI certificate")
1228 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1229 default=None, action="store_true",
1230 help=("Generate a new self-signed RAPI"
1233 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1235 help="File containing new SPICE certificate")
1237 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1239 help="File containing the certificate of the CA"
1240 " which signed the SPICE certificate")
1242 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1243 dest="new_spice_cert", default=None,
1244 action="store_true",
1245 help=("Generate a new self-signed SPICE"
1248 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1249 dest="new_confd_hmac_key",
1250 default=False, action="store_true",
1251 help=("Create a new HMAC key for %s" %
# Fix: help text contained a duplicated word ("new new")
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1260 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1261 dest="new_cluster_domain_secret",
1262 default=False, action="store_true",
1263 help=("Create a new cluster domain"
1266 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1267 dest="use_replication_network",
1268 help="Whether to use the replication network"
1269 " for talking to the nodes",
1270 action="store_true", default=False)
1272 MAINTAIN_NODE_HEALTH_OPT = \
1273 cli_option("--maintain-node-health", dest="maintain_node_health",
1274 metavar=_YORNO, default=None, type="bool",
1275 help="Configure the cluster to automatically maintain node"
1276 " health, by shutting down unknown instances, shutting down"
1277 " unknown DRBD devices, etc.")
1279 IDENTIFY_DEFAULTS_OPT = \
1280 cli_option("--identify-defaults", dest="identify_defaults",
1281 default=False, action="store_true",
1282 help="Identify which saved instance parameters are equal to"
1283 " the current cluster defaults and set them as such, instead"
1284 " of marking them as overridden")
1286 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1287 action="store", dest="uid_pool",
1288 help=("A list of user-ids or user-id"
1289 " ranges separated by commas"))
1291 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1292 action="store", dest="add_uids",
1293 help=("A list of user-ids or user-id"
1294 " ranges separated by commas, to be"
1295 " added to the user-id pool"))
1297 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1298 action="store", dest="remove_uids",
1299 help=("A list of user-ids or user-id"
1300 " ranges separated by commas, to be"
1301 " removed from the user-id pool"))
1303 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1304 action="store", dest="reserved_lvs",
1305 help=("A comma-separated list of reserved"
1306 " logical volumes names, that will be"
1307 " ignored by cluster verify"))
1309 ROMAN_OPT = cli_option("--roman",
1310 dest="roman_integers", default=False,
1311 action="store_true",
1312 help="Use roman numbers for positive integers")
1314 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1315 action="store", default=None,
1316 help="Specifies usermode helper for DRBD")
1318 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1319 action="store_false", default=True,
1320 help="Disable support for DRBD")
1322 PRIMARY_IP_VERSION_OPT = \
1323 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1324 action="store", dest="primary_ip_version",
1325 metavar="%d|%d" % (constants.IP4_VERSION,
1326 constants.IP6_VERSION),
1327 help="Cluster-wide IP version for primary IP")
1329 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1330 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1331 choices=_PRIONAME_TO_VALUE.keys(),
1332 help="Priority for opcode processing")
1334 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1335 type="bool", default=None, metavar=_YORNO,
1336 help="Sets the hidden flag on the OS")
1338 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1339 type="bool", default=None, metavar=_YORNO,
1340 help="Sets the blacklisted flag on the OS")
1342 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1343 type="bool", metavar=_YORNO,
1344 dest="prealloc_wipe_disks",
1345 help=("Wipe disks prior to instance"
1348 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1349 type="keyval", default=None,
1350 help="Node parameters")
1352 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1353 action="store", metavar="POLICY", default=None,
1354 help="Allocation policy for the node group")
1356 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1357 type="bool", metavar=_YORNO,
1358 dest="node_powered",
1359 help="Specify if the SoR for node is powered")
1361 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1362 default=constants.OOB_TIMEOUT,
1363 help="Maximum time to wait for out-of-band helper")
1365 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1366 default=constants.OOB_POWER_DELAY,
1367 help="Time in seconds to wait between power-ons")
1369 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1370 action="store_true", default=False,
1371 help=("Whether command argument should be treated"
1374 NO_REMEMBER_OPT = cli_option("--no-remember",
1376 action="store_true", default=False,
1377 help="Perform but do not record the change"
1378 " in the configuration")
1380 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1381 default=False, action="store_true",
1382 help="Evacuate primary instances only")
1384 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1385 default=False, action="store_true",
1386 help="Evacuate secondary instances only"
1387 " (applies only to internally mirrored"
1388 " disk templates, e.g. %s)" %
1389 utils.CommaJoin(constants.DTS_INT_MIRROR))
1391 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1392 action="store_true", default=False,
1393 help="Pause instance at startup")
1395 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1396 help="Destination node group (name or uuid)",
1397 default=None, action="append",
1398 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1400 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1401 action="append", dest="ignore_errors",
1402 choices=list(constants.CV_ALL_ECODES_STRINGS),
1403 help="Error code to be ignored")
1405 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1407 help=("Specify disk state information in the"
1409 " storage_type/identifier:option=value,...;"
1410 " note this is unused for now"),
1413 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1415 help=("Specify hypervisor state information in the"
1416 " format hypervisor:option=value,...;"
1417 " note this is unused for now"),
1420 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1421 action="store_true", default=False,
1422 help="Ignore instance policy violations")
1424 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1425 help="Sets the instance's runtime memory,"
1426 " ballooning it up or down to the new value",
1427 default=None, type="unit", metavar="<size>")
1429 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1430 action="store_true", default=False,
1431 help="Marks the grow as absolute instead of the"
1432 " (default) relative mode")
1434 #: Options provided by all commands
1435 COMMON_OPTS = [DEBUG_OPT]
1437 # common options for creating instances. add and import then add their own
1439 COMMON_CREATE_OPTS = [
1444 FILESTORE_DRIVER_OPT,
1461 # common instance policy options
1462 INSTANCE_POLICY_OPTS = [
1463 SPECS_CPU_COUNT_OPT,
1464 SPECS_DISK_COUNT_OPT,
1465 SPECS_DISK_SIZE_OPT,
1467 SPECS_NIC_COUNT_OPT,
1468 IPOLICY_DISK_TEMPLATES,
1470 IPOLICY_SPINDLE_RATIO,
1474 def _ParseArgs(argv, commands, aliases, env_override):
1475 """Parser for the command line arguments.
1477 This function parses the arguments and returns the function which
1478 must be executed together with its (modified) arguments.
1480 @param argv: the command line
1481 @param commands: dictionary with special contents, see the design
1482 doc for cmdline handling
1483 @param aliases: dictionary with command aliases {'alias': 'target, ...}
1484 @param env_override: list of env variables allowed for default args
1487 assert not (env_override - set(commands))
1490 binary = "<command>"
1492 binary = argv[0].split("/")[-1]
1494 if len(argv) > 1 and argv[1] == "--version":
1495 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1496 constants.RELEASE_VERSION)
1497 # Quit right away. That way we don't have to care about this special
1498 # argument. optparse.py does it the same.
1501 if len(argv) < 2 or not (argv[1] in commands or
1502 argv[1] in aliases):
1503 # let's do a nice thing
1504 sortedcmds = commands.keys()
1507 ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1508 ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1511 # compute the max line length for cmd + usage
1512 mlen = max([len(" %s" % cmd) for cmd in commands])
1513 mlen = min(60, mlen) # should not get here...
1515 # and format a nice command list
1516 ToStdout("Commands:")
1517 for cmd in sortedcmds:
1518 cmdstr = " %s" % (cmd,)
1519 help_text = commands[cmd][4]
1520 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1521 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1522 for line in help_lines:
1523 ToStdout("%-*s %s", mlen, "", line)
1527 return None, None, None
1529 # get command, unalias it, and look it up in commands
1533 raise errors.ProgrammerError("Alias '%s' overrides an existing"
1536 if aliases[cmd] not in commands:
1537 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1538 " command '%s'" % (cmd, aliases[cmd]))
1542 if cmd in env_override:
1543 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1544 env_args = os.environ.get(args_env_name)
1546 argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))
1548 func, args_def, parser_opts, usage, description = commands[cmd]
1549 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1550 description=description,
1551 formatter=TitledHelpFormatter(),
1552 usage="%%prog %s %s" % (cmd, usage))
1553 parser.disable_interspersed_args()
1554 options, args = parser.parse_args(args=argv[1:])
1556 if not _CheckArguments(cmd, args_def, args):
1557 return None, None, None
1559 return func, options, args
1562 def _CheckArguments(cmd, args_def, args):
1563 """Verifies the arguments using the argument definition.
1567 1. Abort with error if values specified by user but none expected.
1569 1. For each argument in definition
1571 1. Keep running count of minimum number of values (min_count)
1572 1. Keep running count of maximum number of values (max_count)
1573 1. If it has an unlimited number of values
1575 1. Abort with error if it's not the last argument in the definition
1577 1. If last argument has limited number of values
1579 1. Abort with error if number of values doesn't match or is too large
1581 1. Abort with error if user didn't pass enough values (min_count)
1584 if args and not args_def:
1585 ToStderr("Error: Command %s expects no arguments", cmd)
1592 last_idx = len(args_def) - 1
1594 for idx, arg in enumerate(args_def):
1595 if min_count is None:
1597 elif arg.min is not None:
1598 min_count += arg.min
1600 if max_count is None:
1602 elif arg.max is not None:
1603 max_count += arg.max
1606 check_max = (arg.max is not None)
1608 elif arg.max is None:
1609 raise errors.ProgrammerError("Only the last argument can have max=None")
1612 # Command with exact number of arguments
1613 if (min_count is not None and max_count is not None and
1614 min_count == max_count and len(args) != min_count):
1615 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1618 # Command with limited number of arguments
1619 if max_count is not None and len(args) > max_count:
1620 ToStderr("Error: Command %s expects only %d argument(s)",
1624 # Command with some required arguments
1625 if min_count is not None and len(args) < min_count:
1626 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @param value: the option value, possibly in "primary:secondary" form
  @return: a pair of (primary, secondary); when no colon separator is
      present, secondary is C{None}

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # Without variants the OS is known only under its base name
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @param default: Default fields
  @return: the default fields when nothing was selected, the default
      fields plus the user's additions when the selection starts with
      "+", otherwise exactly the user-selected fields

  """
  if selected is None:
    return default
  fields = selected.split(",")
  if selected.startswith("+"):
    # "+a,b" means: the default fields, extended with a and b
    fields[0] = fields[0][1:]
    return default + fields
  return fields
1678 UsesRPC = rpc.RunWithRPC
1681 def AskUser(text, choices=None):
1682 """Ask the user a question.
1684 @param text: the question to ask
1686 @param choices: list with elements tuples (input_char, return_value,
1687 description); if not given, it will default to: [('y', True,
1688 'Perform the operation'), ('n', False, 'Do no do the operation')];
1689 note that the '?' char is reserved for help
1691 @return: one of the return values from the choices list; if input is
1692 not possible (i.e. not running with a tty, we return the last
1697 choices = [("y", True, "Perform the operation"),
1698 ("n", False, "Do not perform the operation")]
1699 if not choices or not isinstance(choices, list):
1700 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1701 for entry in choices:
1702 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1703 raise errors.ProgrammerError("Invalid choices element to AskUser")
1705 answer = choices[-1][1]
1707 for line in text.splitlines():
1708 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1709 text = "\n".join(new_text)
1711 f = file("/dev/tty", "a+")
1715 chars = [entry[0] for entry in choices]
1716 chars[-1] = "[%s]" % chars[-1]
1718 maps = dict([(entry[0], entry[1]) for entry in choices])
1722 f.write("/".join(chars))
1724 line = f.readline(2).strip().lower()
1729 for entry in choices:
1730 f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Raised when a job was submitted and the client should exit.

  The single argument of this exception is the ID of the job that was
  submitted; handlers are expected to print that ID.

  This is not an error condition, merely a structured way for clients
  to terminate after submitting a job.

  """
1749 def SendJob(ops, cl=None):
1750 """Function to submit an opcode without waiting for the results.
1753 @param ops: list of opcodes
1754 @type cl: luxi.Client
1755 @param cl: the luxi client to use for communicating with the master;
1756 if None, a new client will be created
1762 job_id = cl.SubmitJob(ops)
1767 def GenericPollJob(job_id, cbs, report_cbs):
1768 """Generic job-polling function.
1770 @type job_id: number
1771 @param job_id: Job ID
1772 @type cbs: Instance of L{JobPollCbBase}
1773 @param cbs: Data callbacks
1774 @type report_cbs: Instance of L{JobPollReportCbBase}
1775 @param report_cbs: Reporting callbacks
1778 prev_job_info = None
1779 prev_logmsg_serial = None
1784 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1787 # job not found, go away!
1788 raise errors.JobLost("Job with id %s lost" % job_id)
1790 if result == constants.JOB_NOTCHANGED:
1791 report_cbs.ReportNotChanged(job_id, status)
1796 # Split result, a tuple of (field values, log entries)
1797 (job_info, log_entries) = result
1798 (status, ) = job_info
1801 for log_entry in log_entries:
1802 (serial, timestamp, log_type, message) = log_entry
1803 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1805 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1807 # TODO: Handle canceled and archived jobs
1808 elif status in (constants.JOB_STATUS_SUCCESS,
1809 constants.JOB_STATUS_ERROR,
1810 constants.JOB_STATUS_CANCELING,
1811 constants.JOB_STATUS_CANCELED):
1814 prev_job_info = job_info
1816 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1818 raise errors.JobLost("Job with id %s lost" % job_id)
1820 status, opstatus, result = jobs[0]
1822 if status == constants.JOB_STATUS_SUCCESS:
1825 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1826 raise errors.OpExecError("Job was canceled")
1829 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1830 if status == constants.OP_STATUS_SUCCESS:
1832 elif status == constants.OP_STATUS_ERROR:
1833 errors.MaybeRaise(msg)
1836 raise errors.OpExecError("partial failure (opcode %d): %s" %
1839 raise errors.OpExecError(str(msg))
1841 # default failure mode
1842 raise errors.OpExecError(result)
1845 class JobPollCbBase:
1846 """Base class for L{GenericPollJob} callbacks.
1850 """Initializes this class.
1854 def WaitForJobChangeOnce(self, job_id, fields,
1855 prev_job_info, prev_log_serial):
1856 """Waits for changes on a job.
1859 raise NotImplementedError()
1861 def QueryJobs(self, job_ids, fields):
1862 """Returns the selected fields for the selected job IDs.
1864 @type job_ids: list of numbers
1865 @param job_ids: Job IDs
1866 @type fields: list of strings
1867 @param fields: Fields
1870 raise NotImplementedError()
1873 class JobPollReportCbBase:
1874 """Base class for L{GenericPollJob} reporting callbacks.
1878 """Initializes this class.
1882 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1883 """Handles a log message.
1886 raise NotImplementedError()
1888 def ReportNotChanged(self, job_id, status):
1889 """Called for if a job hasn't changed in a while.
1891 @type job_id: number
1892 @param job_id: Job ID
1893 @type status: string or None
1894 @param status: Job status if available
1897 raise NotImplementedError()
1900 class _LuxiJobPollCb(JobPollCbBase):
1901 def __init__(self, cl):
1902 """Initializes this class.
1905 JobPollCbBase.__init__(self)
1908 def WaitForJobChangeOnce(self, job_id, fields,
1909 prev_job_info, prev_log_serial):
1910 """Waits for changes on a job.
1913 return self.cl.WaitForJobChangeOnce(job_id, fields,
1914 prev_job_info, prev_log_serial)
1916 def QueryJobs(self, job_ids, fields):
1917 """Returns the selected fields for the selected job IDs.
1920 return self.cl.QueryJobs(job_ids, fields)
1923 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1924 def __init__(self, feedback_fn):
1925 """Initializes this class.
1928 JobPollReportCbBase.__init__(self)
1930 self.feedback_fn = feedback_fn
1932 assert callable(feedback_fn)
1934 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1935 """Handles a log message.
1938 self.feedback_fn((timestamp, log_type, log_msg))
1940 def ReportNotChanged(self, job_id, status):
1941 """Called if a job hasn't changed in a while.
1947 class StdioJobPollReportCb(JobPollReportCbBase):
1949 """Initializes this class.
1952 JobPollReportCbBase.__init__(self)
1954 self.notified_queued = False
1955 self.notified_waitlock = False
1957 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1958 """Handles a log message.
1961 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1962 FormatLogMessage(log_type, log_msg))
1964 def ReportNotChanged(self, job_id, status):
1965 """Called if a job hasn't changed in a while.
1971 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1972 ToStderr("Job %s is waiting in queue", job_id)
1973 self.notified_queued = True
1975 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1976 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1977 self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: the message type; non-C{ELOG_MESSAGE} payloads are
      stringified before encoding
  @param log_msg: the message payload
  @return: the message, safely encoded for output

  """
  text = log_msg if log_type == constants.ELOG_MESSAGE else str(log_msg)
  return utils.SafeEncode(text)
1990 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1991 """Function to poll for the result of a job.
1993 @type job_id: job identified
1994 @param job_id: the job to poll for results
1995 @type cl: luxi.Client
1996 @param cl: the luxi client to use for communicating with the master;
1997 if None, a new client will be created
2003 if reporter is None:
2005 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2007 reporter = StdioJobPollReportCb()
2009 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2011 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2014 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2015 """Legacy function to submit an opcode.
2017 This is just a simple wrapper over the construction of the processor
2018 instance. It should be extended to better handle feedback and
2019 interaction functions.
2025 SetGenericOpcodeOpts([op], opts)
2027 job_id = SendJob([op], cl=cl)
2029 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2032 return op_results[0]
2035 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2036 """Wrapper around SubmitOpCode or SendJob.
2038 This function will decide, based on the 'opts' parameter, whether to
2039 submit and wait for the result of the opcode (and return it), or
2040 whether to just send the job and print its identifier. It is used in
2041 order to simplify the implementation of the '--submit' option.
2043 It will also process the opcodes if we're sending the via SendJob
2044 (otherwise SubmitOpCode does it).
2047 if opts and opts.submit_only:
2049 SetGenericOpcodeOpts(job, opts)
2050 job_id = SendJob(job, cl=cl)
2051 raise JobSubmittedException(job_id)
2053 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  # Hoist option lookups out of the loop; they are the same for all opcodes
  debug = options.debug
  has_dry_run = hasattr(options, "dry_run")
  priority = getattr(options, "priority", None)
  for opcode in opcode_list:
    opcode.debug_level = debug
    if has_dry_run:
      opcode.dry_run = options.dry_run
    if priority is not None:
      opcode.priority = _PRIONAME_TO_VALUE[priority]
2078 # TODO: Cache object?
2080 client = luxi.Client()
2081 except luxi.NoMasterError:
2082 ss = ssconf.SimpleStore()
2084 # Try to read ssconf file
2087 except errors.ConfigurationError:
2088 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2089 " not part of a cluster")
2091 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2092 if master != myself:
2093 raise errors.OpPrereqError("This is not the master node, please connect"
2094 " to node '%s' and rerun the command" %
2100 def FormatError(err):
2101 """Return a formatted error message for a given error.
2103 This function takes an exception instance and returns a tuple
2104 consisting of two values: first, the recommended exit code, and
2105 second, a string describing the error message (not
2106 newline-terminated).
2112 if isinstance(err, errors.ConfigurationError):
2113 txt = "Corrupt configuration file: %s" % msg
2115 obuf.write(txt + "\n")
2116 obuf.write("Aborting.")
2118 elif isinstance(err, errors.HooksAbort):
2119 obuf.write("Failure: hooks execution failed:\n")
2120 for node, script, out in err.args[0]:
2122 obuf.write(" node: %s, script: %s, output: %s\n" %
2123 (node, script, out))
2125 obuf.write(" node: %s, script: %s (no output)\n" %
2127 elif isinstance(err, errors.HooksFailure):
2128 obuf.write("Failure: hooks general failure: %s" % msg)
2129 elif isinstance(err, errors.ResolverError):
2130 this_host = netutils.Hostname.GetSysName()
2131 if err.args[0] == this_host:
2132 msg = "Failure: can't resolve my own hostname ('%s')"
2134 msg = "Failure: can't resolve hostname '%s'"
2135 obuf.write(msg % err.args[0])
2136 elif isinstance(err, errors.OpPrereqError):
2137 if len(err.args) == 2:
2138 obuf.write("Failure: prerequisites not met for this"
2139 " operation:\nerror type: %s, error details:\n%s" %
2140 (err.args[1], err.args[0]))
2142 obuf.write("Failure: prerequisites not met for this"
2143 " operation:\n%s" % msg)
2144 elif isinstance(err, errors.OpExecError):
2145 obuf.write("Failure: command execution error:\n%s" % msg)
2146 elif isinstance(err, errors.TagError):
2147 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2148 elif isinstance(err, errors.JobQueueDrainError):
2149 obuf.write("Failure: the job queue is marked for drain and doesn't"
2150 " accept new requests\n")
2151 elif isinstance(err, errors.JobQueueFull):
2152 obuf.write("Failure: the job queue is full and doesn't accept new"
2153 " job submissions until old jobs are archived\n")
2154 elif isinstance(err, errors.TypeEnforcementError):
2155 obuf.write("Parameter Error: %s" % msg)
2156 elif isinstance(err, errors.ParameterError):
2157 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2158 elif isinstance(err, luxi.NoMasterError):
2159 obuf.write("Cannot communicate with the master daemon.\nIs it running"
2160 " and listening for connections?")
2161 elif isinstance(err, luxi.TimeoutError):
2162 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2163 " been submitted and will continue to run even if the call"
2164 " timed out. Useful commands in this situation are \"gnt-job"
2165 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2167 elif isinstance(err, luxi.PermissionError):
2168 obuf.write("It seems you don't have permissions to connect to the"
2169 " master daemon.\nPlease retry as a different user.")
2170 elif isinstance(err, luxi.ProtocolError):
2171 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2173 elif isinstance(err, errors.JobLost):
2174 obuf.write("Error checking job status: %s" % msg)
2175 elif isinstance(err, errors.QueryFilterParseError):
2176 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2177 obuf.write("\n".join(err.GetDetails()))
2178 elif isinstance(err, errors.GenericError):
2179 obuf.write("Unhandled Ganeti error: %s" % msg)
2180 elif isinstance(err, JobSubmittedException):
2181 obuf.write("JobID: %s\n" % err.args[0])
2184 obuf.write("Unhandled exception: %s" % msg)
2185 return retcode, obuf.getvalue().rstrip("\n")
2188 def GenericMain(commands, override=None, aliases=None,
2189 env_override=frozenset()):
2190 """Generic main function for all the gnt-* commands.
2192 @param commands: a dictionary with a special structure, see the design doc
2193 for command line handling.
2194 @param override: if not None, we expect a dictionary with keys that will
2195 override command line options; this can be used to pass
2196 options from the scripts to generic functions
2197 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2198 @param env_override: list of environment names which are allowed to submit
2199 default args for commands
2202 # save the program name and the entire command line for later logging
2204 binary = os.path.basename(sys.argv[0])
2206 binary = sys.argv[0]
2208 if len(sys.argv) >= 2:
2209 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2213 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2215 binary = "<unknown program>"
2216 cmdline = "<unknown>"
2222 func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
2223 except errors.ParameterError, err:
2224 result, err_msg = FormatError(err)
2228 if func is None: # parse error
2231 if override is not None:
2232 for key, val in override.iteritems():
2233 setattr(options, key, val)
2235 utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
2236 stderr_logging=True)
2238 logging.info("Command line: %s", cmdline)
2241 result = func(options, args)
2242 except (errors.GenericError, luxi.ProtocolError,
2243 JobSubmittedException), err:
2244 result, err_msg = FormatError(err)
2245 logging.exception("Error during command processing")
2247 except KeyboardInterrupt:
2248 result = constants.EXIT_FAILURE
2249 ToStderr("Aborted. Note that if the operation created any jobs, they"
2250 " might have been submitted and"
2251 " will continue to run in the background.")
2252 except IOError, err:
2253 if err.errno == errno.EPIPE:
2254 # our terminal went away, we'll exit
2255 sys.exit(constants.EXIT_FAILURE)
2262 def ParseNicOption(optvalue):
2263 """Parses the value of the --net option(s).
2267 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2268 except (TypeError, ValueError), err:
2269 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2271 nics = [{}] * nic_max
2272 for nidx, ndict in optvalue:
2275 if not isinstance(ndict, dict):
2276 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2277 " got %s" % (nidx, ndict))
2279 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2286 def GenericInstanceCreate(mode, opts, args):
2287 """Add an instance to the cluster via either creation or import.
2289 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2290 @param opts: the command line options selected by the user
2292 @param args: should contain only one element, the new instance name
2294 @return: the desired exit code
2299 (pnode, snode) = SplitNodeOption(opts.node)
2304 hypervisor, hvparams = opts.hypervisor
2307 nics = ParseNicOption(opts.nics)
2311 elif mode == constants.INSTANCE_CREATE:
2312 # default of one nic, all auto
2318 if opts.disk_template == constants.DT_DISKLESS:
2319 if opts.disks or opts.sd_size is not None:
2320 raise errors.OpPrereqError("Diskless instance but disk"
2321 " information passed")
2324 if (not opts.disks and not opts.sd_size
2325 and mode == constants.INSTANCE_CREATE):
2326 raise errors.OpPrereqError("No disk information specified")
2327 if opts.disks and opts.sd_size is not None:
2328 raise errors.OpPrereqError("Please use either the '--disk' or"
2330 if opts.sd_size is not None:
2331 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2335 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2336 except ValueError, err:
2337 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2338 disks = [{}] * disk_max
2341 for didx, ddict in opts.disks:
2343 if not isinstance(ddict, dict):
2344 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2345 raise errors.OpPrereqError(msg)
2346 elif constants.IDISK_SIZE in ddict:
2347 if constants.IDISK_ADOPT in ddict:
2348 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2349 " (disk %d)" % didx)
2351 ddict[constants.IDISK_SIZE] = \
2352 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2353 except ValueError, err:
2354 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2356 elif constants.IDISK_ADOPT in ddict:
2357 if mode == constants.INSTANCE_IMPORT:
2358 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2360 ddict[constants.IDISK_SIZE] = 0
2362 raise errors.OpPrereqError("Missing size or adoption source for"
2366 if opts.tags is not None:
2367 tags = opts.tags.split(",")
2371 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2372 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2374 if mode == constants.INSTANCE_CREATE:
2377 force_variant = opts.force_variant
2380 no_install = opts.no_install
2381 identify_defaults = False
2382 elif mode == constants.INSTANCE_IMPORT:
2385 force_variant = False
2386 src_node = opts.src_node
2387 src_path = opts.src_dir
2389 identify_defaults = opts.identify_defaults
2391 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2393 op = opcodes.OpInstanceCreate(instance_name=instance,
2395 disk_template=opts.disk_template,
2397 pnode=pnode, snode=snode,
2398 ip_check=opts.ip_check,
2399 name_check=opts.name_check,
2400 wait_for_sync=opts.wait_for_sync,
2401 file_storage_dir=opts.file_storage_dir,
2402 file_driver=opts.file_driver,
2403 iallocator=opts.iallocator,
2404 hypervisor=hypervisor,
2406 beparams=opts.beparams,
2407 osparams=opts.osparams,
2411 force_variant=force_variant,
2415 no_install=no_install,
2416 identify_defaults=identify_defaults,
2417 ignore_ipolicy=opts.ignore_ipolicy)
2419 SubmitOrSend(op, opts)
# NOTE(review): this listing is non-contiguous (the leading original line
# numbers skip), so some statements (e.g. the `try:`/`finally:` around
# Call's body and the `else:` branches in _RunCmd) are elided here.
2423 class _RunWhileClusterStoppedHelper:
2424 """Helper class for L{RunWhileClusterStopped} to simplify state management
2427 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2428 """Initializes this class.
2430 @type feedback_fn: callable
2431 @param feedback_fn: Feedback function
2432 @type cluster_name: string
2433 @param cluster_name: Cluster name
2434 @type master_node: string
2435 @param master_node: Master node name
2436 @type online_nodes: list
2437 @param online_nodes: List of names of online nodes
2440 self.feedback_fn = feedback_fn
2441 self.cluster_name = cluster_name
2442 self.master_node = master_node
2443 self.online_nodes = online_nodes
2445 self.ssh = ssh.SshRunner(self.cluster_name)
# Pre-compute the non-master nodes so daemons can later be restarted with
# the master node last (see Call below).
2447 self.nonmaster_nodes = [name for name in online_nodes
2448 if name != master_node]
2450 assert self.master_node not in self.nonmaster_nodes
2452 def _RunCmd(self, node_name, cmd):
2453 """Runs a command on the local or a remote machine.
2455 @type node_name: string
2456 @param node_name: Machine name
# Runs locally for None/master node, otherwise via SSH as root; raises
# OpExecError on failure (the failure check line is elided in this listing).
2461 if node_name is None or node_name == self.master_node:
2462 # No need to use SSH
2463 result = utils.RunCmd(cmd)
2465 result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2468 errmsg = ["Failed to run command %s" % result.cmd]
2470 errmsg.append("on node %s" % node_name)
2471 errmsg.append(": exitcode %s and error %s" %
2472 (result.exit_code, result.output))
2473 raise errors.OpExecError(" ".join(errmsg))
2475 def Call(self, fn, *args):
2476 """Call function while all daemons are stopped.
2479 @param fn: Function to be called
2482 # Pause watcher by acquiring an exclusive lock on watcher state file
2483 self.feedback_fn("Blocking watcher")
2484 watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2486 # TODO: Currently, this just blocks. There's no timeout.
2487 # TODO: Should it be a shared lock?
2488 watcher_block.Exclusive(blocking=True)
2490 # Stop master daemons, so that no new jobs can come in and all running
2492 self.feedback_fn("Stopping master daemons")
2493 self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2495 # Stop daemons on all nodes
2496 for node_name in self.online_nodes:
2497 self.feedback_fn("Stopping daemons on %s" % node_name)
2498 self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2500 # All daemons are shut down now
# The callable receives this helper as its first argument so it can use
# _RunCmd; exceptions are logged and reported, then daemons are restarted.
2502 return fn(self, *args)
2503 except Exception, err:
2504 _, errmsg = FormatError(err)
2505 logging.exception("Caught exception")
2506 self.feedback_fn(errmsg)
2509 # Start cluster again, master node last
2510 for node_name in self.nonmaster_nodes + [self.master_node]:
2511 self.feedback_fn("Starting daemons on %s" % node_name)
2512 self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
# Release the watcher lock acquired above.
2515 watcher_block.Close()
# Entry point wrapping _RunWhileClusterStoppedHelper: gathers cluster name,
# master node and online nodes, then runs C{fn} with all daemons stopped.
# NOTE(review): the luxi client `cl` is created on a line elided from this
# listing (numbering gap between 2529 and 2532) — confirm against full file.
2518 def RunWhileClusterStopped(feedback_fn, fn, *args):
2519 """Calls a function while all cluster daemons are stopped.
2521 @type feedback_fn: callable
2522 @param feedback_fn: Feedback function
2524 @param fn: Function to be called when daemons are stopped
2527 feedback_fn("Gathering cluster information")
2529 # This ensures we're running on the master daemon
2532 (cluster_name, master_node) = \
2533 cl.QueryConfigValues(["cluster_name", "master_node"])
2535 online_nodes = GetOnlineNodes([], cl=cl)
2537 # Don't keep a reference to the client. The master daemon will go away.
2540 assert master_node in online_nodes
2542 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2543 online_nodes).Call(fn, *args)
# Builds the rows of a textual table, either separator-joined or padded to
# computed column widths ("smart" mode when separator is None).
# NOTE(review): several lines are elided in this listing (e.g. the
# initialization of `format_fields`/`result`, the try: around unit
# formatting, and the header-row loop body) — verify against the full file.
2546 def GenerateTable(headers, fields, separator, data,
2547 numfields=None, unitfields=None,
2549 """Prints a table with headers and different fields.
2552 @param headers: dictionary mapping field names to headers for
2555 @param fields: the field names corresponding to each row in
2557 @param separator: the separator to be used; if this is None,
2558 the default 'smart' algorithm is used which computes optimal
2559 field width, otherwise just the separator is used between
2562 @param data: a list of lists, each sublist being one row to be output
2563 @type numfields: list
2564 @param numfields: a list with the fields that hold numeric
2565 values and thus should be right-aligned
2566 @type unitfields: list
2567 @param unitfields: a list with the fields that hold numeric
2568 values that should be formatted with the units field
2569 @type units: string or None
2570 @param units: the units we should use for formatting, or None for
2571 automatic choice (human-readable for non-separator usage, otherwise
2572 megabytes); this is a one-letter string
2581 if numfields is None:
2583 if unitfields is None:
2586 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2587 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
# Build one printf-style format per field: "%s" when a separator is used,
# "%*s" (right-aligned, dynamic width) for numeric fields, "%-*s" otherwise.
2590 for field in fields:
2591 if headers and field not in headers:
2592 # TODO: handle better unknown fields (either revert to old
2593 # style of raising exception, or deal more intelligently with
2595 headers[field] = field
2596 if separator is not None:
2597 format_fields.append("%s")
2598 elif numfields.Matches(field):
2599 format_fields.append("%*s")
2601 format_fields.append("%-*s")
2603 if separator is None:
2604 mlens = [0 for name in fields]
2605 format_str = " ".join(format_fields)
# Literal "%" in the separator must be escaped since the joined string is
# used as a %-format template.
2607 format_str = separator.replace("%", "%%").join(format_fields)
# First pass over the data: stringify values (with unit formatting where
# requested) and track maximum column widths for smart alignment.
2612 for idx, val in enumerate(row):
2613 if unitfields.Matches(fields[idx]):
2616 except (TypeError, ValueError):
2619 val = row[idx] = utils.FormatUnit(val, units)
2620 val = row[idx] = str(val)
2621 if separator is None:
2622 mlens[idx] = max(mlens[idx], len(val))
# Header row: widen columns to fit headers, then emit the formatted row.
2627 for idx, name in enumerate(fields):
2629 if separator is None:
2630 mlens[idx] = max(mlens[idx], len(hdr))
2631 args.append(mlens[idx])
2633 result.append(format_str % tuple(args))
2635 if separator is None:
2636 assert len(mlens) == len(fields)
# Avoid padding the last column when it is left-aligned (no trailing blanks).
2638 if fields and not numfields.Matches(fields[-1]):
2644 line = ["-" for _ in fields]
2645 for idx in range(len(fields)):
2646 if separator is None:
2647 args.append(mlens[idx])
2648 args.append(line[idx])
2649 result.append(format_str % tuple(args))
# NOTE(review): _FormatBool's body is elided in this listing (numbering gap
# after 2655); only its signature and docstring are visible.
2654 def _FormatBool(value):
2655 """Formats a boolean value as a string.
2663 #: Default formatting for query results; (callback, align right)
# Maps query field types (QFT_*) to (formatter callable, right-align flag);
# QFT_UNIT is deliberately absent, see _GetColumnFormatter.
2664 _DEFAULT_FORMAT_QUERY = {
2665 constants.QFT_TEXT: (str, False),
2666 constants.QFT_BOOL: (_FormatBool, False),
2667 constants.QFT_NUMBER: (str, True),
2668 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2669 constants.QFT_OTHER: (str, False),
2670 constants.QFT_UNKNOWN: (str, False),
# Resolves the formatter for one query field: per-name override first, then
# the unit-aware closure for QFT_UNIT, then the static type table.
2674 def _GetColumnFormatter(fdef, override, unit):
2675 """Returns formatting function for a field.
2677 @type fdef: L{objects.QueryFieldDefinition}
2678 @type override: dict
2679 @param override: Dictionary for overriding field formatting functions,
2680 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2682 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2683 @rtype: tuple; (callable, bool)
2684 @return: Returns the function to format a value (takes one parameter) and a
2685 boolean for aligning the value on the right-hand side
2688 fmt = override.get(fdef.name, None)
2692 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2694 if fdef.kind == constants.QFT_UNIT:
2695 # Can't keep this information in the static dictionary
# The closure captures `unit` at call time; unit columns are right-aligned.
2696 return (lambda value: utils.FormatUnit(value, unit), True)
2698 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
# Unhandled field kinds are a programming error, not a user error.
2702 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# Callable wrapper combining a value formatter with per-field status
# reporting; used as the per-column formatter in FormatQueryResult.
2705 class _QueryColumnFormatter:
2706 """Callable class for formatting fields of a query.
2709 def __init__(self, fn, status_fn, verbose):
2710 """Initializes this class.
2713 @param fn: Formatting function
2714 @type status_fn: callable
2715 @param status_fn: Function to report fields' status
2716 @type verbose: boolean
2717 @param verbose: whether to use verbose field descriptions or not
2721 self._status_fn = status_fn
2722 self._verbose = verbose
2724 def __call__(self, data):
2725 """Returns a field's string representation.
# `data` is a (status, value) pair as returned by the query layer.
2728 (status, value) = data
# Record the status (used by the caller for overall-result statistics).
2731 self._status_fn(status)
2733 if status == constants.RS_NORMAL:
2734 return self._fn(value)
# Abnormal statuses must not carry a payload.
2736 assert value is None, \
2737 "Found value %r for abnormal status %s" % (value, status)
2739 return FormatResultError(status, self._verbose)
# Translates a non-RS_NORMAL result status into its human-readable text
# (verbose or terse form from constants.RSS_DESCRIPTION).
# NOTE(review): the return statement selecting verbose_text/normal_text is
# elided in this listing (gap after 2754) — confirm against full file.
2742 def FormatResultError(status, verbose):
2743 """Formats result status other than L{constants.RS_NORMAL}.
2745 @param status: The result status
2746 @type verbose: boolean
2747 @param verbose: Whether to return the verbose text
2748 @return: Text of result status
2751 assert status != constants.RS_NORMAL, \
2752 "FormatResultError called with status equal to constants.RS_NORMAL"
2754 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2756 raise NotImplementedError("Unknown status %s" % status)
# Renders a QueryResponse as table lines and computes an overall status
# (QR_NORMAL / QR_INCOMPLETE / unknown-fields) from per-field statuses.
2763 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2764 header=False, verbose=False):
2765 """Formats data in L{objects.QueryResponse}.
2767 @type result: L{objects.QueryResponse}
2768 @param result: result of query operation
2770 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2771 see L{utils.text.FormatUnit}
2772 @type format_override: dict
2773 @param format_override: Dictionary for overriding field formatting functions,
2774 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2775 @type separator: string or None
2776 @param separator: String used to separate fields
2778 @param header: Whether to output header row
2779 @type verbose: boolean
2780 @param verbose: whether to use verbose field descriptions or not
2789 if format_override is None:
2790 format_override = {}
# Per-status counters, filled in by _RecordStatus via _QueryColumnFormatter.
2792 stats = dict.fromkeys(constants.RS_ALL, 0)
2794 def _RecordStatus(status):
# Build one TableColumn per field definition, wiring formatter + stats hook.
2799 for fdef in result.fields:
2800 assert fdef.title and fdef.name
2801 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2802 columns.append(TableColumn(fdef.title,
2803 _QueryColumnFormatter(fn, _RecordStatus,
2807 table = FormatTable(result.data, columns, header, separator)
2809 # Collect statistics
2810 assert len(stats) == len(constants.RS_ALL)
2811 assert compat.all(count >= 0 for count in stats.values())
2813 # Determine overall status. If there was no data, unknown fields must be
2814 # detected via the field definitions.
2815 if (stats[constants.RS_UNKNOWN] or
2816 (not result.data and _GetUnknownFields(result.fields))):
2818 elif compat.any(count > 0 for key, count in stats.items()
2819 if key != constants.RS_NORMAL):
2820 status = QR_INCOMPLETE
2824 return (status, table)
# Filters the field definitions down to those the backend did not recognize.
2827 def _GetUnknownFields(fdefs):
2828 """Returns list of unknown fields included in C{fdefs}.
2830 @type fdefs: list of L{objects.QueryFieldDefinition}
2833 return [fdef for fdef in fdefs
2834 if fdef.kind == constants.QFT_UNKNOWN]
# Emits a stderr warning naming any unknown fields in a query result.
# NOTE(review): the visible code does not show a return statement, but
# callers use the result as a boolean ("found_unknown") — the return is
# presumably on an elided line; confirm against the full file.
2837 def _WarnUnknownFields(fdefs):
2838 """Prints a warning to stderr if a query included unknown fields.
2840 @type fdefs: list of L{objects.QueryFieldDefinition}
2843 unknown = _GetUnknownFields(fdefs)
2845 ToStderr("Warning: Queried for unknown fields %s",
2846 utils.CommaJoin(fdef.name for fdef in unknown))
# Shared implementation behind the various "gnt-* list" commands: builds a
# name filter, runs the query, formats the result and prints it, returning
# an exit code.
2852 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2853 format_override=None, verbose=False, force_filter=False,
2854 namefield=None, qfilter=None):
2855 """Generic implementation for listing all items of a resource.
2857 @param resource: One of L{constants.QR_VIA_LUXI}
2858 @type fields: list of strings
2859 @param fields: List of fields to query for
2860 @type names: list of strings
2861 @param names: Names of items to query for
2862 @type unit: string or None
2863 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2864 None for automatic choice (human-readable for non-separator usage,
2865 otherwise megabytes); this is a one-letter string
2866 @type separator: string or None
2867 @param separator: String used to separate fields
2869 @param header: Whether to show header row
2870 @type force_filter: bool
2871 @param force_filter: Whether to always treat names as filter
2872 @type format_override: dict
2873 @param format_override: Dictionary for overriding field formatting functions,
2874 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2875 @type verbose: boolean
2876 @param verbose: whether to use verbose field descriptions or not
2877 @type namefield: string
2878 @param namefield: Name of field to use for simple filters (see
2879 L{qlang.MakeFilter} for details)
2880 @type qfilter: list or None
2881 @param qfilter: Query filter (in addition to names)
# Combine the name-based filter with any caller-supplied filter via OP_AND.
2887 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield)
2890 qfilter = namefilter
2891 elif namefilter is not None:
2892 qfilter = [qlang.OP_AND, namefilter, qfilter]
2897 response = cl.Query(resource, fields, qfilter)
2899 found_unknown = _WarnUnknownFields(response.fields)
2901 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2903 format_override=format_override,
# Unknown fields must be reflected consistently in both the warning and the
# formatter's overall status.
2909 assert ((found_unknown and status == QR_UNKNOWN) or
2910 (not found_unknown and status != QR_UNKNOWN))
2912 if status == QR_UNKNOWN:
2913 return constants.EXIT_UNKNOWN_FIELD
2915 # TODO: Should the list command fail if not all data could be collected?
2916 return constants.EXIT_SUCCESS
# Shared implementation behind "gnt-* list-fields": queries the available
# fields of a resource and prints a Name/Title/Description table.
2919 def GenericListFields(resource, fields, separator, header, cl=None):
2920 """Generic implementation for listing fields for a resource.
2922 @param resource: One of L{constants.QR_VIA_LUXI}
2923 @type fields: list of strings
2924 @param fields: List of fields to query for
2925 @type separator: string or None
2926 @param separator: String used to separate fields
2928 @param header: Whether to show header row
2937 response = cl.QueryFields(resource, fields)
2939 found_unknown = _WarnUnknownFields(response.fields)
# Fixed three-column layout; all values are plain strings.
2942 TableColumn("Name", str, False),
2943 TableColumn("Title", str, False),
2944 TableColumn("Description", str, False),
2947 rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2949 for line in FormatTable(rows, columns, header, separator):
2953 return constants.EXIT_UNKNOWN_FIELD
2955 return constants.EXIT_SUCCESS
2959 """Describes a column for L{FormatTable}.
# Constructor of the column descriptor used by FormatTable; stores title,
# formatting callable and alignment flag (title/fn assignments are on lines
# elided from this listing).
2962 def __init__(self, title, fn, align_right):
2963 """Initializes this class.
2966 @param title: Column title
2968 @param fn: Formatting function
2969 @type align_right: bool
2970 @param align_right: Whether to align values on the right-hand side
2975 self.align_right = align_right
# Builds a "%Ns"/"%-Ns" printf template for a column of the given width;
# `sign` (the "-" for left alignment) is set on a line elided here.
2978 def _GetColFormatString(width, align_right):
2979 """Returns the format string for a field.
2987 return "%%%s%ss" % (sign, width)
# Formats rows into text lines using TableColumn descriptors: joins with a
# separator when given, otherwise pads each column to its widest value.
2990 def FormatTable(rows, columns, header, separator):
2991 """Formats data as a table.
2993 @type rows: list of lists
2994 @param rows: Row data, one list per row
2995 @type columns: list of L{TableColumn}
2996 @param columns: Column descriptions
2998 @param header: Whether to show header row
2999 @type separator: string or None
3000 @param separator: String used to separate columns
# When a header is requested, seed the output with the title row and start
# column widths from the title lengths; otherwise widths start at zero.
3004 data = [[col.title for col in columns]]
3005 colwidth = [len(col.title) for col in columns]
3008 colwidth = [0 for _ in columns]
3012 assert len(row) == len(columns)
# Format every cell through its column's callable.
3014 formatted = [col.format(value) for value, col in zip(row, columns)]
3016 if separator is None:
3017 # Update column widths
3018 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3019 # Modifying a list's items while iterating is fine
3020 colwidth[idx] = max(oldwidth, len(value))
3022 data.append(formatted)
3024 if separator is not None:
3025 # Return early if a separator is used
3026 return [separator.join(row) for row in data]
3028 if columns and not columns[-1].align_right:
3029 # Avoid unnecessary spaces at end of line
3032 # Build format string
3033 fmt = " ".join([_GetColFormatString(width, col.align_right)
3034 for col, width in zip(columns, colwidth)])
3036 return [fmt % tuple(row) for row in data]
# Renders a (seconds, microseconds) pair via utils.FormatTime; the fallback
# for malformed input and the tuple unpacking are on elided lines.
3039 def FormatTimestamp(ts):
3040 """Formats a given timestamp.
3043 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3046 @return: a string with the formatted timestamp
3049 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3053 return utils.FormatTime(sec, usecs=usecs)
# Converts a suffixed time string (e.g. "2d") into seconds, raising
# OpPrereqError for empty/invalid specifications; the suffix_map definition
# is on lines elided from this listing.
3056 def ParseTimespec(value):
3057 """Parse a time specification.
3059 The following suffixes will be recognized:
3067 Without any suffix, the value will be taken to be in seconds.
3072 raise errors.OpPrereqError("Empty time specification passed")
3080 if value[-1] not in suffix_map:
3083 except (TypeError, ValueError):
3084 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
# A recognized suffix scales the numeric prefix by its multiplier.
3086 multiplier = suffix_map[value[-1]]
3088 if not value: # no data left after stripping the suffix
3089 raise errors.OpPrereqError("Invalid time specification (only"
3092 value = int(value) * multiplier
3093 except (TypeError, ValueError):
3094 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
# Queries node names (or secondary IPs) for online nodes, optionally
# restricted by name subset, node group, and excluding the master.
3098 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3099 filter_master=False, nodegroup=None):
3100 """Returns the names of online nodes.
3102 This function will also log a warning on stderr with the names of
3105 @param nodes: if not empty, use only this subset of nodes (minus the
3107 @param cl: if not None, luxi client to use
3108 @type nowarn: boolean
3109 @param nowarn: by default, this function will output a note with the
3110 offline nodes that are skipped; if this parameter is True the
3111 note is not displayed
3112 @type secondary_ips: boolean
3113 @param secondary_ips: if True, return the secondary IPs instead of the
3114 names, useful for doing network traffic over the replication interface
3116 @type filter_master: boolean
3117 @param filter_master: if True, do not return the master node in the list
3118 (useful in coordination with secondary_ips where we cannot check our
3119 node name against the list)
3120 @type nodegroup: string
3121 @param nodegroup: If set, only return nodes in this node group
# Assemble the query filter pieces: name subset, node group (matched by
# either name or UUID), and optional master exclusion.
3130 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3132 if nodegroup is not None:
3133 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3134 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3137 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
# Combine multiple sub-filters with AND; a single one is used as-is.
3140 if len(qfilter) > 1:
3141 final_filter = [qlang.OP_AND] + qfilter
3143 assert len(qfilter) == 1
3144 final_filter = qfilter[0]
3148 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# Row layout is [(status, name), (status, offline), (status, sip)]; these
# helpers unpack the respective field value.
3150 def _IsOffline(row):
3151 (_, (_, offline), _) = row
3155 ((_, name), _, _) = row
3159 (_, _, (_, sip)) = row
3162 (offline, online) = compat.partition(result.data, _IsOffline)
3164 if offline and not nowarn:
3165 ToStderr("Note: skipping offline node(s): %s" %
3166 utils.CommaJoin(map(_GetName, offline)))
# `fn` (name vs secondary-IP extractor, chosen on elided lines) is applied
# to every online row.
3173 return map(fn, online)
# Low-level output helper for ToStdout/ToStderr: %-formats when args are
# given and exits quietly on EPIPE (terminal gone away).
3176 def _ToStream(stream, txt, *args):
3177 """Write a message to a stream, bypassing the logging system
3179 @type stream: file object
3180 @param stream: the file to which we should write
3182 @param txt: the message
3188 stream.write(txt % args)
3193 except IOError, err:
3194 if err.errno == errno.EPIPE:
3195 # our terminal went away, we'll exit
3196 sys.exit(constants.EXIT_FAILURE)
# Convenience wrapper writing to sys.stdout via _ToStream.
3201 def ToStdout(txt, *args):
3202 """Write a message to stdout only, bypassing the logging system
3204 This is just a wrapper over _ToStream.
3207 @param txt: the message
3210 _ToStream(sys.stdout, txt, *args)
# Convenience wrapper writing to sys.stderr via _ToStream.
3213 def ToStderr(txt, *args):
3214 """Write a message to stderr only, bypassing the logging system
3216 This is just a wrapper over _ToStream.
3219 @param txt: the message
3222 _ToStream(sys.stderr, txt, *args)
# Manages queueing, submission and result collection for multiple jobs.
# Internal tuples: self.queue holds (idx, name, ops) before submission,
# self.jobs holds (idx, submit_status, job_id, name) afterwards.
# NOTE(review): many lines are elided in this listing (e.g. __init__'s
# queue/jobs initialization, _ChooseJob's fallthrough, GetResults' success
# flag handling) — confirm details against the full file.
3225 class JobExecutor(object):
3226 """Class which manages the submission and execution of multiple jobs.
3228 Note that instances of this class should not be reused between
3232 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3237 self.verbose = verbose
3240 self.feedback_fn = feedback_fn
# Monotonic counter preserving submission order of queued jobs.
3241 self._counter = itertools.count()
3244 def _IfName(name, fmt):
3245 """Helper function for formatting name.
3253 def QueueJob(self, name, *ops):
3254 """Record a job for later submit.
3257 @param name: a description of the job, will be used in WaitJobSet
3260 SetGenericOpcodeOpts(ops, self.opts)
3261 self.queue.append((self._counter.next(), name, ops))
3263 def AddJobId(self, name, status, job_id):
3264 """Adds a job ID to the internal queue.
3267 self.jobs.append((self._counter.next(), status, job_id, name))
3269 def SubmitPending(self, each=False):
3270 """Submit all pending jobs.
# each=True submits one job at a time; otherwise SubmitManyJobs is used.
3275 for (_, _, ops) in self.queue:
3276 # SubmitJob will remove the success status, but raise an exception if
3277 # the submission fails, so we'll notice that anyway.
3278 results.append([True, self.cl.SubmitJob(ops)[0]])
3280 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3281 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3282 self.jobs.append((idx, status, data, name))
3284 def _ChooseJob(self):
3285 """Choose a non-waiting/queued job to poll next.
3288 assert self.jobs, "_ChooseJob called with empty job list"
# Only a bounded batch of job IDs is queried at once (_CHOOSE_BATCH).
3290 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3294 for job_data, status in zip(self.jobs, result):
3295 if (isinstance(status, list) and status and
3296 status[0] in (constants.JOB_STATUS_QUEUED,
3297 constants.JOB_STATUS_WAITING,
3298 constants.JOB_STATUS_CANCELING)):
3299 # job is still present and waiting
3301 # good candidate found (either running job or lost job)
3302 self.jobs.remove(job_data)
# Fallback: no candidate found, poll the first job.
3306 return self.jobs.pop(0)
3308 def GetResults(self):
3309 """Wait for and return the results of all jobs.
3312 @return: list of tuples (success, job results), in the same order
3313 as the submitted jobs; if a job has failed, instead of the result
3314 there will be the error message
3318 self.SubmitPending()
3321 ok_jobs = [row[2] for row in self.jobs if row[1]]
3323 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3325 # first, remove any non-submitted jobs
3326 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3327 for idx, _, jid, name in failures:
3328 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3329 results.append((idx, False, jid))
# Poll remaining jobs one at a time, preferring ones no longer queued.
3332 (idx, _, jid, name) = self._ChooseJob()
3333 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3335 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3337 except errors.JobLost, err:
3338 _, job_result = FormatError(err)
3339 ToStderr("Job %s%s has been archived, cannot check its result",
3340 jid, self._IfName(name, " for %s"))
3342 except (errors.GenericError, luxi.ProtocolError), err:
3343 _, job_result = FormatError(err)
3345 # the error message will always be shown, verbose or not
3346 ToStderr("Job %s%s has failed: %s",
3347 jid, self._IfName(name, " for %s"), job_result)
3349 results.append((idx, success, job_result))
3351 # sort based on the index, then drop it
3353 results = [i[1:] for i in results]
3357 def WaitOrShow(self, wait):
3358 """Wait for job results or only print the job IDs.
3361 @param wait: whether to wait or not
3365 return self.GetResults()
3368 self.SubmitPending()
3369 for _, status, result, name in self.jobs:
3371 ToStdout("%s: %s", result, name)
3373 ToStderr("Failure for %s: %s", name, result)
3374 return [row[1:3] for row in self.jobs]
# Recursively writes an indented "key: value" listing of a parameter dict,
# annotating values that come from defaults rather than the own parameters.
3377 def FormatParameterDict(buf, param_dict, actual, level=1):
3378 """Formats a parameter dictionary.
3380 @type buf: L{StringIO}
3381 @param buf: the buffer into which to write
3382 @type param_dict: dict
3383 @param param_dict: the own parameters
3385 @param actual: the current parameter set (including defaults)
3386 @param level: Level of indent
3389 indent = " " * level
# Iterate the effective (defaults-included) set so every key is shown.
3391 for key in sorted(actual):
3393 buf.write("%s- %s:" % (indent, key))
3395 if isinstance(data, dict) and data:
# Nested dicts recurse with increased indentation.
3397 FormatParameterDict(buf, param_dict.get(key, {}), data,
# Values absent from the own parameters are marked as defaults.
3400 val = param_dict.get(key, "default (%s)" % data)
3401 buf.write(" %s\n" % val)
# Interactive yes/no confirmation for an operation on a list of items, with
# an optional "view" choice (inserted on the elided condition) that shows
# the affected names before asking again.
3404 def ConfirmOperation(names, list_type, text, extra=""):
3405 """Ask the user to confirm an operation on a list of list_type.
3407 This function is used to request confirmation for doing an operation
3408 on a given list of list_type.
3411 @param names: the list of names that we display when
3412 we ask for confirmation
3413 @type list_type: str
3414 @param list_type: Human readable name for elements in the list (e.g. nodes)
3416 @param text: the operation that the user should confirm
3418 @return: True or False depending on user's confirmation.
3422 msg = ("The %s will operate on %d %s.\n%s"
3423 "Do you want to continue?" % (text, count, list_type, extra))
3424 affected = (("\nAffected %s:\n" % list_type) +
3425 "\n".join([" %s" % name for name in names]))
3427 choices = [("y", True, "Yes, execute the %s" % text),
3428 ("n", False, "No, abort the %s" % text)]
3431 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3434 question = msg + affected
3436 choice = AskUser(question, choices)
# After the user picked "v" (view), re-ask including the affected list.
3439 choice = AskUser(msg + affected, choices)
# Parses each value of a dict through utils.ParseUnit, passing through
# VALUE_DEFAULT markers unchanged; the `parsed` dict initialization and
# return are on lines elided from this listing.
3443 def _MaybeParseUnit(elements):
3444 """Parses and returns an array of potential values with units.
3448 for k, v in elements.items():
3449 if v == constants.VALUE_DEFAULT:
3452 parsed[k] = utils.ParseUnit(v)
3456 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3457 ispecs_cpu_count=None,
3458 ispecs_disk_count=None,
3459 ispecs_disk_size=None,
3460 ispecs_nic_count=None,
3461 ipolicy_disk_templates=None,
3462 ipolicy_vcpu_ratio=None,
3463 ipolicy_spindle_ratio=None,
3464 group_ipolicy=False,
3465 allowed_values=None,
3467 """Creation of instance policy based on command line options.
3469 @param fill_all: whether for cluster policies we should ensure that
3470 all values are filled
3476 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3477 if ispecs_disk_size:
3478 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3479 except (TypeError, ValueError, errors.UnitParseError), err:
3480 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3482 (ispecs_disk_size, ispecs_mem_size, err),
3485 # prepare ipolicy dict
3486 ipolicy_transposed = {
3487 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3488 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3489 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3490 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3491 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3494 # first, check that the values given are correct
3496 forced_type = TISPECS_GROUP_TYPES
3498 forced_type = TISPECS_CLUSTER_TYPES
3500 for specs in ipolicy_transposed.values():
3501 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3504 ipolicy_out = objects.MakeEmptyIPolicy()
3505 for name, specs in ipolicy_transposed.iteritems():
3506 assert name in constants.ISPECS_PARAMETERS
3507 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3508 ipolicy_out[key][name] = val
3510 # no filldict for non-dicts
3511 if not group_ipolicy and fill_all:
3512 if ipolicy_disk_templates is None:
3513 ipolicy_disk_templates = constants.DISK_TEMPLATES
3514 if ipolicy_vcpu_ratio is None:
3515 ipolicy_vcpu_ratio = \
3516 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3517 if ipolicy_spindle_ratio is None:
3518 ipolicy_spindle_ratio = \
3519 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
3520 if ipolicy_disk_templates is not None:
3521 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3522 if ipolicy_vcpu_ratio is not None:
3523 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3524 if ipolicy_spindle_ratio is not None:
3525 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3527 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)