4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
48 from optparse import (OptionParser, TitledHelpFormatter,
49 Option, OptionValueError)
# Names exported from this module (options, helper functions and the
# argument/completion support constants defined below).
__all__ = [
  # Command line options
  "CLUSTER_DOMAIN_SECRET_OPT",
  "FILESTORE_DRIVER_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETMASK_OPT",
  "MIGRATION_MODE_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_PLACEMENT_OPT",
  "NODRBD_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSSH_KEYCHECK_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "REMOVE_INSTANCE_OPT",
  "SECONDARY_ONLY_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "STARTUP_PAUSED_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "CreateIPolicyFromOpts",
  "GenericInstanceCreate",
  "JobSubmittedException",
  "RunWhileClusterStopped",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatParameterDict",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "COMMON_CREATE_OPTS",
  ]
#: Priorities (sorted), mapping user-visible names to opcode priority values
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients: normal row, unknown field, incomplete data
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25
# constants used to create InstancePolicy dictionary; they map the ispec
# keys accepted at group/cluster level to the value type used for checking
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  # the cluster additionally carries a "std" (default) value
  constants.ISPECS_STD: constants.VTYPE_INT,
  }
304 def __init__(self, min=0, max=None): # pylint: disable=W0622
309 return ("<%s min=%s max=%s>" %
310 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices is only a suggestion here; subclasses may enforce it
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
# Common argument-list shorthands used by the per-script command tables
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: the command line options with a C{tag_type} attribute
  @type args: list
  @param args: positional arguments; for node/nodegroup/instance tags the
      first element (the object name) is popped off
  @rtype: tuple
  @return: (tag kind, object name)
  @raise errors.ProgrammerError: if C{tag_type} is missing or unhandled

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, its name is the kind itself
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  # sort for stable, readable output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # optionally pull additional tags from opts.tags_source
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # optionally pull additional tags from opts.tags_source
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
507 def check_unit(option, opt, value): # pylint: disable=W0613
508 """OptParsers custom converter for units.
512 return utils.ParseUnit(value)
513 except errors.UnitParseError, err:
514 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  'un_' get value=None (meaning "reset to default"), the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # value-less entry: interpret the prefix
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    # "no_ident" removes the whole group; options make no sense then
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    # "un_ident" resets the group to defaults, unless what follows the
    # prefix is a digit (then it is a real identifier such as a disk index)
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raises errors.ParameterError: if the value is not a recognized boolean

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: Set of all known dynamic-completion markers
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Adds the C{completion_suggest} attribute and the custom value types
  (identkeyval, keyval, unit, bool, list) used throughout the CLI scripts.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
# Generic output/confirmation options shared by most CLI scripts
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                         " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true", dest="dry_run",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true", dest="verbose",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")
# Instance creation / OS related options
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>", default=None,
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-installation mode)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])
# Instance-policy specification options
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="float", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
# Instance checks, NIC/disk specification and node placement options
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
# Migration/removal/replace-disks options
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"
                         " migration itself)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))
# Node management and flag options
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)
# Cluster-wide configuration options
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = \
  cli_option("--shared-file-storage-dir",
             dest="shared_file_storage_dir",
             help="Specify the default directory (cluster-"
             "wide) for storing the shared file-based"
             " disks [%s]" %
             constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
             metavar="SHAREDDIR",
             default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))
1170 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1171 dest="ignore_secondaries",
1172 default=False, action="store_true",
1173 help="Ignore errors from secondaries")
1175 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1176 action="store_false", default=True,
1177 help="Don't shutdown the instance (unsafe)")
1179 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1180 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1181 help="Maximum time to wait")
1183 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1184 dest="shutdown_timeout", type="int",
1185 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1186 help="Maximum time to wait for instance shutdown")
1188 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1190 help=("Number of seconds between repetions of the"
1193 EARLY_RELEASE_OPT = cli_option("--early-release",
1194 dest="early_release", default=False,
1195 action="store_true",
1196 help="Release the locks on the secondary"
1199 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1200 dest="new_cluster_cert",
1201 default=False, action="store_true",
1202 help="Generate a new cluster certificate")
1204 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1206 help="File containing new RAPI certificate")
1208 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1209 default=None, action="store_true",
1210 help=("Generate a new self-signed RAPI"
1213 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1215 help="File containing new SPICE certificate")
1217 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1219 help="File containing the certificate of the CA"
1220 " which signed the SPICE certificate")
1222 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1223 dest="new_spice_cert", default=None,
1224 action="store_true",
1225 help=("Generate a new self-signed SPICE"
1228 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1229 dest="new_confd_hmac_key",
1230 default=False, action="store_true",
1231 help=("Create a new HMAC key for %s" %
# Option to load a cluster domain secret from a file.
# Fixed the duplicated word in the help text ("new new" -> "new") and restored
# the explicit default (no secret file given unless the user passes one).
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"

# NOTE(review): some lines in this section appear elided by extraction
# (unclosed parentheses / missing keyword arguments); comments only added.

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

# type="bool" options take yes/no style values; _YORNO is their metavar.
MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

# Cluster-level user-id pool management.
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            help=("Specify disk state information in the format"
                                  " storage_type/identifier:option=value,..."),

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,..."),

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# Common options for creating instances; "add" and "import" then add their
# own on top.
# NOTE(review): the full contents of these lists appear elided by extraction.
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,

# Common instance policy options (ipolicy spec limits).
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_DISK_TEMPLATES,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of env variables allowed for default args
  @return: (function, options, args) on success, or (None, None, None)
      when the command line could not be handled

  """
  # NOTE(review): several lines of this function appear elided by extraction
  # (e.g. the conditional around the "binary" assignments, the alias lookup
  # and the sys.exit calls); visible statements preserved unchanged.
  assert not (env_override - set(commands))

  binary = "<command>"
  binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen)  # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      # wrap the per-command help so the table fits in 79 columns
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
  raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # allow a per-command environment variable to inject default arguments
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

    1. Keep running count of minimum number of values (min_count)

    1. Keep running count of maximum number of values (max_count)

    1. If it has an unlimited number of values

      1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

    1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  """
  # NOTE(review): initialization of min_count/max_count and the "return"
  # statements appear elided by extraction; visible statements preserved.
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

    # only the final argument may be open-ended (max=None)
    check_max = (arg.max is not None)
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "pnode:snode" is split at the first colon into
  its two node names; any other value (including None or the empty
  string) is returned as-is, paired with None.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  Restored the fallback branch: when there are no variants, the OS is
  known only under its base name, so a one-element list is returned
  instead of unconditionally evaluating the comprehension (which would
  crash on None and return [] for an empty variant list).

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  Restored the missing early return: when the user selected nothing,
  the default field list is returned unchanged.  A selection starting
  with "+" extends the defaults; anything else replaces them.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the list of fields to use

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias for running a CLI function with RPC set up; see
# rpc.RunWithRPC for the actual semantics.
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the
      operation')]; note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  # NOTE(review): parts of this function (the "choices is None" guard, the
  # try/except around the tty open and the read loop) appear elided by
  # extraction; visible statements preserved unchanged.
  choices = [("y", True, "Perform the operation"),
             ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer: the last choice's return value (used when no tty)
  answer = choices[-1][1]
  # re-wrap the question text to 70 columns, preserving line breaks
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  f = file("/dev/tty", "a+")
  # build the "y/n/[?]" style prompt; brackets mark the default choice
  chars = [entry[0] for entry in choices]
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])
  f.write("/".join(chars))
  line = f.readline(2).strip().lower()
  # help requested: print one line per choice
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): the client-creation fallback and the "return job_id" line
  # appear elided by extraction.
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for job changes via the data callbacks, forwards log
  entries to the reporting callbacks, and finally raises on job failure
  or returns the per-opcode results on success.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  # NOTE(review): the surrounding "while True" loop and several conditionals
  # appear elided by extraction; visible statements preserved unchanged.
  prev_job_info = None
  prev_logmsg_serial = None

  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # remember the highest serial seen so old messages are not re-reported
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # job finished: fetch the final status and per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # find the first failed opcode and raise with as much detail as possible
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)
      raise errors.OpExecError("partial failure (opcode %d): %s" %
      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} data callbacks.

  """
  # NOTE(review): the "def __init__(self):" header appears elided here.
    """Initializes this class.

    """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  # NOTE(review): the "def __init__(self):" header appears elided here.
    """Initializes this class.

    """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks implemented on top of a luxi client.
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    # NOTE(review): the "self.cl = cl" assignment appears elided here; the
    # methods below read self.cl.

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a feedback function.
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # NOTE(review): the method body appears elided here (presumably a no-op).
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that write to stdout/stderr.
  # NOTE(review): the "def __init__(self):" header appears elided here.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # flags ensuring the queued/waiting notices are printed only once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Payloads whose type is not ELOG_MESSAGE are first coerced to their
  string representation; the result is always passed through
  L{utils.SafeEncode} before being returned.

  """
  text = log_msg if log_type == constants.ELOG_MESSAGE else str(log_msg)
  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  # NOTE(review): the client creation and the conditionals selecting between
  # feedback_fn and an explicit reporter appear elided by extraction.
  if reporter is None:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
    reporter = StdioJobPollReportCb()
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # NOTE(review): the client-creation fallback and the reporter keyword on
  # the PollJob call appear elided by extraction.
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  # single opcode submitted, so a single result is returned
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    # NOTE(review): the "job = [op]" line appears elided by extraction.
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # raising JobSubmittedException is the structured "print the id and
    # exit" path, not an error
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # NOTE(review): the early "if not options: return" guard appears elided by
  # extraction.
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      # translate the symbolic priority name into its numeric value
      op.priority = _PRIONAME_TO_VALUE[options.priority]
# TODO: Cache object?
# NOTE(review): the enclosing "def GetClient():" header appears elided by
# extraction; the statements below are its body.  It creates a luxi client,
# falling back to an explanatory error when no master daemon is reachable.
try:
  client = luxi.Client()
except luxi.NoMasterError:
  ss = ssconf.SimpleStore()

  # Try to read ssconf file
  except errors.ConfigurationError:
    raise errors.OpPrereqError("Cluster not initialized or this machine is"
                               " not part of a cluster")

  master, myself = ssconf.GetMasterAndMyself(ss=ss)
  if master != myself:
    raise errors.OpPrereqError("This is not the master node, please connect"
                               " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): the initialization of retcode/obuf/msg and several branch
  # lines appear elided by extraction; visible statements preserved.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
        obuf.write(" node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # the args of a ResolverError start with the requested name
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: report the submitted job's ID
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands

  """
  # NOTE(review): several control-flow lines (try/except around argv
  # handling, sys.exit calls, the final return) appear elided by extraction.
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0])
  binary = sys.argv[0]

  if len(sys.argv) >= 2:
    logname = utils.ShellQuoteArgs([binary, sys.argv[1]])

  cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  binary = "<unknown program>"
  cmdline = "<unknown>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # apply script-level overrides on top of the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  # NOTE(review): the "try:" header, the per-NIC merge into the nics list and
  # the final return appear elided by extraction.
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # pre-size the list so NICs may be given in any index order
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  # NOTE(review): this function appears heavily elided by extraction (the
  # instance-name extraction, NIC defaults, several try headers, the
  # import-mode keyword arguments of the opcode, and the final return are
  # missing); visible statements preserved unchanged.
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # translate the legacy single-disk "-s" option into the disks list
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    # pre-size the list so disks may be given in any index order
    disks = [{}] * disk_max

    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        # adopted volumes carry their own size; 0 acts as a placeholder
        ddict[constants.IDISK_SIZE] = 0
        raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
# NOTE(review): this listing is a mangled extraction -- each line still
# carries its original file line number, indentation is lost, and several
# source lines (e.g. "else:", "try:", "finally:", "if result.failed:") are
# missing.  Only comment lines were added below; the code lines are
# untouched.
2399 class _RunWhileClusterStoppedHelper:
2400 """Helper class for L{RunWhileClusterStopped} to simplify state management
2403 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2404 """Initializes this class.
2406 @type feedback_fn: callable
2407 @param feedback_fn: Feedback function
2408 @type cluster_name: string
2409 @param cluster_name: Cluster name
2410 @type master_node: string
2411 @param master_node Master node name
2412 @type online_nodes: list
2413 @param online_nodes: List of names of online nodes
# Remember the cluster topology; the SSH runner is used by _RunCmd for
# commands that must run on nodes other than the master.
2416 self.feedback_fn = feedback_fn
2417 self.cluster_name = cluster_name
2418 self.master_node = master_node
2419 self.online_nodes = online_nodes
2421 self.ssh = ssh.SshRunner(self.cluster_name)
# Pre-compute the non-master nodes so that daemons can later be restarted
# on them first, master last (see Call below).
2423 self.nonmaster_nodes = [name for name in online_nodes
2424 if name != master_node]
2426 assert self.master_node not in self.nonmaster_nodes
2428 def _RunCmd(self, node_name, cmd):
2429 """Runs a command on the local or a remote machine.
2431 @type node_name: string
2432 @param node_name: Machine name
# Runs locally when targeting the master (or node_name is None), over SSH
# as root otherwise.  On failure an OpExecError is raised (the
# "if result.failed:" guard appears to be missing from this listing --
# TODO confirm against upstream).
2437 if node_name is None or node_name == self.master_node:
2438 # No need to use SSH
2439 result = utils.RunCmd(cmd)
2441 result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2444 errmsg = ["Failed to run command %s" % result.cmd]
2446 errmsg.append("on node %s" % node_name)
2447 errmsg.append(": exitcode %s and error %s" %
2448 (result.exit_code, result.output))
2449 raise errors.OpExecError(" ".join(errmsg))
2451 def Call(self, fn, *args):
2452 """Call function while all daemons are stopped.
2455 @param fn: Function to be called
# Block the watcher first so it cannot restart the daemons underneath us.
2458 # Pause watcher by acquiring an exclusive lock on watcher state file
2459 self.feedback_fn("Blocking watcher")
2460 watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2462 # TODO: Currently, this just blocks. There's no timeout.
2463 # TODO: Should it be a shared lock?
2464 watcher_block.Exclusive(blocking=True)
2466 # Stop master daemons, so that no new jobs can come in and all running
2468 self.feedback_fn("Stopping master daemons")
2469 self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2471 # Stop daemons on all nodes
2472 for node_name in self.online_nodes:
2473 self.feedback_fn("Stopping daemons on %s" % node_name)
2474 self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2476 # All daemons are shut down now
# fn is invoked with this helper as its first argument so the callee can
# use _RunCmd itself; exceptions are reported via feedback_fn (the
# surrounding try/except/finally lines are missing from this listing).
2478 return fn(self, *args)
2479 except Exception, err:
2480 _, errmsg = FormatError(err)
2481 logging.exception("Caught exception")
2482 self.feedback_fn(errmsg)
# Restart happens in cleanup (presumably a finally: block -- TODO confirm),
# non-master nodes first and the master last, then the watcher lock is
# released.
2485 # Start cluster again, master node last
2486 for node_name in self.nonmaster_nodes + [self.master_node]:
2487 self.feedback_fn("Starting daemons on %s" % node_name)
2488 self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2491 watcher_block.Close()
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; the line creating the luxi client (presumably
# "cl = GetClient()" -- TODO confirm) and the later "del cl" are missing
# from this listing.  Only comment lines were added.
2494 def RunWhileClusterStopped(feedback_fn, fn, *args):
2495 """Calls a function while all cluster daemons are stopped.
2497 @type feedback_fn: callable
2498 @param feedback_fn: Feedback function
2500 @param fn: Function to be called when daemons are stopped
# Query cluster name, master node and the online node list up front,
# while the master daemon is still reachable.
2503 feedback_fn("Gathering cluster information")
2505 # This ensures we're running on the master daemon
2508 (cluster_name, master_node) = \
2509 cl.QueryConfigValues(["cluster_name", "master_node"])
2511 online_nodes = GetOnlineNodes([], cl=cl)
2513 # Don't keep a reference to the client. The master daemon will go away.
2516 assert master_node in online_nodes
# Delegate stopping, calling fn, and restarting the daemons to the helper.
2518 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2519 online_nodes).Call(fn, *args)
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost, and several code lines are missing (the defaulting of
# numfields/unitfields to [], the "units" parameter in the signature, list
# initializers such as "result = []"/"args = []", the inner "try:" around
# the unit conversion, and the row/header loop openers).  Only comment
# lines were added.
2522 def GenerateTable(headers, fields, separator, data,
2523 numfields=None, unitfields=None,
2525 """Prints a table with headers and different fields.
2528 @param headers: dictionary mapping field names to headers for
2531 @param fields: the field names corresponding to each row in
2533 @param separator: the separator to be used; if this is None,
2534 the default 'smart' algorithm is used which computes optimal
2535 field width, otherwise just the separator is used between
2538 @param data: a list of lists, each sublist being one row to be output
2539 @type numfields: list
2540 @param numfields: a list with the fields that hold numeric
2541 values and thus should be right-aligned
2542 @type unitfields: list
2543 @param unitfields: a list with the fields that hold numeric
2544 values that should be formatted with the units field
2545 @type units: string or None
2546 @param units: the units we should use for formatting, or None for
2547 automatic choice (human-readable for non-separator usage, otherwise
2548 megabytes); this is a one-letter string
2557 if numfields is None:
2559 if unitfields is None:
# FieldSet supports wildcard matching of field names (see Matches below).
2562 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2563 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
# Build one printf-style conversion per field: a plain "%s" when a
# separator is used, otherwise "%*s"/"%-*s" so the computed column widths
# can be passed as arguments later.
2566 for field in fields:
2567 if headers and field not in headers:
2568 # TODO: handle better unknown fields (either revert to old
2569 # style of raising exception, or deal more intelligently with
2571 headers[field] = field
2572 if separator is not None:
2573 format_fields.append("%s")
2574 elif numfields.Matches(field):
2575 format_fields.append("%*s")
2577 format_fields.append("%-*s")
2579 if separator is None:
2580 mlens = [0 for name in fields]
2581 format_str = " ".join(format_fields)
# Escape "%" in the user-supplied separator so it survives the final
# format_str % args interpolation.
2583 format_str = separator.replace("%", "%%").join(format_fields)
# Convert unit fields and stringify every cell, tracking the maximum width
# per column for the smart (separator-less) layout.
2588 for idx, val in enumerate(row):
2589 if unitfields.Matches(fields[idx]):
2592 except (TypeError, ValueError):
2595 val = row[idx] = utils.FormatUnit(val, units)
2596 val = row[idx] = str(val)
2597 if separator is None:
2598 mlens[idx] = max(mlens[idx], len(val))
# Emit the header row (hdr presumably comes from headers[name] -- TODO
# confirm; the assignment is missing from this listing).
2603 for idx, name in enumerate(fields):
2605 if separator is None:
2606 mlens[idx] = max(mlens[idx], len(hdr))
2607 args.append(mlens[idx])
2609 result.append(format_str % tuple(args))
2611 if separator is None:
2612 assert len(mlens) == len(fields)
# A left-aligned last column needs no trailing padding.
2614 if fields and not numfields.Matches(fields[-1]):
2620 line = ["-" for _ in fields]
2621 for idx in range(len(fields)):
2622 if separator is None:
2623 args.append(mlens[idx])
2624 args.append(line[idx])
2625 result.append(format_str % tuple(args))
2630 def _FormatBool(value):
2631 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  # Numbers are right-aligned in table output
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
# constants.QFT_UNIT is deliberately absent: its formatter depends on the
# caller-chosen unit, see _GetColumnFormatter (which asserts this).
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; the early "if fmt is not None: return fmt" lines after
# the two lookups appear to be missing from this listing.  Only comment
# lines were added.
2650 def _GetColumnFormatter(fdef, override, unit):
2651 """Returns formatting function for a field.
2653 @type fdef: L{objects.QueryFieldDefinition}
2654 @type override: dict
2655 @param override: Dictionary for overriding field formatting functions,
2656 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2658 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2659 @rtype: tuple; (callable, bool)
2660 @return: Returns the function to format a value (takes one parameter) and a
2661 boolean for aligning the value on the right-hand side
# Per-field overrides take precedence over the per-type defaults.
2664 fmt = override.get(fdef.name, None)
2668 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2670 if fdef.kind == constants.QFT_UNIT:
2671 # Can't keep this information in the static dictionary
# The unit is caller-chosen, hence the closure; unit fields are numeric
# and therefore right-aligned (the True in the tuple).
2672 return (lambda value: utils.FormatUnit(value, unit), True)
2674 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
# Reached only when the field kind has no default formatter.
2678 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; the "self._fn = fn" assignment appears to be missing
# from this listing (self._fn is used in __call__).  Only comment lines
# were added.
2681 class _QueryColumnFormatter:
2682 """Callable class for formatting fields of a query.
2685 def __init__(self, fn, status_fn, verbose):
2686 """Initializes this class.
2689 @param fn: Formatting function
2690 @type status_fn: callable
2691 @param status_fn: Function to report fields' status
2692 @type verbose: boolean
2693 @param verbose: whether to use verbose field descriptions or not
2697 self._status_fn = status_fn
2698 self._verbose = verbose
2700 def __call__(self, data):
2701 """Returns a field's string representation.
# data is a (status, value) pair as returned by the query layer.
2704 (status, value) = data
# Report the status to the caller-provided callback so overall query
# statistics can be collected (see FormatQueryResult).
2707 self._status_fn(status)
2709 if status == constants.RS_NORMAL:
2710 return self._fn(value)
# Abnormal statuses carry no value; render a status-specific marker.
2712 assert value is None, \
2713 "Found value %r for abnormal status %s" % (value, status)
2715 return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"
  # The try/except scaffolding and the return branches were missing from
  # the garbled listing; restored here.  RSS_DESCRIPTION maps each
  # abnormal status to a (verbose, terse) text pair.
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; missing lines include the "columns = []" initializer,
# the body of _RecordStatus (presumably "stats[status] += 1" -- TODO
# confirm), the closing arguments of the TableColumn() call, and the
# assignments of QR_UNKNOWN/QR_NORMAL to "status".  Only comment lines
# were added.
2739 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2740 header=False, verbose=False):
2741 """Formats data in L{objects.QueryResponse}.
2743 @type result: L{objects.QueryResponse}
2744 @param result: result of query operation
2746 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2747 see L{utils.text.FormatUnit}
2748 @type format_override: dict
2749 @param format_override: Dictionary for overriding field formatting functions,
2750 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2751 @type separator: string or None
2752 @param separator: String used to separate fields
2754 @param header: Whether to output header row
2755 @type verbose: boolean
2756 @param verbose: whether to use verbose field descriptions or not
2765 if format_override is None:
2766 format_override = {}
# One counter per possible result status; updated via _RecordStatus each
# time a cell is formatted.
2768 stats = dict.fromkeys(constants.RS_ALL, 0)
2770 def _RecordStatus(status):
# Build one TableColumn per field, wiring the per-type formatter together
# with the status-recording callback.
2775 for fdef in result.fields:
2776 assert fdef.title and fdef.name
2777 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2778 columns.append(TableColumn(fdef.title,
2779 _QueryColumnFormatter(fn, _RecordStatus,
2783 table = FormatTable(result.data, columns, header, separator)
2785 # Collect statistics
2786 assert len(stats) == len(constants.RS_ALL)
2787 assert compat.all(count >= 0 for count in stats.values())
2789 # Determine overall status. If there was no data, unknown fields must be
2790 # detected via the field definitions.
2791 if (stats[constants.RS_UNKNOWN] or
2792 (not result.data and _GetUnknownFields(result.fields))):
2794 elif compat.any(count > 0 for key, count in stats.items()
2795 if key != constants.RS_NORMAL):
2796 status = QR_INCOMPLETE
# Returns the overall status (QR_UNKNOWN/QR_INCOMPLETE/QR_NORMAL) plus the
# formatted table lines.
2800 return (status, table)
def _GetUnknownFields(fdefs):
  """Return the subset of C{fdefs} whose kind is unknown.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: field definitions with kind L{constants.QFT_UNKNOWN}

  """
  return [field_def for field_def in fdefs
          if field_def.kind == constants.QFT_UNKNOWN]
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; the "if unknown:" guard and the boolean return
# (callers such as GenericList/GenericListFields use the return value to
# detect unknown fields) are missing from this listing.  Only comment
# lines were added.
2813 def _WarnUnknownFields(fdefs):
2814 """Prints a warning to stderr if a query included unknown fields.
2816 @type fdefs: list of L{objects.QueryFieldDefinition}
2819 unknown = _GetUnknownFields(fdefs)
# Warning is emitted only when unknown fields were actually queried.
2821 ToStderr("Warning: Queried for unknown fields %s",
2822 utils.CommaJoin(fdef.name for fdef in unknown))
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; missing lines include the "if qfilter is None:" guard,
# the default client creation (presumably "cl = GetClient()" -- TODO
# confirm), and the verbose/header keyword arguments of the
# FormatQueryResult call.  Only comment lines were added.
2828 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2829 format_override=None, verbose=False, force_filter=False,
2830 namefield=None, qfilter=None):
2831 """Generic implementation for listing all items of a resource.
2833 @param resource: One of L{constants.QR_VIA_LUXI}
2834 @type fields: list of strings
2835 @param fields: List of fields to query for
2836 @type names: list of strings
2837 @param names: Names of items to query for
2838 @type unit: string or None
2839 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2840 None for automatic choice (human-readable for non-separator usage,
2841 otherwise megabytes); this is a one-letter string
2842 @type separator: string or None
2843 @param separator: String used to separate fields
2845 @param header: Whether to show header row
2846 @type force_filter: bool
2847 @param force_filter: Whether to always treat names as filter
2848 @type format_override: dict
2849 @param format_override: Dictionary for overriding field formatting functions,
2850 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2851 @type verbose: boolean
2852 @param verbose: whether to use verbose field descriptions or not
2853 @type namefield: string
2854 @param namefield: Name of field to use for simple filters (see
2855 L{qlang.MakeFilter} for details)
2856 @type qfilter: list or None
2857 @param qfilter: Query filter (in addition to names)
# Combine the name-based filter with the caller-supplied one (AND-ed
# together when both are present).
2863 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield)
2866 qfilter = namefilter
2867 elif namefilter is not None:
2868 qfilter = [qlang.OP_AND, namefilter, qfilter]
2873 response = cl.Query(resource, fields, qfilter)
2875 found_unknown = _WarnUnknownFields(response.fields)
2877 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2879 format_override=format_override,
# Unknown fields reported by the warning helper must agree with the
# formatter's overall status.
2885 assert ((found_unknown and status == QR_UNKNOWN) or
2886 (not found_unknown and status != QR_UNKNOWN))
2888 if status == QR_UNKNOWN:
2889 return constants.EXIT_UNKNOWN_FIELD
2891 # TODO: Should the list command fail if not all data could be collected?
2892 return constants.EXIT_SUCCESS
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; missing lines include the default client creation,
# the "columns = [" list opener, the ToStdout call printing each table
# line, and the "if _GetUnknownFields(...)" guard before the
# EXIT_UNKNOWN_FIELD return.  Only comment lines were added.
2895 def GenericListFields(resource, fields, separator, header, cl=None):
2896 """Generic implementation for listing fields for a resource.
2898 @param resource: One of L{constants.QR_VIA_LUXI}
2899 @type fields: list of strings
2900 @param fields: List of fields to query for
2901 @type separator: string or None
2902 @param separator: String used to separate fields
2904 @param header: Whether to show header row
2913 response = cl.QueryFields(resource, fields)
2915 found_unknown = _WarnUnknownFields(response.fields)
# Fixed three-column layout describing each available field.
2918 TableColumn("Name", str, False),
2919 TableColumn("Title", str, False),
2920 TableColumn("Description", str, False),
2923 rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2925 for line in FormatTable(rows, columns, header, separator):
2929 return constants.EXIT_UNKNOWN_FIELD
2931 return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    # The class statement and the title/format assignments were missing
    # from the garbled listing; restored here.  Attribute names match what
    # the callers read: FormatTable uses col.title, col.format(value) and
    # col.align_right.
    self.title = title
    self.format = fn
    self.align_right = align_right
2954 def _GetColFormatString(width, align_right):
2955 """Returns the format string for a field.
2963 return "%%%s%ss" % (sign, width)
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; missing lines include the "if header:"/"else:" openers
# around the data/colwidth initialization, the "for row in rows:" opener,
# and the statement zeroing the last column's width after line 3005
# (presumably "colwidth[-1] = 0" -- TODO confirm).  Only comment lines
# were added.
2966 def FormatTable(rows, columns, header, separator):
2967 """Formats data as a table.
2969 @type rows: list of lists
2970 @param rows: Row data, one list per row
2971 @type columns: list of L{TableColumn}
2972 @param columns: Column descriptions
2974 @param header: Whether to show header row
2975 @type separator: string or None
2976 @param separator: String used to separate columns
# With a header the title row seeds both the output data and the initial
# column widths; otherwise widths start at zero.
2980 data = [[col.title for col in columns]]
2981 colwidth = [len(col.title) for col in columns]
2984 colwidth = [0 for _ in columns]
2988 assert len(row) == len(columns)
2990 formatted = [col.format(value) for value, col in zip(row, columns)]
2992 if separator is None:
2993 # Update column widths
2994 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2995 # Modifying a list's items while iterating is fine
2996 colwidth[idx] = max(oldwidth, len(value))
2998 data.append(formatted)
3000 if separator is not None:
3001 # Return early if a separator is used
3002 return [separator.join(row) for row in data]
3004 if columns and not columns[-1].align_right:
3005 # Avoid unnecessary spaces at end of line
3008 # Build format string
3009 fmt = " ".join([_GetColFormatString(width, col.align_right)
3010 for col, width in zip(columns, colwidth)])
3012 return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # The fallback return and the tuple unpacking were missing from the
  # garbled listing (sec/usecs were used without being bound); restored
  # here.  Anything that is not a two-element sequence is unformattable.
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; missing lines include the docstring's suffix table,
# the definition of suffix_map, both "try:" openers, the suffix-stripping
# of value, and the final "return value".  Only comment lines were added
# (plus a docstring typo fix: "suffixed" -> "suffixes").
3032 def ParseTimespec(value):
3033 """Parse a time specification.
3035 The following suffixes will be recognized:
3043 Without any suffix, the value will be taken to be in seconds.
3048 raise errors.OpPrereqError("Empty time specification passed")
# If the last character is a known suffix, it selects a multiplier and is
# stripped; suffix_map is defined on a line missing from this listing.
3056 if value[-1] not in suffix_map:
3059 except (TypeError, ValueError):
3060 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
3062 multiplier = suffix_map[value[-1]]
3064 if not value: # no data left after stripping the suffix
3065 raise errors.OpPrereqError("Invalid time specification (only"
# The remaining digits are converted to an integer number of seconds.
3068 value = int(value) * multiplier
3069 except (TypeError, ValueError):
3070 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; missing lines include the "qfilter = []" initializer,
# the "if nodes:"/"if filter_master:" guards, the default client creation,
# the bodies/returns of the _IsOffline/_GetName/_GetSip helpers, and the
# selection of fn (name vs secondary IP).  Only comment lines were added.
3074 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3075 filter_master=False, nodegroup=None):
3076 """Returns the names of online nodes.
3078 This function will also log a warning on stderr with the names of
3081 @param nodes: if not empty, use only this subset of nodes (minus the
3083 @param cl: if not None, luxi client to use
3084 @type nowarn: boolean
3085 @param nowarn: by default, this function will output a note with the
3086 offline nodes that are skipped; if this parameter is True the
3087 note is not displayed
3088 @type secondary_ips: boolean
3089 @param secondary_ips: if True, return the secondary IPs instead of the
3090 names, useful for doing network traffic over the replication interface
3092 @type filter_master: boolean
3093 @param filter_master: if True, do not return the master node in the list
3094 (useful in coordination with secondary_ips where we cannot check our
3095 node name against the list)
3096 @type nodegroup: string
3097 @param nodegroup: If set, only return nodes in this node group
# Build the query filter piecewise: name subset, node group (matched by
# either name or UUID), and optionally excluding the master node.
3106 qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3108 if nodegroup is not None:
3109 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3110 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3113 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3116 if len(qfilter) > 1:
3117 final_filter = [qlang.OP_AND] + qfilter
3119 assert len(qfilter) == 1
3120 final_filter = qfilter[0]
3124 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
# Each result row is a list of (status, value) pairs, one per queried
# field, in the order name/offline/sip -- hence the tuple destructuring in
# the helpers below.
3126 def _IsOffline(row):
3127 (_, (_, offline), _) = row
3131 ((_, name), _, _) = row
3135 (_, _, (_, sip)) = row
3138 (offline, online) = compat.partition(result.data, _IsOffline)
3140 if offline and not nowarn:
3141 ToStderr("Note: skipping offline node(s): %s" %
3142 utils.CommaJoin(map(_GetName, offline)))
# fn extracts either the node name or the secondary IP, depending on the
# secondary_ips flag (its assignment is missing from this listing).
3149 return map(fn, online)
3152 def _ToStream(stream, txt, *args):
3153 """Write a message to a stream, bypassing the logging system
3155 @type stream: file object
3156 @param stream: the file to which we should write
3158 @param txt: the message
3164 stream.write(txt % args)
3169 except IOError, err:
3170 if err.errno == errno.EPIPE:
3171 # our terminal went away, we'll exit
3172 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Print a message on standard output, bypassing the logging system.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Print a message on standard error, bypassing the logging system.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; missing lines include the queue/jobs list initializers
# in __init__, the @staticmethod decorator and body of _IfName (presumably
# returning fmt % name when name is set -- TODO confirm), the "if each:"
# branches of SubmitPending, the QueryJobs keyword arguments, loop openers
# and "success = ..." assignments in GetResults, and the "if wait:" branch
# of WaitOrShow.  Only comment lines were added.
3201 class JobExecutor(object):
3202 """Class which manages the submission and execution of multiple jobs.
3204 Note that instances of this class should not be reused between
3208 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3213 self.verbose = verbose
3216 self.feedback_fn = feedback_fn
# Monotonically increasing index used to restore submission order when
# results arrive out of order (see GetResults).
3217 self._counter = itertools.count()
3220 def _IfName(name, fmt):
3221 """Helper function for formatting name.
3229 def QueueJob(self, name, *ops):
3230 """Record a job for later submit.
3233 @param name: a description of the job, will be used in WaitJobSet
3236 SetGenericOpcodeOpts(ops, self.opts)
3237 self.queue.append((self._counter.next(), name, ops))
3239 def AddJobId(self, name, status, job_id):
3240 """Adds a job ID to the internal queue.
3243 self.jobs.append((self._counter.next(), status, job_id, name))
3245 def SubmitPending(self, each=False):
3246 """Submit all pending jobs.
# each=True submits one job per SubmitJob call; otherwise a single
# SubmitManyJobs call is used and per-job (status, data) pairs come back.
3251 for (_, _, ops) in self.queue:
3252 # SubmitJob will remove the success status, but raise an exception if
3253 # the submission fails, so we'll notice that anyway.
3254 results.append([True, self.cl.SubmitJob(ops)[0]])
3256 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3257 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3258 self.jobs.append((idx, status, data, name))
3260 def _ChooseJob(self):
3261 """Choose a non-waiting/queued job to poll next.
3264 assert self.jobs, "_ChooseJob called with empty job list"
# Only a bounded batch (_CHOOSE_BATCH) is queried to keep the QueryJobs
# call small; a job whose status query returns something other than a
# queued/waiting/canceling state (including a lost job) is picked.
3266 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3270 for job_data, status in zip(self.jobs, result):
3271 if (isinstance(status, list) and status and
3272 status[0] in (constants.JOB_STATUS_QUEUED,
3273 constants.JOB_STATUS_WAITING,
3274 constants.JOB_STATUS_CANCELING)):
3275 # job is still present and waiting
3277 # good candidate found (either running job or lost job)
3278 self.jobs.remove(job_data)
# Fallback when every polled job is still waiting: just take the first.
3282 return self.jobs.pop(0)
3284 def GetResults(self):
3285 """Wait for and return the results of all jobs.
3288 @return: list of tuples (success, job results), in the same order
3289 as the submitted jobs; if a job has failed, instead of the result
3290 there will be the error message
3294 self.SubmitPending()
3297 ok_jobs = [row[2] for row in self.jobs if row[1]]
3299 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3301 # first, remove any non-submitted jobs
3302 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3303 for idx, _, jid, name in failures:
3304 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3305 results.append((idx, False, jid))
# Poll the submitted jobs one at a time, preferring ones that are already
# running; archived or failed jobs contribute their error message instead
# of a result.
3308 (idx, _, jid, name) = self._ChooseJob()
3309 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3311 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3313 except errors.JobLost, err:
3314 _, job_result = FormatError(err)
3315 ToStderr("Job %s%s has been archived, cannot check its result",
3316 jid, self._IfName(name, " for %s"))
3318 except (errors.GenericError, luxi.ProtocolError), err:
3319 _, job_result = FormatError(err)
3321 # the error message will always be shown, verbose or not
3322 ToStderr("Job %s%s has failed: %s",
3323 jid, self._IfName(name, " for %s"), job_result)
3325 results.append((idx, success, job_result))
3327 # sort based on the index, then drop it
3329 results = [i[1:] for i in results]
3333 def WaitOrShow(self, wait):
3334 """Wait for job results or only print the job IDs.
3337 @param wait: whether to wait or not
3341 return self.GetResults()
3344 self.SubmitPending()
3345 for _, status, result, name in self.jobs:
3347 ToStdout("%s: %s", result, name)
3349 ToStderr("Failure for %s: %s", name, result)
3350 return [row[1:3] for row in self.jobs]
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; missing lines include the "data = actual[key]"
# assignment, the newline written before recursing, and the incremented
# level argument of the recursive call.  Only comment lines were added.
3353 def FormatParameterDict(buf, param_dict, actual, level=1):
3354 """Formats a parameter dictionary.
3356 @type buf: L{StringIO}
3357 @param buf: the buffer into which to write
3358 @type param_dict: dict
3359 @param param_dict: the own parameters
3361 @param actual: the current parameter set (including defaults)
3362 @param level: Level of indent
3365 indent = " " * level
# Iterate the full (defaults-included) set so every effective parameter is
# shown; values not explicitly set are labelled "default (...)".
3367 for key in sorted(actual):
3369 buf.write("%s- %s:" % (indent, key))
# Nested non-empty dicts are rendered recursively with deeper indent.
3371 if isinstance(data, dict) and data:
3373 FormatParameterDict(buf, param_dict.get(key, {}), data,
3376 val = param_dict.get(key, "default (%s)" % data)
3377 buf.write(" %s\n" % val)
# NOTE(review): mangled extraction -- original line numbers embedded,
# indentation lost; missing lines include the "count = len(names)"
# assignment, the threshold deciding whether the affected list is shown
# inline or behind the "v" choice, the loop re-asking while the user picks
# "v", and the final "return choice".  Only comment lines were added.
3380 def ConfirmOperation(names, list_type, text, extra=""):
3381 """Ask the user to confirm an operation on a list of list_type.
3383 This function is used to request confirmation for doing an operation
3384 on a given list of list_type.
3387 @param names: the list of names that we display when
3388 we ask for confirmation
3389 @type list_type: str
3390 @param list_type: Human readable name for elements in the list (e.g. nodes)
3392 @param text: the operation that the user should confirm
3394 @return: True or False depending on user's confirmation.
3398 msg = ("The %s will operate on %d %s.\n%s"
3399 "Do you want to continue?" % (text, count, list_type, extra))
3400 affected = (("\nAffected %s:\n" % list_type) +
3401 "\n".join([" %s" % name for name in names]))
3403 choices = [("y", True, "Yes, execute the %s" % text),
3404 ("n", False, "No, abort the %s" % text)]
# For long lists a "v" (view) choice is offered instead of printing the
# affected items up front.
3407 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3410 question = msg + affected
3412 choice = AskUser(question, choices)
# Re-ask with the full list appended after the user chose "v".
3415 choice = AskUser(msg + affected, choices)
3419 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3420 ispecs_cpu_count=None,
3421 ispecs_disk_count=None,
3422 ispecs_disk_size=None,
3423 ispecs_nic_count=None,
3424 ipolicy_disk_templates=None,
3425 ipolicy_vcpu_ratio=None,
3426 group_ipolicy=False,
3427 allowed_values=None,
3429 """Creation of instance policy based on command line options.
3431 @param fill_all: whether for cluster policies we should ensure that
3432 all values are filled
3438 for k in ispecs_mem_size:
3439 ispecs_mem_size[k] = utils.ParseUnit(ispecs_mem_size[k])
3440 if ispecs_disk_size:
3441 for k in ispecs_disk_size:
3442 ispecs_disk_size[k] = utils.ParseUnit(ispecs_disk_size[k])
3443 except (TypeError, ValueError, errors.UnitParseError), err:
3444 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3446 (ispecs_disk_size, ispecs_mem_size, err),
3449 # prepare ipolicy dict
3450 ipolicy_transposed = {
3451 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3452 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3453 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3454 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3455 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3458 # first, check that the values given are correct
3460 forced_type = TISPECS_GROUP_TYPES
3462 forced_type = TISPECS_CLUSTER_TYPES
3464 for specs in ipolicy_transposed.values():
3465 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3468 ipolicy_out = objects.MakeEmptyIPolicy()
3469 for name, specs in ipolicy_transposed.iteritems():
3470 assert name in constants.ISPECS_PARAMETERS
3471 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3472 ipolicy_out[key][name] = val
3474 # no filldict for non-dicts
3475 if not group_ipolicy and fill_all:
3476 if ipolicy_disk_templates is None:
3477 ipolicy_disk_templates = constants.DISK_TEMPLATES
3478 if ipolicy_vcpu_ratio is None:
3479 ipolicy_vcpu_ratio = \
3480 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3481 if ipolicy_disk_templates is not None:
3482 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3483 if ipolicy_vcpu_ratio is not None:
3484 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3486 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)