4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
import sys

from cStringIO import StringIO
from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
53 # Command line options
67 "CLUSTER_DOMAIN_SECRET_OPT",
85 "FILESTORE_DRIVER_OPT",
91 "GLOBAL_SHARED_FILEDIR_OPT",
96 "DEFAULT_IALLOCATOR_OPT",
97 "IDENTIFY_DEFAULTS_OPT",
100 "IGNORE_FAILURES_OPT",
101 "IGNORE_OFFLINE_OPT",
102 "IGNORE_REMOVE_FAILURES_OPT",
103 "IGNORE_SECONDARIES_OPT",
107 "MAINTAIN_NODE_HEALTH_OPT",
109 "MASTER_NETMASK_OPT",
111 "MIGRATION_MODE_OPT",
113 "NEW_CLUSTER_CERT_OPT",
114 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
115 "NEW_CONFD_HMAC_KEY_OPT",
118 "NEW_SPICE_CERT_OPT",
120 "NODE_FORCE_JOIN_OPT",
122 "NODE_PLACEMENT_OPT",
126 "NODRBD_STORAGE_OPT",
132 "NOMODIFY_ETCHOSTS_OPT",
133 "NOMODIFY_SSH_SETUP_OPT",
137 "NORUNTIME_CHGS_OPT",
140 "NOSSH_KEYCHECK_OPT",
154 "PREALLOC_WIPE_DISKS_OPT",
155 "PRIMARY_IP_VERSION_OPT",
161 "REMOVE_INSTANCE_OPT",
167 "SECONDARY_ONLY_OPT",
171 "SHUTDOWN_TIMEOUT_OPT",
173 "SPECS_CPU_COUNT_OPT",
174 "SPECS_DISK_COUNT_OPT",
175 "SPECS_DISK_SIZE_OPT",
176 "SPECS_MEM_SIZE_OPT",
177 "SPECS_NIC_COUNT_OPT",
178 "IPOLICY_DISK_TEMPLATES",
179 "IPOLICY_VCPU_RATIO",
185 "STARTUP_PAUSED_OPT",
194 "USE_EXTERNAL_MIP_SCRIPT",
201 "IGNORE_IPOLICY_OPT",
202 "INSTANCE_POLICY_OPTS",
203 # Generic functions for CLI programs
205 "CreateIPolicyFromOpts",
207 "GenericInstanceCreate",
213 "JobSubmittedException",
215 "RunWhileClusterStopped",
219 # Formatting functions
220 "ToStderr", "ToStdout",
223 "FormatParameterDict",
232 # command line options support infrastructure
233 "ARGS_MANY_INSTANCES",
252 "OPT_COMPL_INST_ADD_NODES",
253 "OPT_COMPL_MANY_NODES",
254 "OPT_COMPL_ONE_IALLOCATOR",
255 "OPT_COMPL_ONE_INSTANCE",
256 "OPT_COMPL_ONE_NODE",
257 "OPT_COMPL_ONE_NODEGROUP",
263 "COMMON_CREATE_OPTS",
#: Priorities (sorted from lowest to highest), as (name, value) pairs so the
#: relative order is preserved for display purposes
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients: normal (all data returned), unknown
# (some requested fields are unknown) and incomplete (partial results)
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
286 #: Maximum batch size for ChooseJob
# constants used to create InstancePolicy dictionary; these map each ispec
# key to the value type expected when parsing user input
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

# cluster-level specs additionally carry a "std" (default) value
TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }
304 def __init__(self, min=0, max=None): # pylint: disable=W0622
309 return ("<%s min=%s max=%s>" %
310 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # suggestions offered to the user (e.g. for shell completion); unlike
    # L{ArgChoice}, the value is not restricted to these
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """
#: Pre-built argument specifications for common command signatures
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# NOTE(review): extraction gap here — the original file may define additional
# ARGS_* specifications between the two lines below; verify against upstream
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type}
      attribute denoting the object type (cluster/node group/node/instance)
  @param args: positional arguments; for non-cluster tag types the first
      element is popped off and used as the object name
  @return: tuple of (kind, name)
  @raise errors.ProgrammerError: if C{tag_type} was not set by the caller
  @raise errors.OpPrereqError: if a name argument is required but missing

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, so the kind doubles as the name
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
426 def _ExtendTags(opts, args):
427 """Extend the args if a source file has been given.
429 This function will extend the tags with the contents of the file
430 passed in the 'tags_source' attribute of the opts parameter. A file
431 named '-' will be replaced by stdin.
434 fname = opts.tags_source
440 new_fh = open(fname, "r")
443 # we don't use the nice 'new_data = [line.strip() for line in fh]'
444 # because of python bug 1633941
446 line = new_fh.readline()
449 new_data.append(line.strip())
452 args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  # sort for stable, user-friendly output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # tags may also come from a file given via --from
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # tags may also come from a file given via --from
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
507 def check_unit(option, opt, value): # pylint: disable=W0613
508 """OptParsers custom converter for units.
512 return utils.ParseUnit(value)
513 except errors.UnitParseError, err:
514 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the
  prefix 'un_' get value=None, the others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # valueless elements: interpret the prefix to pick the value
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    # "un_" only means removal when not followed by a digit (to allow
    # idents that legitimately start with "un" plus a number)
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raises errors.ParameterError: if the value is not a recognized boolean

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: All dynamic-completion markers, for quick membership tests
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends the standard optparse Option with our custom value types
  (identkeyval, keyval, unit, bool, list) and a "completion_suggest"
  attribute used for shell completion.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
669 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
670 help="Increase debugging level")
672 NOHDR_OPT = cli_option("--no-headers", default=False,
673 action="store_true", dest="no_headers",
674 help="Don't display column headers")
676 SEP_OPT = cli_option("--separator", default=None,
677 action="store", dest="separator",
678 help=("Separator between output fields"
679 " (defaults to one space)"))
681 USEUNITS_OPT = cli_option("--units", default=None,
682 dest="units", choices=("h", "m", "g", "t"),
683 help="Specify units for output (one of h/m/g/t)")
685 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
686 type="string", metavar="FIELDS",
687 help="Comma separated list of output fields")
689 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
690 default=False, help="Force the operation")
692 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
693 default=False, help="Do not require confirmation")
695 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
696 action="store_true", default=False,
697 help=("Ignore offline nodes and do as much"
700 TAG_ADD_OPT = cli_option("--tags", dest="tags",
701 default=None, help="Comma-separated list of instance"
704 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
705 default=None, help="File with tag names")
707 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
708 default=False, action="store_true",
709 help=("Submit the job and return the job ID, but"
710 " don't wait for the job to finish"))
712 SYNC_OPT = cli_option("--sync", dest="do_locking",
713 default=False, action="store_true",
714 help=("Grab locks while doing the queries"
715 " in order to ensure more consistent results"))
717 DRY_RUN_OPT = cli_option("--dry-run", default=False,
719 help=("Do not execute the operation, just run the"
720 " check steps and verify it it could be"
723 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
725 help="Increase the verbosity of the operation")
727 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
728 action="store_true", dest="simulate_errors",
729 help="Debugging option that makes the operation"
730 " treat most runtime checks as failed")
732 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
733 default=True, action="store_false",
734 help="Don't wait for sync (DANGEROUS!)")
736 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
737 action="store_true", default=False,
738 help="Enable offline instance")
740 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
741 action="store_true", default=False,
742 help="Disable down instance")
744 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
745 help=("Custom disk setup (%s)" %
746 utils.CommaJoin(constants.DISK_TEMPLATES)),
747 default=None, metavar="TEMPL",
748 choices=list(constants.DISK_TEMPLATES))
750 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
751 help="Do not create any network cards for"
754 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
755 help="Relative path under default cluster-wide"
756 " file storage dir to store file-based disks",
757 default=None, metavar="<DIR>")
759 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
760 help="Driver to use for image files",
761 default="loop", metavar="<DRIVER>",
762 choices=list(constants.FILE_DRIVER))
764 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
765 help="Select nodes for the instance automatically"
766 " using the <NAME> iallocator plugin",
767 default=None, type="string",
768 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
770 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
772 help="Set the default instance allocator plugin",
773 default=None, type="string",
774 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
776 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
778 completion_suggest=OPT_COMPL_ONE_OS)
780 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
781 type="keyval", default={},
782 help="OS parameters")
784 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
785 action="store_true", default=False,
786 help="Force an unknown variant")
788 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
789 action="store_true", default=False,
790 help="Do not install the OS (will"
793 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
794 dest="allow_runtime_chgs",
795 default=True, action="store_false",
796 help="Don't allow runtime changes")
798 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
799 type="keyval", default={},
800 help="Backend parameters")
802 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
803 default={}, dest="hvparams",
804 help="Hypervisor parameters")
806 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
807 help="Disk template parameters, in the format"
808 " template:option=value,option=value,...",
809 type="identkeyval", action="append", default=[])
811 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
812 type="keyval", default={},
813 help="Memory count specs: min, max, std"
816 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
817 type="keyval", default={},
818 help="CPU count specs: min, max, std")
820 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
821 dest="ispecs_disk_count",
822 type="keyval", default={},
823 help="Disk count specs: min, max, std")
825 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
826 type="keyval", default={},
827 help="Disk size specs: min, max, std (in MB)")
829 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
830 type="keyval", default={},
831 help="NIC count specs: min, max, std")
833 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
834 dest="ipolicy_disk_templates",
835 type="list", default=None,
836 help="Comma-separated list of"
837 " enabled disk templates")
839 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
840 dest="ipolicy_vcpu_ratio",
841 type="float", default=None,
842 help="The maximum allowed vcpu-to-cpu ratio")
844 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
845 help="Hypervisor and hypervisor options, in the"
846 " format hypervisor:option=value,option=value,...",
847 default=None, type="identkeyval")
849 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
850 help="Hypervisor and hypervisor options, in the"
851 " format hypervisor:option=value,option=value,...",
852 default=[], action="append", type="identkeyval")
854 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
855 action="store_false",
856 help="Don't check that the instance's IP"
859 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
860 default=True, action="store_false",
861 help="Don't check that the instance's name"
864 NET_OPT = cli_option("--net",
865 help="NIC parameters", default=[],
866 dest="nics", action="append", type="identkeyval")
868 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
869 dest="disks", action="append", type="identkeyval")
871 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
872 help="Comma-separated list of disks"
873 " indices to act on (e.g. 0,2) (optional,"
874 " defaults to all disks)")
876 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
877 help="Enforces a single-disk configuration using the"
878 " given disk size, in MiB unless a suffix is used",
879 default=None, type="unit", metavar="<size>")
881 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
882 dest="ignore_consistency",
883 action="store_true", default=False,
884 help="Ignore the consistency of the disks on"
887 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
888 dest="allow_failover",
889 action="store_true", default=False,
890 help="If migration is not possible fallback to"
893 NONLIVE_OPT = cli_option("--non-live", dest="live",
894 default=True, action="store_false",
895 help="Do a non-live migration (this usually means"
896 " freeze the instance, save the state, transfer and"
897 " only then resume running on the secondary node)")
899 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
901 choices=list(constants.HT_MIGRATION_MODES),
902 help="Override default migration mode (choose"
903 " either live or non-live")
905 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
906 help="Target node and optional secondary node",
907 metavar="<pnode>[:<snode>]",
908 completion_suggest=OPT_COMPL_INST_ADD_NODES)
910 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
911 action="append", metavar="<node>",
912 help="Use only this node (can be used multiple"
913 " times, if not given defaults to all nodes)",
914 completion_suggest=OPT_COMPL_ONE_NODE)
916 NODEGROUP_OPT_NAME = "--node-group"
917 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
919 help="Node group (name or uuid)",
920 metavar="<nodegroup>",
921 default=None, type="string",
922 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
924 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
926 completion_suggest=OPT_COMPL_ONE_NODE)
928 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
929 action="store_false",
930 help="Don't start the instance after creation")
932 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
933 action="store_true", default=False,
934 help="Show command instead of executing it")
936 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
937 default=False, action="store_true",
938 help="Instead of performing the migration, try to"
939 " recover from a failed cleanup. This is safe"
940 " to run even if the instance is healthy, but it"
941 " will create extra replication traffic and "
942 " disrupt briefly the replication (like during the"
945 STATIC_OPT = cli_option("-s", "--static", dest="static",
946 action="store_true", default=False,
947 help="Only show configuration data, not runtime data")
949 ALL_OPT = cli_option("--all", dest="show_all",
950 default=False, action="store_true",
951 help="Show info on all instances on the cluster."
952 " This can take a long time to run, use wisely")
954 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
955 action="store_true", default=False,
956 help="Interactive OS reinstall, lists available"
957 " OS templates for selection")
959 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
960 action="store_true", default=False,
961 help="Remove the instance from the cluster"
962 " configuration even if there are failures"
963 " during the removal process")
965 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
966 dest="ignore_remove_failures",
967 action="store_true", default=False,
968 help="Remove the instance from the"
969 " cluster configuration even if there"
970 " are failures during the removal"
973 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
974 action="store_true", default=False,
975 help="Remove the instance from the cluster")
977 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
978 help="Specifies the new node for the instance",
979 metavar="NODE", default=None,
980 completion_suggest=OPT_COMPL_ONE_NODE)
982 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
983 help="Specifies the new secondary node",
984 metavar="NODE", default=None,
985 completion_suggest=OPT_COMPL_ONE_NODE)
987 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
988 default=False, action="store_true",
989 help="Replace the disk(s) on the primary"
990 " node (applies only to internally mirrored"
991 " disk templates, e.g. %s)" %
992 utils.CommaJoin(constants.DTS_INT_MIRROR))
994 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
995 default=False, action="store_true",
996 help="Replace the disk(s) on the secondary"
997 " node (applies only to internally mirrored"
998 " disk templates, e.g. %s)" %
999 utils.CommaJoin(constants.DTS_INT_MIRROR))
1001 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1002 default=False, action="store_true",
1003 help="Lock all nodes and auto-promote as needed"
1006 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1007 default=False, action="store_true",
1008 help="Automatically replace faulty disks"
1009 " (applies only to internally mirrored"
1010 " disk templates, e.g. %s)" %
1011 utils.CommaJoin(constants.DTS_INT_MIRROR))
1013 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1014 default=False, action="store_true",
1015 help="Ignore current recorded size"
1016 " (useful for forcing activation when"
1017 " the recorded size is wrong)")
1019 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1021 completion_suggest=OPT_COMPL_ONE_NODE)
1023 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1026 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1027 help="Specify the secondary ip for the node",
1028 metavar="ADDRESS", default=None)
1030 READD_OPT = cli_option("--readd", dest="readd",
1031 default=False, action="store_true",
1032 help="Readd old node after replacing it")
1034 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1035 default=True, action="store_false",
1036 help="Disable SSH key fingerprint checking")
1038 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1039 default=False, action="store_true",
1040 help="Force the joining of a node")
1042 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1043 type="bool", default=None, metavar=_YORNO,
1044 help="Set the master_candidate flag on the node")
1046 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1047 type="bool", default=None,
1048 help=("Set the offline flag on the node"
1049 " (cluster does not communicate with offline"
1052 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1053 type="bool", default=None,
1054 help=("Set the drained flag on the node"
1055 " (excluded from allocation operations)"))
1057 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1058 type="bool", default=None, metavar=_YORNO,
1059 help="Set the master_capable flag on the node")
1061 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1062 type="bool", default=None, metavar=_YORNO,
1063 help="Set the vm_capable flag on the node")
1065 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1066 type="bool", default=None, metavar=_YORNO,
1067 help="Set the allocatable flag on a volume")
1069 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1070 help="Disable support for lvm based instances"
1072 action="store_false", default=True)
1074 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1075 dest="enabled_hypervisors",
1076 help="Comma-separated list of hypervisors",
1077 type="string", default=None)
1079 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1080 type="keyval", default={},
1081 help="NIC parameters")
1083 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1084 dest="candidate_pool_size", type="int",
1085 help="Set the candidate pool size")
1087 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1088 help=("Enables LVM and specifies the volume group"
1089 " name (cluster-wide) for disk allocation"
1090 " [%s]" % constants.DEFAULT_VG),
1091 metavar="VG", default=None)
1093 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1094 help="Destroy cluster", action="store_true")
1096 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1097 help="Skip node agreement check (dangerous)",
1098 action="store_true", default=False)
1100 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1101 help="Specify the mac prefix for the instance IP"
1102 " addresses, in the format XX:XX:XX",
1106 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1107 help="Specify the node interface (cluster-wide)"
1108 " on which the master IP address will be added"
1109 " (cluster init default: %s)" %
1110 constants.DEFAULT_BRIDGE,
1114 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1115 help="Specify the netmask of the master IP",
1119 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1120 dest="use_external_mip_script",
1121 help="Specify whether to run a user-provided"
1122 " script for the master IP address turnup and"
1123 " turndown operations",
1124 type="bool", metavar=_YORNO, default=None)
1126 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1127 help="Specify the default directory (cluster-"
1128 "wide) for storing the file-based disks [%s]" %
1129 constants.DEFAULT_FILE_STORAGE_DIR,
1131 default=constants.DEFAULT_FILE_STORAGE_DIR)
1133 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1134 dest="shared_file_storage_dir",
1135 help="Specify the default directory (cluster-"
1136 "wide) for storing the shared file-based"
1138 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1139 metavar="SHAREDDIR",
1140 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1142 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1143 help="Don't modify /etc/hosts",
1144 action="store_false", default=True)
1146 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1147 help="Don't initialize SSH keys",
1148 action="store_false", default=True)
1150 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1151 help="Enable parseable error messages",
1152 action="store_true", default=False)
1154 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1155 help="Skip N+1 memory redundancy tests",
1156 action="store_true", default=False)
1158 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1159 help="Type of reboot: soft/hard/full",
1160 default=constants.INSTANCE_REBOOT_HARD,
1162 choices=list(constants.REBOOT_TYPES))
1164 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1165 dest="ignore_secondaries",
1166 default=False, action="store_true",
1167 help="Ignore errors from secondaries")
1169 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1170 action="store_false", default=True,
1171 help="Don't shutdown the instance (unsafe)")
1173 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1174 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1175 help="Maximum time to wait")
1177 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1178 dest="shutdown_timeout", type="int",
1179 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1180 help="Maximum time to wait for instance shutdown")
1182 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1184 help=("Number of seconds between repetions of the"
# NOTE(review): garbled region (stray line numbers, missing lines) — kept
# byte-identical; restore from VCS before editing.
1187 EARLY_RELEASE_OPT = cli_option("--early-release",
1188 dest="early_release", default=False,
1189 action="store_true",
1190 help="Release the locks on the secondary"
1193 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1194 dest="new_cluster_cert",
1195 default=False, action="store_true",
1196 help="Generate a new cluster certificate")
1198 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1200 help="File containing new RAPI certificate")
1202 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1203 default=None, action="store_true",
1204 help=("Generate a new self-signed RAPI"
1207 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1209 help="File containing new SPICE certificate")
1211 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1213 help="File containing the certificate of the CA"
1214 " which signed the SPICE certificate")
1216 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1217 dest="new_spice_cert", default=None,
1218 action="store_true",
1219 help=("Generate a new self-signed SPICE"
1222 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1223 dest="new_confd_hmac_key",
1224 default=False, action="store_true",
1225 help=("Create a new HMAC key for %s" %
# Path of a file containing a replacement cluster-domain secret.
# Fixed duplicated word in the help string ("new new" -> "new"); the elided
# default line is reconstructed as default=None — TODO confirm against VCS.
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
# NOTE(review): garbled region (stray line numbers, missing lines) — kept
# byte-identical; restore from VCS before editing. Shared option constants
# followed by the COMMON_OPTS / COMMON_CREATE_OPTS / INSTANCE_POLICY_OPTS
# aggregation lists.
1234 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1235 dest="new_cluster_domain_secret",
1236 default=False, action="store_true",
1237 help=("Create a new cluster domain"
1240 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1241 dest="use_replication_network",
1242 help="Whether to use the replication network"
1243 " for talking to the nodes",
1244 action="store_true", default=False)
1246 MAINTAIN_NODE_HEALTH_OPT = \
1247 cli_option("--maintain-node-health", dest="maintain_node_health",
1248 metavar=_YORNO, default=None, type="bool",
1249 help="Configure the cluster to automatically maintain node"
1250 " health, by shutting down unknown instances, shutting down"
1251 " unknown DRBD devices, etc.")
1253 IDENTIFY_DEFAULTS_OPT = \
1254 cli_option("--identify-defaults", dest="identify_defaults",
1255 default=False, action="store_true",
1256 help="Identify which saved instance parameters are equal to"
1257 " the current cluster defaults and set them as such, instead"
1258 " of marking them as overridden")
1260 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1261 action="store", dest="uid_pool",
1262 help=("A list of user-ids or user-id"
1263 " ranges separated by commas"))
1265 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1266 action="store", dest="add_uids",
1267 help=("A list of user-ids or user-id"
1268 " ranges separated by commas, to be"
1269 " added to the user-id pool"))
1271 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1272 action="store", dest="remove_uids",
1273 help=("A list of user-ids or user-id"
1274 " ranges separated by commas, to be"
1275 " removed from the user-id pool"))
1277 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1278 action="store", dest="reserved_lvs",
1279 help=("A comma-separated list of reserved"
1280 " logical volumes names, that will be"
1281 " ignored by cluster verify"))
1283 ROMAN_OPT = cli_option("--roman",
1284 dest="roman_integers", default=False,
1285 action="store_true",
1286 help="Use roman numbers for positive integers")
1288 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1289 action="store", default=None,
1290 help="Specifies usermode helper for DRBD")
1292 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1293 action="store_false", default=True,
1294 help="Disable support for DRBD")
1296 PRIMARY_IP_VERSION_OPT = \
1297 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1298 action="store", dest="primary_ip_version",
1299 metavar="%d|%d" % (constants.IP4_VERSION,
1300 constants.IP6_VERSION),
1301 help="Cluster-wide IP version for primary IP")
1303 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1304 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1305 choices=_PRIONAME_TO_VALUE.keys(),
1306 help="Priority for opcode processing")
1308 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1309 type="bool", default=None, metavar=_YORNO,
1310 help="Sets the hidden flag on the OS")
1312 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1313 type="bool", default=None, metavar=_YORNO,
1314 help="Sets the blacklisted flag on the OS")
1316 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1317 type="bool", metavar=_YORNO,
1318 dest="prealloc_wipe_disks",
1319 help=("Wipe disks prior to instance"
1322 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1323 type="keyval", default=None,
1324 help="Node parameters")
1326 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1327 action="store", metavar="POLICY", default=None,
1328 help="Allocation policy for the node group")
1330 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1331 type="bool", metavar=_YORNO,
1332 dest="node_powered",
1333 help="Specify if the SoR for node is powered")
1335 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1336 default=constants.OOB_TIMEOUT,
1337 help="Maximum time to wait for out-of-band helper")
1339 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1340 default=constants.OOB_POWER_DELAY,
1341 help="Time in seconds to wait between power-ons")
1343 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1344 action="store_true", default=False,
1345 help=("Whether command argument should be treated"
1348 NO_REMEMBER_OPT = cli_option("--no-remember",
1350 action="store_true", default=False,
1351 help="Perform but do not record the change"
1352 " in the configuration")
1354 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1355 default=False, action="store_true",
1356 help="Evacuate primary instances only")
1358 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1359 default=False, action="store_true",
1360 help="Evacuate secondary instances only"
1361 " (applies only to internally mirrored"
1362 " disk templates, e.g. %s)" %
1363 utils.CommaJoin(constants.DTS_INT_MIRROR))
1365 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1366 action="store_true", default=False,
1367 help="Pause instance at startup")
1369 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1370 help="Destination node group (name or uuid)",
1371 default=None, action="append",
1372 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1374 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1375 action="append", dest="ignore_errors",
1376 choices=list(constants.CV_ALL_ECODES_STRINGS),
1377 help="Error code to be ignored")
1379 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1381 help=("Specify disk state information in the format"
1382 " storage_type/identifier:option=value,..."),
1385 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1387 help=("Specify hypervisor state information in the"
1388 " format hypervisor:option=value,..."),
1391 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1392 action="store_true", default=False,
1393 help="Ignore instance policy violations")
1395 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1396 help="Sets the instance's runtime memory,"
1397 " ballooning it up or down to the new value",
1398 default=None, type="unit", metavar="<size>")
1400 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1401 action="store_true", default=False,
1402 help="Marks the grow as absolute instead of the"
1403 " (default) relative mode")
1405 #: Options provided by all commands
1406 COMMON_OPTS = [DEBUG_OPT]
1408 # common options for creating instances. add and import then add their own
1410 COMMON_CREATE_OPTS = [
1415 FILESTORE_DRIVER_OPT,
1432 # common instance policy options
1433 INSTANCE_POLICY_OPTS = [
1434 SPECS_CPU_COUNT_OPT,
1435 SPECS_DISK_COUNT_OPT,
1436 SPECS_DISK_SIZE_OPT,
1438 SPECS_NIC_COUNT_OPT,
1439 IPOLICY_DISK_TEMPLATES,
# NOTE(review): garbled (stray line numbers, missing lines such as the
# try/except around argv[0] and the alias-lookup code) — kept byte-identical;
# restore from VCS before editing. Parses argv against the command table and
# returns (func, options, args), or (None, None, None) on usage errors.
1444 def _ParseArgs(argv, commands, aliases, env_override):
1445 """Parser for the command line arguments.
1447 This function parses the arguments and returns the function which
1448 must be executed together with its (modified) arguments.
1450 @param argv: the command line
1451 @param commands: dictionary with special contents, see the design
1452 doc for cmdline handling
1453 @param aliases: dictionary with command aliases {'alias': 'target, ...}
1454 @param env_override: list of env variables allowed for default args
1457 assert not (env_override - set(commands))
1460 binary = "<command>"
1462 binary = argv[0].split("/")[-1]
1464 if len(argv) > 1 and argv[1] == "--version":
1465 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1466 constants.RELEASE_VERSION)
1467 # Quit right away. That way we don't have to care about this special
1468 # argument. optparse.py does it the same.
1471 if len(argv) < 2 or not (argv[1] in commands or
1472 argv[1] in aliases):
1473 # let's do a nice thing
1474 sortedcmds = commands.keys()
1477 ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1478 ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1481 # compute the max line length for cmd + usage
1482 mlen = max([len(" %s" % cmd) for cmd in commands])
1483 mlen = min(60, mlen) # should not get here...
1485 # and format a nice command list
1486 ToStdout("Commands:")
1487 for cmd in sortedcmds:
1488 cmdstr = " %s" % (cmd,)
1489 help_text = commands[cmd][4]
1490 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1491 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1492 for line in help_lines:
1493 ToStdout("%-*s %s", mlen, "", line)
1497 return None, None, None
1499 # get command, unalias it, and look it up in commands
1503 raise errors.ProgrammerError("Alias '%s' overrides an existing"
1506 if aliases[cmd] not in commands:
1507 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1508 " command '%s'" % (cmd, aliases[cmd]))
1512 if cmd in env_override:
1513 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1514 env_args = os.environ.get(args_env_name)
1516 argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))
1518 func, args_def, parser_opts, usage, description = commands[cmd]
1519 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1520 description=description,
1521 formatter=TitledHelpFormatter(),
1522 usage="%%prog %s %s" % (cmd, usage))
1523 parser.disable_interspersed_args()
1524 options, args = parser.parse_args(args=argv[1:])
1526 if not _CheckArguments(cmd, args_def, args):
1527 return None, None, None
1529 return func, options, args
1532 def _CheckArguments(cmd, args_def, args):
1533 """Verifies the arguments using the argument definition.
1537 1. Abort with error if values specified by user but none expected.
1539 1. For each argument in definition
1541 1. Keep running count of minimum number of values (min_count)
1542 1. Keep running count of maximum number of values (max_count)
1543 1. If it has an unlimited number of values
1545 1. Abort with error if it's not the last argument in the definition
1547 1. If last argument has limited number of values
1549 1. Abort with error if number of values doesn't match or is too large
1551 1. Abort with error if user didn't pass enough values (min_count)
1554 if args and not args_def:
1555 ToStderr("Error: Command %s expects no arguments", cmd)
1562 last_idx = len(args_def) - 1
1564 for idx, arg in enumerate(args_def):
1565 if min_count is None:
1567 elif arg.min is not None:
1568 min_count += arg.min
1570 if max_count is None:
1572 elif arg.max is not None:
1573 max_count += arg.max
1576 check_max = (arg.max is not None)
1578 elif arg.max is None:
1579 raise errors.ProgrammerError("Only the last argument can have max=None")
1582 # Command with exact number of arguments
1583 if (min_count is not None and max_count is not None and
1584 min_count == max_count and len(args) != min_count):
1585 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1588 # Command with limited number of arguments
1589 if max_count is not None and len(args) > max_count:
1590 ToStderr("Error: Command %s expects only %d argument(s)",
1594 # Command with some required arguments
1595 if min_count is not None and len(args) < min_count:
1596 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option into (primary, secondary).

  Reconstructed from the garbled original (docstring terminator missing).

  @type value: string or None
  @param value: the value passed to "--node", optionally of the form
    "primary:secondary"
  @return: a pair of node names; the secondary element is None when no
    ":" separator is present (note: a list is returned in the split case,
    mirroring the original behaviour)

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  Reconstructed from the garbled original (the no-variants branch was
  missing from the corrupted text).

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  Reconstructed from the garbled original (the C{return default} branch was
  missing from the corrupted text).

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected, the default plus
    the extra fields when the selection starts with "+", otherwise the
    comma-split selection

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias: wraps a function with rpc.RunWithRPC (see the rpc module).
1648 UsesRPC = rpc.RunWithRPC
# NOTE(review): garbled (stray line numbers; the /dev/tty try/except and the
# interactive read loop are partially missing) — kept byte-identical; restore
# from VCS before editing. Interactive yes/no-style prompt on the
# controlling terminal; returns the last choice's value when no tty exists.
1651 def AskUser(text, choices=None):
1652 """Ask the user a question.
1654 @param text: the question to ask
1656 @param choices: list with elements tuples (input_char, return_value,
1657 description); if not given, it will default to: [('y', True,
1658 'Perform the operation'), ('n', False, 'Do no do the operation')];
1659 note that the '?' char is reserved for help
1661 @return: one of the return values from the choices list; if input is
1662 not possible (i.e. not running with a tty, we return the last
1667 choices = [("y", True, "Perform the operation"),
1668 ("n", False, "Do not perform the operation")]
1669 if not choices or not isinstance(choices, list):
1670 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1671 for entry in choices:
1672 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1673 raise errors.ProgrammerError("Invalid choices element to AskUser")
1675 answer = choices[-1][1]
1677 for line in text.splitlines():
1678 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1679 text = "\n".join(new_text)
1681 f = file("/dev/tty", "a+")
1685 chars = [entry[0] for entry in choices]
1686 chars[-1] = "[%s]" % chars[-1]
1688 maps = dict([(entry[0], entry[1]) for entry in choices])
1692 f.write("/".join(chars))
1694 line = f.readline(2).strip().lower()
1699 for entry in choices:
1700 f.write(" %s - %s\n" % (entry[0], entry[2]))
# NOTE(review): garbled (docstring terminator missing) — kept byte-identical;
# restore from VCS before editing.
1708 class JobSubmittedException(Exception):
1709 """Job was submitted, client should exit.
1711 This exception has one argument, the ID of the job that was
1712 submitted. The handler should print this ID.
1714 This is not an error, just a structured way to exit from clients.
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  Reconstructed from the garbled original (the client-creation fallback and
  the C{return job_id} line were missing from the corrupted text).

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the job id as returned by the master

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
# NOTE(review): garbled (the while-loop header, several branch lines and the
# success-return are missing) — kept byte-identical; restore from VCS before
# editing. Polls a job via the cbs callbacks until it reaches a final state,
# streaming log entries through report_cbs, then raises OpExecError on any
# failure/cancel or returns the opcode results on success.
1737 def GenericPollJob(job_id, cbs, report_cbs):
1738 """Generic job-polling function.
1740 @type job_id: number
1741 @param job_id: Job ID
1742 @type cbs: Instance of L{JobPollCbBase}
1743 @param cbs: Data callbacks
1744 @type report_cbs: Instance of L{JobPollReportCbBase}
1745 @param report_cbs: Reporting callbacks
1748 prev_job_info = None
1749 prev_logmsg_serial = None
1754 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1757 # job not found, go away!
1758 raise errors.JobLost("Job with id %s lost" % job_id)
1760 if result == constants.JOB_NOTCHANGED:
1761 report_cbs.ReportNotChanged(job_id, status)
1766 # Split result, a tuple of (field values, log entries)
1767 (job_info, log_entries) = result
1768 (status, ) = job_info
1771 for log_entry in log_entries:
1772 (serial, timestamp, log_type, message) = log_entry
1773 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1775 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1777 # TODO: Handle canceled and archived jobs
1778 elif status in (constants.JOB_STATUS_SUCCESS,
1779 constants.JOB_STATUS_ERROR,
1780 constants.JOB_STATUS_CANCELING,
1781 constants.JOB_STATUS_CANCELED):
1784 prev_job_info = job_info
1786 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1788 raise errors.JobLost("Job with id %s lost" % job_id)
1790 status, opstatus, result = jobs[0]
1792 if status == constants.JOB_STATUS_SUCCESS:
1795 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1796 raise errors.OpExecError("Job was canceled")
1799 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1800 if status == constants.OP_STATUS_SUCCESS:
1802 elif status == constants.OP_STATUS_ERROR:
1803 errors.MaybeRaise(msg)
1806 raise errors.OpExecError("partial failure (opcode %d): %s" %
1809 raise errors.OpExecError(str(msg))
1811 # default failure mode
1812 raise errors.OpExecError(result)
# NOTE(review): garbled (__init__ def line and docstring terminators missing)
# — kept byte-identical; restore from VCS before editing. Abstract data
# callbacks used by GenericPollJob; subclasses must implement both methods.
1815 class JobPollCbBase:
1816 """Base class for L{GenericPollJob} callbacks.
1820 """Initializes this class.
1824 def WaitForJobChangeOnce(self, job_id, fields,
1825 prev_job_info, prev_log_serial):
1826 """Waits for changes on a job.
1829 raise NotImplementedError()
1831 def QueryJobs(self, job_ids, fields):
1832 """Returns the selected fields for the selected job IDs.
1834 @type job_ids: list of numbers
1835 @param job_ids: Job IDs
1836 @type fields: list of strings
1837 @param fields: Fields
1840 raise NotImplementedError()
# NOTE(review): garbled (__init__ def line and docstring terminators missing)
# — kept byte-identical; restore from VCS before editing. Abstract reporting
# callbacks used by GenericPollJob for progress output.
1843 class JobPollReportCbBase:
1844 """Base class for L{GenericPollJob} reporting callbacks.
1848 """Initializes this class.
1852 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1853 """Handles a log message.
1856 raise NotImplementedError()
1858 def ReportNotChanged(self, job_id, status):
1859 """Called for if a job hasn't changed in a while.
1861 @type job_id: number
1862 @param job_id: Job ID
1863 @type status: string or None
1864 @param status: Job status if available
1867 raise NotImplementedError()
# NOTE(review): garbled (presumably a "self.cl = cl" assignment and docstring
# terminators are missing — TODO confirm from VCS) — kept byte-identical.
# Concrete JobPollCbBase that delegates to a luxi client.
1870 class _LuxiJobPollCb(JobPollCbBase):
1871 def __init__(self, cl):
1872 """Initializes this class.
1875 JobPollCbBase.__init__(self)
1878 def WaitForJobChangeOnce(self, job_id, fields,
1879 prev_job_info, prev_log_serial):
1880 """Waits for changes on a job.
1883 return self.cl.WaitForJobChangeOnce(job_id, fields,
1884 prev_job_info, prev_log_serial)
1886 def QueryJobs(self, job_ids, fields):
1887 """Returns the selected fields for the selected job IDs.
1890 return self.cl.QueryJobs(job_ids, fields)
# NOTE(review): garbled (docstring terminators and the empty ReportNotChanged
# body missing) — kept byte-identical; restore from VCS before editing.
# Reporting callback that forwards log messages to a user-supplied
# feedback function.
1893 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1894 def __init__(self, feedback_fn):
1895 """Initializes this class.
1898 JobPollReportCbBase.__init__(self)
1900 self.feedback_fn = feedback_fn
1902 assert callable(feedback_fn)
1904 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1905 """Handles a log message.
1908 self.feedback_fn((timestamp, log_type, log_msg))
1910 def ReportNotChanged(self, job_id, status):
1911 """Called if a job hasn't changed in a while.
# NOTE(review): garbled (__init__ def line and docstring terminators missing)
# — kept byte-identical; restore from VCS before editing. Reporting callback
# that prints job progress to stdout/stderr, announcing "queued" and
# "waiting for locks" states at most once each.
1917 class StdioJobPollReportCb(JobPollReportCbBase):
1919 """Initializes this class.
1922 JobPollReportCbBase.__init__(self)
1924 self.notified_queued = False
1925 self.notified_waitlock = False
1927 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1928 """Handles a log message.
1931 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1932 FormatLogMessage(log_type, log_msg))
1934 def ReportNotChanged(self, job_id, status):
1935 """Called if a job hasn't changed in a while.
1941 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1942 ToStderr("Job %s is waiting in queue", job_id)
1943 self.notified_queued = True
1945 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1946 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1947 self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Reconstructed from the garbled original (docstring terminator missing).
  Non-ELOG_MESSAGE payloads are stringified first; the result is passed
  through utils.SafeEncode before display.

  @param log_type: the message type, one of the constants.ELOG_* values
  @param log_msg: the message payload
  @rtype: string
  @return: the encoded message, safe for terminal output

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
# NOTE(review): garbled (the GetClient fallback, the feedback_fn test and the
# elif/else branch lines are missing) — kept byte-identical; restore from VCS
# before editing. Builds a reporter (from feedback_fn or stdio) and delegates
# to GenericPollJob over a luxi client.
1960 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1961 """Function to poll for the result of a job.
1963 @type job_id: job identified
1964 @param job_id: the job to poll for results
1965 @type cl: luxi.Client
1966 @param cl: the luxi client to use for communicating with the master;
1967 if None, a new client will be created
1973 if reporter is None:
1975 reporter = FeedbackFnJobPollReportCb(feedback_fn)
1977 reporter = StdioJobPollReportCb()
1979 raise errors.ProgrammerError("Can't specify reporter and feedback function")
1981 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
# NOTE(review): garbled (parameter docs, the GetClient fallback and the
# reporter kwarg continuation are missing) — kept byte-identical; restore
# from VCS before editing. Submits one opcode, waits for it via PollJob, and
# returns the single opcode's result.
1984 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1985 """Legacy function to submit an opcode.
1987 This is just a simple wrapper over the construction of the processor
1988 instance. It should be extended to better handle feedback and
1989 interaction functions.
1995 SetGenericOpcodeOpts([op], opts)
1997 job_id = SendJob([op], cl=cl)
1999 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2002 return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  Reconstructed from the garbled original (the C{job = [op]} line and the
  C{else:} branch were missing from the corrupted text).

  @param op: the opcode to submit
  @param opts: command line options; when C{opts.submit_only} is set the
    job id is reported via L{JobSubmittedException} instead of waiting
  @raise JobSubmittedException: in submit-only mode, carrying the job id

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  Reconstructed from the garbled original (the early-return guard for a
  missing options object was absent from the corrupted text).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
# NOTE(review): this appears to be the interior of a client-constructor
# function (presumably GetClient) whose def line is missing from this view —
# TODO confirm from VCS. Garbled text kept byte-identical. It creates a luxi
# client, and on NoMasterError consults ssconf to produce a clearer error.
2048 # TODO: Cache object?
2050 client = luxi.Client()
2051 except luxi.NoMasterError:
2052 ss = ssconf.SimpleStore()
2054 # Try to read ssconf file
2057 except errors.ConfigurationError:
2058 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2059 " not part of a cluster")
2061 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2062 if master != myself:
2063 raise errors.OpPrereqError("This is not the master node, please connect"
2064 " to node '%s' and rerun the command" %
# NOTE(review): garbled (retcode/msg/obuf initialization and several branch
# lines are missing) — kept byte-identical; restore from VCS before editing.
# Maps an exception instance to (exit code, human-readable message) via a
# long isinstance chain; obuf is presumably a StringIO accumulator — TODO
# confirm from VCS.
2070 def FormatError(err):
2071 """Return a formatted error message for a given error.
2073 This function takes an exception instance and returns a tuple
2074 consisting of two values: first, the recommended exit code, and
2075 second, a string describing the error message (not
2076 newline-terminated).
2082 if isinstance(err, errors.ConfigurationError):
2083 txt = "Corrupt configuration file: %s" % msg
2085 obuf.write(txt + "\n")
2086 obuf.write("Aborting.")
2088 elif isinstance(err, errors.HooksAbort):
2089 obuf.write("Failure: hooks execution failed:\n")
2090 for node, script, out in err.args[0]:
2092 obuf.write(" node: %s, script: %s, output: %s\n" %
2093 (node, script, out))
2095 obuf.write(" node: %s, script: %s (no output)\n" %
2097 elif isinstance(err, errors.HooksFailure):
2098 obuf.write("Failure: hooks general failure: %s" % msg)
2099 elif isinstance(err, errors.ResolverError):
2100 this_host = netutils.Hostname.GetSysName()
2101 if err.args[0] == this_host:
2102 msg = "Failure: can't resolve my own hostname ('%s')"
2104 msg = "Failure: can't resolve hostname '%s'"
2105 obuf.write(msg % err.args[0])
2106 elif isinstance(err, errors.OpPrereqError):
2107 if len(err.args) == 2:
2108 obuf.write("Failure: prerequisites not met for this"
2109 " operation:\nerror type: %s, error details:\n%s" %
2110 (err.args[1], err.args[0]))
2112 obuf.write("Failure: prerequisites not met for this"
2113 " operation:\n%s" % msg)
2114 elif isinstance(err, errors.OpExecError):
2115 obuf.write("Failure: command execution error:\n%s" % msg)
2116 elif isinstance(err, errors.TagError):
2117 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2118 elif isinstance(err, errors.JobQueueDrainError):
2119 obuf.write("Failure: the job queue is marked for drain and doesn't"
2120 " accept new requests\n")
2121 elif isinstance(err, errors.JobQueueFull):
2122 obuf.write("Failure: the job queue is full and doesn't accept new"
2123 " job submissions until old jobs are archived\n")
2124 elif isinstance(err, errors.TypeEnforcementError):
2125 obuf.write("Parameter Error: %s" % msg)
2126 elif isinstance(err, errors.ParameterError):
2127 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2128 elif isinstance(err, luxi.NoMasterError):
2129 obuf.write("Cannot communicate with the master daemon.\nIs it running"
2130 " and listening for connections?")
2131 elif isinstance(err, luxi.TimeoutError):
2132 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2133 " been submitted and will continue to run even if the call"
2134 " timed out. Useful commands in this situation are \"gnt-job"
2135 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2137 elif isinstance(err, luxi.PermissionError):
2138 obuf.write("It seems you don't have permissions to connect to the"
2139 " master daemon.\nPlease retry as a different user.")
2140 elif isinstance(err, luxi.ProtocolError):
2141 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2143 elif isinstance(err, errors.JobLost):
2144 obuf.write("Error checking job status: %s" % msg)
2145 elif isinstance(err, errors.QueryFilterParseError):
2146 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2147 obuf.write("\n".join(err.GetDetails()))
2148 elif isinstance(err, errors.GenericError):
2149 obuf.write("Unhandled Ganeti error: %s" % msg)
2150 elif isinstance(err, JobSubmittedException):
2151 obuf.write("JobID: %s\n" % err.args[0])
2154 obuf.write("Unhandled exception: %s" % msg)
2155 return retcode, obuf.getvalue().rstrip("\n")
# NOTE(review): garbled (try/except scaffolding, several assignments and the
# final return are missing) — kept byte-identical; restore from VCS before
# editing. Entry point shared by all gnt-* binaries: parses argv, applies
# overrides, sets up logging, runs the selected command function and maps
# exceptions to exit codes. Uses Python 2 "except X, err" syntax.
2158 def GenericMain(commands, override=None, aliases=None,
2159 env_override=frozenset()):
2160 """Generic main function for all the gnt-* commands.
2162 @param commands: a dictionary with a special structure, see the design doc
2163 for command line handling.
2164 @param override: if not None, we expect a dictionary with keys that will
2165 override command line options; this can be used to pass
2166 options from the scripts to generic functions
2167 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2168 @param env_override: list of environment names which are allowed to submit
2169 default args for commands
2172 # save the program name and the entire command line for later logging
2174 binary = os.path.basename(sys.argv[0])
2176 binary = sys.argv[0]
2178 if len(sys.argv) >= 2:
2179 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2183 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2185 binary = "<unknown program>"
2186 cmdline = "<unknown>"
2192 func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
2193 except errors.ParameterError, err:
2194 result, err_msg = FormatError(err)
2198 if func is None: # parse error
2201 if override is not None:
2202 for key, val in override.iteritems():
2203 setattr(options, key, val)
2205 utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
2206 stderr_logging=True)
2208 logging.info("Command line: %s", cmdline)
2211 result = func(options, args)
2212 except (errors.GenericError, luxi.ProtocolError,
2213 JobSubmittedException), err:
2214 result, err_msg = FormatError(err)
2215 logging.exception("Error during command processing")
2217 except KeyboardInterrupt:
2218 result = constants.EXIT_FAILURE
2219 ToStderr("Aborted. Note that if the operation created any jobs, they"
2220 " might have been submitted and"
2221 " will continue to run in the background.")
2222 except IOError, err:
2223 if err.errno == errno.EPIPE:
2224 # our terminal went away, we'll exit
2225 sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  Reconstructed from the garbled original (the C{try:} line, the index
  conversion, the per-NIC assignment and the final return were missing
  from the corrupted text).

  @param optvalue: sequence of (index, settings-dict) pairs as produced by
    the option parser
  @rtype: list of dicts
  @return: a list of NIC parameter dicts, positioned by their indices
  @raise errors.OpPrereqError: for an invalid index or settings value

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError) as err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
# NOTE(review): garbled (argument unpacking, several branches, the
# mode-specific opcode fields and the final return are missing) — kept
# byte-identical; restore from VCS before editing. Validates the
# user-supplied NIC/disk/tag options, builds an OpInstanceCreate opcode for
# either creation or import mode, and submits it via SubmitOrSend.
2256 def GenericInstanceCreate(mode, opts, args):
2257 """Add an instance to the cluster via either creation or import.
2259 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2260 @param opts: the command line options selected by the user
2262 @param args: should contain only one element, the new instance name
2264 @return: the desired exit code
2269 (pnode, snode) = SplitNodeOption(opts.node)
2274 hypervisor, hvparams = opts.hypervisor
2277 nics = ParseNicOption(opts.nics)
2281 elif mode == constants.INSTANCE_CREATE:
2282 # default of one nic, all auto
2288 if opts.disk_template == constants.DT_DISKLESS:
2289 if opts.disks or opts.sd_size is not None:
2290 raise errors.OpPrereqError("Diskless instance but disk"
2291 " information passed")
2294 if (not opts.disks and not opts.sd_size
2295 and mode == constants.INSTANCE_CREATE):
2296 raise errors.OpPrereqError("No disk information specified")
2297 if opts.disks and opts.sd_size is not None:
2298 raise errors.OpPrereqError("Please use either the '--disk' or"
2300 if opts.sd_size is not None:
2301 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2305 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2306 except ValueError, err:
2307 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2308 disks = [{}] * disk_max
2311 for didx, ddict in opts.disks:
2313 if not isinstance(ddict, dict):
2314 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2315 raise errors.OpPrereqError(msg)
2316 elif constants.IDISK_SIZE in ddict:
2317 if constants.IDISK_ADOPT in ddict:
2318 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2319 " (disk %d)" % didx)
2321 ddict[constants.IDISK_SIZE] = \
2322 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2323 except ValueError, err:
2324 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2326 elif constants.IDISK_ADOPT in ddict:
2327 if mode == constants.INSTANCE_IMPORT:
2328 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2330 ddict[constants.IDISK_SIZE] = 0
2332 raise errors.OpPrereqError("Missing size or adoption source for"
2336 if opts.tags is not None:
2337 tags = opts.tags.split(",")
2341 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2342 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2344 if mode == constants.INSTANCE_CREATE:
2347 force_variant = opts.force_variant
2350 no_install = opts.no_install
2351 identify_defaults = False
2352 elif mode == constants.INSTANCE_IMPORT:
2355 force_variant = False
2356 src_node = opts.src_node
2357 src_path = opts.src_dir
2359 identify_defaults = opts.identify_defaults
2361 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2363 op = opcodes.OpInstanceCreate(instance_name=instance,
2365 disk_template=opts.disk_template,
2367 pnode=pnode, snode=snode,
2368 ip_check=opts.ip_check,
2369 name_check=opts.name_check,
2370 wait_for_sync=opts.wait_for_sync,
2371 file_storage_dir=opts.file_storage_dir,
2372 file_driver=opts.file_driver,
2373 iallocator=opts.iallocator,
2374 hypervisor=hypervisor,
2376 beparams=opts.beparams,
2377 osparams=opts.osparams,
2381 force_variant=force_variant,
2385 no_install=no_install,
2386 identify_defaults=identify_defaults,
2387 ignore_ipolicy=opts.ignore_ipolicy)
2389 SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    # SSH runner used by _RunCmd for commands on remote (non-master) nodes
    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; daemons are started on these
    # first, the master node last (see Call below)
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; C{None} means the local machine
    @type cmd: list
    @param cmd: Command and its arguments

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      # NOTE(review): an "else:" appears to be elided in this excerpt --
      # the SSH invocation below should only run for remote nodes;
      # confirm against the full file.
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    # NOTE(review): the failure check guarding this error path (e.g.
    # "if result.failed:") and the guard around the node-name message
    # are not visible in this excerpt.
    errmsg = ["Failed to run command %s" % result.cmd]
    errmsg.append("on node %s" % node_name)
    errmsg.append(": exitcode %s and error %s" %
                  (result.exit_code, result.output))
    raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)

    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    # ones are finished
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
    # NOTE(review): the try/except/finally structure wrapping the call
    # below and the restart code is partly elided in this excerpt.
    return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

    # Resume watcher by releasing the lock
    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @param fn: Function to be called when daemons are stopped
  @return: whatever C{fn} returns (via L{_RunWhileClusterStoppedHelper.Call})

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  # NOTE(review): "cl" is used below but its creation (presumably a luxi
  # client, e.g. GetClient()) is elided in this excerpt -- confirm
  # against the full file.
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
# NOTE(review): in this excerpt the end of the signature (presumably a
# "units=None):" parameter, given the docstring) and many body lines
# (list initializations, "else:" branches, the row loop header) are
# elided; the comments below annotate only the visible lines.
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # default numfields/unitfields to empty lists (branch bodies elided)
  if numfields is None:
  if unitfields is None:

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # build the per-field printf format strings
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # such fields)
      headers[field] = field
    if separator is not None:
      # explicit separator: no padding, plain string conversion
      format_fields.append("%s")
    elif numfields.Matches(field):
      # numeric fields are right-aligned, width passed via "*"
      format_fields.append("%*s")
      # NOTE(review): the "else:" for left-aligned text fields is elided
      format_fields.append("%-*s")

  if separator is None:
    # track the maximum width per column for the smart algorithm
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    # NOTE(review): "else:" elided; "%" in the separator is escaped since
    # the joined string is later used with the "%" operator
    format_str = separator.replace("%", "%%").join(format_fields)

  # NOTE(review): the enclosing loop over the data rows is elided here
  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      # convert to int and format with units (try/else partly elided)
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))

  # NOTE(review): header handling is partly elided; "hdr" and "args" are
  # assigned in elided lines
  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
  result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    # the last column needs no padding if it is left-aligned (elided body)
    if fields and not numfields.Matches(fields[-1]):

  # NOTE(review): presumably inside the row-output loop, substituting a
  # dashed line for empty rows -- the loop header is elided
  line = ["-" for _ in fields]
  for idx in range(len(fields)):
    if separator is None:
      args.append(mlens[idx])
    args.append(line[idx])
  result.append(format_str % tuple(args))
2624 def _FormatBool(value):
2625 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  # numbers are the only kind right-aligned by default
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  # NOTE(review): the closing brace of this dict literal is elided in
  # this excerpt.  QFT_UNIT is intentionally absent: it is handled
  # dynamically in _GetColumnFormatter, which asserts this.
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  # Per-field override takes precedence over the kind-based default
  # NOTE(review): the "if fmt is not None: return fmt" checks following
  # each lookup appear elided in this excerpt.
  fmt = override.get(fdef.name, None)

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)

  # reached only for kinds missing from _DEFAULT_FORMAT_QUERY
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    # NOTE(review): the assignment storing "fn" (self._fn, used in
    # __call__ below) appears elided in this excerpt.
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    @param data: (status, value) tuple as returned by the query layer

    """
    (status, value) = data

    # report the status to the owner, used for overall result statistics
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
      "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"
  # NOTE(review): in this excerpt the lookup below appears stripped of
  # its try/except wrapper and of the return statements choosing between
  # the verbose and normal descriptions -- confirm against the full file.
  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  raise NotImplementedError("Unknown status %s" % status)
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
      see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @return: (status, table) tuple (see the return statement below)

  """
  if format_override is None:
    format_override = {}

  # per-status counters, used below to decide the overall result status
  stats = dict.fromkeys(constants.RS_ALL, 0)

  # NOTE(review): the body of this closure (presumably incrementing
  # stats[status]) and the initialization of "columns" are elided in
  # this excerpt.
  def _RecordStatus(status):

  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  # NOTE(review): the assignments to "status" in the first and final
  # branches (presumably QR_UNKNOWN and QR_NORMAL) are elided in this
  # excerpt.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns the unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @return: list with the field definitions whose kind is
      L{constants.QFT_UNKNOWN}, preserving their order

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  # NOTE(review): the guard making this warning conditional on "unknown"
  # being non-empty, and the boolean return value used by the callers
  # (see "found_unknown" in GenericList), are elided in this excerpt.
  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
      None for automatic choice (human-readable for non-separator usage,
      otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
      L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)

  """
  # turn the name list into a query filter
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield)
  # NOTE(review): the "if qfilter is None:" guard for this branch is
  # elided in this excerpt.
    qfilter = namefilter
  elif namefilter is not None:
    # combine the caller-supplied filter with the name filter
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  # NOTE(review): the client default handling ("cl") is elided here.
  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  # NOTE(review): the remaining keyword arguments of this call (header,
  # verbose) and the loop printing "data" are elided in this excerpt.
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     format_override=format_override,

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  # NOTE(review): client default handling ("cl") is elided here.
  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  # NOTE(review): the opening of the "columns" list literal is elided.
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  # print one line per field definition
  # NOTE(review): the loop body (presumably printing each line) and the
  # guard for the unknown-field return below are elided in this excerpt.
  for line in FormatTable(rows, columns, header, separator):

    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2929 """Describes a column for L{FormatTable}.
2932 def __init__(self, title, fn, align_right):
2933 """Initializes this class.
2936 @param title: Column title
2938 @param fn: Formatting function
2939 @type align_right: bool
2940 @param align_right: Whether to align values on the right-hand side
2945 self.align_right = align_right
2948 def _GetColFormatString(width, align_right):
2949 """Returns the format string for a field.
2957 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  # NOTE(review): the "if header:"/"else:" branch structure around the
  # two initializations below, and the "for row in rows:" loop header,
  # are elided in this excerpt.
  data = [[col.title for col in columns]]
  colwidth = [len(col.title) for col in columns]
    colwidth = [0 for _ in columns]

    assert len(row) == len(columns)

    # format every cell with its column's formatting function
    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    # NOTE(review): the width reset (presumably "colwidth[-1] = 0") is
    # elided in this excerpt.

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # NOTE(review): the early return for malformed input and the unpacking
  # of "ts" into (sec, usecs) are elided in this excerpt.
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

  Without any suffix, the value will be taken to be in seconds.

  """
  # NOTE(review): this excerpt elides the empty-value guard's condition,
  # the "suffix_map" definition (suffix -> multiplier) and the "try:"
  # lines wrapping the integer conversions below.
    raise errors.OpPrereqError("Empty time specification passed")
  if value[-1] not in suffix_map:
    # no recognized suffix: the whole value must be a plain number
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  # recognized suffix: strip it and scale by its multiplier
  multiplier = suffix_map[value[-1]]
  if not value: # no data left after stripping the suffix
    raise errors.OpPrereqError("Invalid time specification (only"
  value = int(value) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value)
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped (unless C{nowarn} is set).

  @type nodes: list
  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  # NOTE(review): the client setup and the initialization of "qfilter"
  # (a list of sub-filters) are elided in this excerpt, as is the guard
  # (presumably "if nodes:") for the name filter below.
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # accept the group given either by name or by UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  # NOTE(review): presumably guarded by "if filter_master:" -- confirm
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  # combine all sub-filters with a logical AND
  if len(qfilter) > 1:
    final_filter = [qlang.OP_AND] + qfilter
    # NOTE(review): "else:" for the single-filter case appears elided
    assert len(qfilter) == 1
    final_filter = qfilter[0]

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each result row holds one (status, value) pair per requested field,
  # in order: name, offline, sip -- hence the destructuring below.
  def _IsOffline(row):
    (_, (_, offline), _) = row

  # NOTE(review): the surrounding helper definitions and their return
  # statements are partly elided; these lines extract the node name and
  # the secondary IP respectively.
    ((_, name), _, _) = row

    (_, _, (_, sip)) = row

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  # NOTE(review): the selection of "fn" (name vs. secondary-IP extractor,
  # depending on secondary_ips) is elided in this excerpt.
  return map(fn, online)
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  # NOTE(review): the "try:" statement, the no-arguments branch and the
  # trailing newline/flush handling are elided in this excerpt.
      stream.write(txt % args)
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Print a message on standard output, without going through logging.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Print a message on standard error, without going through logging.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # NOTE(review): the initialization of the queue/jobs lists and of the
    # client and opts attributes is elided in this excerpt; self.queue,
    # self.jobs, self.cl and self.opts are all used by the methods below.
    self.verbose = verbose
    self.feedback_fn = feedback_fn
    # monotonically increasing counter used to keep job results in
    # submission order (see the sort in GetResults)
    self._counter = itertools.count()

  # NOTE(review): the decorator (presumably @staticmethod, as "self" is
  # not taken) and the body of this helper are elided in this excerpt.
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    # NOTE(review): the branch structure (submitting one at a time when
    # "each" is set, otherwise in one SubmitManyJobs call) and the
    # initialization of "results" are partly elided in this excerpt.
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # only query the first _CHOOSE_BATCH jobs to bound the request size
    # (the closing argument list of this call is elided in this excerpt)
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)

    # no candidate found; fall back to the first job in the list
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    # NOTE(review): several guards (pending-queue check, verbosity check)
    # and the main polling loop header are elided in this excerpt; the
    # comments below annotate only the visible lines.
      self.SubmitPending()
    ok_jobs = [row[2] for row in self.jobs if row[1]]
      ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)

        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results = [i[1:] for i in results]

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    # NOTE(review): the if/else branch structure and the per-job success
    # check are elided in this excerpt.
      return self.GetResults()
      self.SubmitPending()
      for _, status, result, name in self.jobs:
          ToStdout("%s: %s", result, name)
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: Level of indent

  """
  indent = " " * level
  for key in sorted(actual):
    # NOTE(review): the assignment "data = actual[key]" appears elided
    # in this excerpt; "data" is used below.
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      # nested dictionary: recurse with increased indentation (the
      # continuation "level=level + 1)" and the "else:" are elided here)
      FormatParameterDict(buf, param_dict.get(key, {}), data,
      # scalar value: show the explicitly-set value, or mark the default
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  # NOTE(review): the assignment of "count" (presumably len(names)), the
  # size threshold deciding whether the affected list is shown inline or
  # on demand, the "view" loop and the final return are elided in this
  # excerpt.
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join([" %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

    # for long lists, offer a "view" choice instead of printing inline
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))

    question = msg + affected

  choice = AskUser(question, choices)

    # user asked to view the list; show it and ask again
    choice = AskUser(msg + affected, choices)
3413 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3414 ispecs_cpu_count=None,
3415 ispecs_disk_count=None,
3416 ispecs_disk_size=None,
3417 ispecs_nic_count=None,
3418 ipolicy_disk_templates=None,
3419 ipolicy_vcpu_ratio=None,
3420 group_ipolicy=False,
3421 allowed_values=None,
3423 """Creation of instance policy based on command line options.
3425 @param fill_all: whether for cluster policies we should ensure that
3426 all values are filled
3430 # prepare ipolicy dict
3431 ipolicy_transposed = {
3432 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3433 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3434 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3435 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3436 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3439 # first, check that the values given are correct
3441 forced_type = TISPECS_GROUP_TYPES
3443 forced_type = TISPECS_CLUSTER_TYPES
3445 for specs in ipolicy_transposed.values():
3446 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3449 ipolicy_out = objects.MakeEmptyIPolicy()
3450 for name, specs in ipolicy_transposed.iteritems():
3451 assert name in constants.ISPECS_PARAMETERS
3452 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3453 ipolicy_out[key][name] = val
3455 # no filldict for non-dicts
3456 if not group_ipolicy and fill_all:
3457 if ipolicy_disk_templates is None:
3458 ipolicy_disk_templates = constants.DISK_TEMPLATES
3459 if ipolicy_vcpu_ratio is None:
3460 ipolicy_vcpu_ratio = \
3461 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3462 if ipolicy_disk_templates is not None:
3463 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3464 if ipolicy_vcpu_ratio is not None:
3465 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3467 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)