4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
48 from optparse import (OptionParser, TitledHelpFormatter,
49 Option, OptionValueError)
53 # Command line options
67 "CLUSTER_DOMAIN_SECRET_OPT",
85 "FILESTORE_DRIVER_OPT",
91 "GLOBAL_SHARED_FILEDIR_OPT",
96 "DEFAULT_IALLOCATOR_OPT",
97 "IDENTIFY_DEFAULTS_OPT",
100 "IGNORE_FAILURES_OPT",
101 "IGNORE_OFFLINE_OPT",
102 "IGNORE_REMOVE_FAILURES_OPT",
103 "IGNORE_SECONDARIES_OPT",
107 "MAINTAIN_NODE_HEALTH_OPT",
109 "MASTER_NETMASK_OPT",
111 "MIGRATION_MODE_OPT",
113 "NEW_CLUSTER_CERT_OPT",
114 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
115 "NEW_CONFD_HMAC_KEY_OPT",
118 "NEW_SPICE_CERT_OPT",
120 "NODE_FORCE_JOIN_OPT",
122 "NODE_PLACEMENT_OPT",
126 "NODRBD_STORAGE_OPT",
132 "NOMODIFY_ETCHOSTS_OPT",
133 "NOMODIFY_SSH_SETUP_OPT",
137 "NORUNTIME_CHGS_OPT",
140 "NOSSH_KEYCHECK_OPT",
154 "PREALLOC_WIPE_DISKS_OPT",
155 "PRIMARY_IP_VERSION_OPT",
161 "REMOVE_INSTANCE_OPT",
167 "SECONDARY_ONLY_OPT",
171 "SHUTDOWN_TIMEOUT_OPT",
173 "SPECS_CPU_COUNT_OPT",
174 "SPECS_DISK_COUNT_OPT",
175 "SPECS_DISK_SIZE_OPT",
176 "SPECS_MEM_SIZE_OPT",
177 "SPECS_NIC_COUNT_OPT",
178 "IPOLICY_DISK_TEMPLATES",
179 "IPOLICY_VCPU_RATIO",
185 "STARTUP_PAUSED_OPT",
194 "USE_EXTERNAL_MIP_SCRIPT",
202 "IGNORE_IPOLICY_OPT",
203 "INSTANCE_POLICY_OPTS",
204 # Generic functions for CLI programs
206 "CreateIPolicyFromOpts",
208 "GenericInstanceCreate",
214 "JobSubmittedException",
216 "RunWhileClusterStopped",
220 # Formatting functions
221 "ToStderr", "ToStdout",
224 "FormatParameterDict",
233 # command line options support infrastructure
234 "ARGS_MANY_INSTANCES",
253 "OPT_COMPL_INST_ADD_NODES",
254 "OPT_COMPL_MANY_NODES",
255 "OPT_COMPL_ONE_IALLOCATOR",
256 "OPT_COMPL_ONE_INSTANCE",
257 "OPT_COMPL_ONE_NODE",
258 "OPT_COMPL_ONE_NODEGROUP",
264 "COMMON_CREATE_OPTS",
270 #: Priorities (sorted)
272 ("low", constants.OP_PRIO_LOW),
273 ("normal", constants.OP_PRIO_NORMAL),
274 ("high", constants.OP_PRIO_HIGH),
277 #: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
# Maps a priority name (e.g. "low") to its constants.OP_PRIO_* value.
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
282 # Query result status for clients
285 QR_INCOMPLETE) = range(3)
287 #: Maximum batch size for ChooseJob
291 # constants used to create InstancePolicy dictionary
292 TISPECS_GROUP_TYPES = {
293 constants.ISPECS_MIN: constants.VTYPE_INT,
294 constants.ISPECS_MAX: constants.VTYPE_INT,
297 TISPECS_CLUSTER_TYPES = {
298 constants.ISPECS_MIN: constants.VTYPE_INT,
299 constants.ISPECS_MAX: constants.VTYPE_INT,
300 constants.ISPECS_STD: constants.VTYPE_INT,
305 def __init__(self, min=0, max=None): # pylint: disable=W0622
310 return ("<%s min=%s max=%s>" %
311 (self.__class__.__name__, self.min, self.max))
314 class ArgSuggest(_Argument):
315 """Suggesting argument.
317 Value can be any of the ones passed to the constructor.
320 # pylint: disable=W0622
321 def __init__(self, min=0, max=None, choices=None):
322 _Argument.__init__(self, min=min, max=max)
323 self.choices = choices
326 return ("<%s min=%s max=%s choices=%r>" %
327 (self.__class__.__name__, self.min, self.max, self.choices))
330 class ArgChoice(ArgSuggest):
333 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
334 but value must be one of the choices.
339 class ArgUnknown(_Argument):
340 """Unknown argument to program (e.g. determined at runtime).
345 class ArgInstance(_Argument):
346 """Instances argument.
351 class ArgNode(_Argument):
357 class ArgGroup(_Argument):
358 """Node group argument.
363 class ArgJobId(_Argument):
369 class ArgFile(_Argument):
370 """File path argument.
375 class ArgCommand(_Argument):
381 class ArgHost(_Argument):
387 class ArgOs(_Argument):
# Pre-built positional-argument specifications for CLI commands:
# "MANY" variants accept any number of arguments (min=0, no upper bound),
# "ONE" variants require exactly one argument (min=1, max=1).
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
404 def _ExtractTagsObject(opts, args):
405 """Extract the tag type object.
407 Note that this function will modify its args parameter.
410 if not hasattr(opts, "tag_type"):
411 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
413 if kind == constants.TAG_CLUSTER:
415 elif kind in (constants.TAG_NODEGROUP,
417 constants.TAG_INSTANCE):
419 raise errors.OpPrereqError("no arguments passed to the command",
424 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
428 def _ExtendTags(opts, args):
429 """Extend the args if a source file has been given.
431 This function will extend the tags with the contents of the file
432 passed in the 'tags_source' attribute of the opts parameter. A file
433 named '-' will be replaced by stdin.
436 fname = opts.tags_source
442 new_fh = open(fname, "r")
445 # we don't use the nice 'new_data = [line.strip() for line in fh]'
446 # because of python bug 1633941
448 line = new_fh.readline()
451 new_data.append(line.strip())
454 args.extend(new_data)
457 def ListTags(opts, args):
458 """List the tags on a given object.
460 This is a generic implementation that knows how to deal with all
461 three cases of tag objects (cluster, node, instance). The opts
462 argument is expected to contain a tag_type field denoting what
463 object type we work on.
466 kind, name = _ExtractTagsObject(opts, args)
468 result = cl.QueryTags(kind, name)
469 result = list(result)
475 def AddTags(opts, args):
476 """Add tags on a given object.
478 This is a generic implementation that knows how to deal with all
479 three cases of tag objects (cluster, node, instance). The opts
480 argument is expected to contain a tag_type field denoting what
481 object type we work on.
484 kind, name = _ExtractTagsObject(opts, args)
485 _ExtendTags(opts, args)
487 raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
488 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
489 SubmitOrSend(op, opts)
492 def RemoveTags(opts, args):
493 """Remove tags from a given object.
495 This is a generic implementation that knows how to deal with all
496 three cases of tag objects (cluster, node, instance). The opts
497 argument is expected to contain a tag_type field denoting what
498 object type we work on.
501 kind, name = _ExtractTagsObject(opts, args)
502 _ExtendTags(opts, args)
504 raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
505 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
506 SubmitOrSend(op, opts)
509 def check_unit(option, opt, value): # pylint: disable=W0613
510 """OptParsers custom converter for units.
514 return utils.ParseUnit(value)
515 except errors.UnitParseError, err:
516 raise OptionValueError("option %s: %s" % (opt, err))
519 def _SplitKeyVal(opt, data):
520 """Convert a KeyVal string into a dict.
522 This function will convert a key=val[,...] string into a dict. Empty
523 values will be converted specially: keys which have the prefix 'no_'
524 will have the value=False and the prefix stripped, the others will
528 @param opt: a string holding the option name for which we process the
529 data, used in building error messages
531 @param data: a string of the format key=val,key=val,...
533 @return: {key=val, key=val}
534 @raises errors.ParameterError: if there are duplicate keys
539 for elem in utils.UnescapeAndSplit(data, sep=","):
541 key, val = elem.split("=", 1)
543 if elem.startswith(NO_PREFIX):
544 key, val = elem[len(NO_PREFIX):], False
545 elif elem.startswith(UN_PREFIX):
546 key, val = elem[len(UN_PREFIX):], None
548 key, val = elem, True
550 raise errors.ParameterError("Duplicate key '%s' in option %s" %
556 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
557 """Custom parser for ident:key=val,key=val options.
559 This will store the parsed values as a tuple (ident, {key: val}). As such,
560 multiple uses of this option via action=append is possible.
564 ident, rest = value, ""
566 ident, rest = value.split(":", 1)
568 if ident.startswith(NO_PREFIX):
570 msg = "Cannot pass options when removing parameter groups: %s" % value
571 raise errors.ParameterError(msg)
572 retval = (ident[len(NO_PREFIX):], False)
573 elif (ident.startswith(UN_PREFIX) and
574 (len(ident) <= len(UN_PREFIX) or
575 not ident[len(UN_PREFIX)][0].isdigit())):
577 msg = "Cannot pass options when removing parameter groups: %s" % value
578 raise errors.ParameterError(msg)
579 retval = (ident[len(UN_PREFIX):], None)
581 kv_dict = _SplitKeyVal(opt, rest)
582 retval = (ident, kv_dict)
586 def check_key_val(option, opt, value): # pylint: disable=W0613
587 """Custom parser class for key=val,key=val options.
589 This will store the parsed values as a dict {key: val}.
592 return _SplitKeyVal(opt, value)
595 def check_bool(option, opt, value): # pylint: disable=W0613
596 """Custom parser for yes/no options.
598 This will store the parsed value as either True or False.
601 value = value.lower()
602 if value == constants.VALUE_FALSE or value == "no":
604 elif value == constants.VALUE_TRUE or value == "yes":
607 raise errors.ParameterError("Invalid boolean value '%s'" % value)
610 def check_list(option, opt, value): # pylint: disable=W0613
611 """Custom parser for comma-separated lists.
614 # we have to make this explicit check since "".split(",") is [""],
615 # not an empty list :(
619 return utils.UnescapeAndSplit(value)
622 def check_maybefloat(option, opt, value): # pylint: disable=W0613
623 """Custom parser for float numbers which might be also defaults.
626 value = value.lower()
628 if value == constants.VALUE_DEFAULT:
634 # completion_suggestion is normally a list. Using numeric values not evaluating
635 # to False for dynamic completion.
636 (OPT_COMPL_MANY_NODES,
638 OPT_COMPL_ONE_INSTANCE,
640 OPT_COMPL_ONE_IALLOCATOR,
641 OPT_COMPL_INST_ADD_NODES,
642 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
644 OPT_COMPL_ALL = frozenset([
645 OPT_COMPL_MANY_NODES,
647 OPT_COMPL_ONE_INSTANCE,
649 OPT_COMPL_ONE_IALLOCATOR,
650 OPT_COMPL_INST_ADD_NODES,
651 OPT_COMPL_ONE_NODEGROUP,
655 class CliOption(Option):
656 """Custom option class for optparse.
659 ATTRS = Option.ATTRS + [
660 "completion_suggest",
662 TYPES = Option.TYPES + (
670 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
671 TYPE_CHECKER["identkeyval"] = check_ident_key_val
672 TYPE_CHECKER["keyval"] = check_key_val
673 TYPE_CHECKER["unit"] = check_unit
674 TYPE_CHECKER["bool"] = check_bool
675 TYPE_CHECKER["list"] = check_list
676 TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
# All option definitions below go through this alias so the custom CliOption
# class (extra types and completion support) is used everywhere.
cli_option = CliOption
# -d/--debug: repeatable flag, each use raises the debug level by one
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")
# --no-headers: suppress the column header row in list output
NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")
# --separator: custom field separator for list output (None = one space)
SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))
# --units: unit suffix for size fields in output (h=humanized, m/g/t fixed)
USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")
# -o/--output: comma-separated selection of fields for list commands
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")
# -f/--force: skip safety checks and force the operation
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")
# --yes: answer interactive confirmation prompts automatically
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
711 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
712 action="store_true", default=False,
713 help=("Ignore offline nodes and do as much"
716 TAG_ADD_OPT = cli_option("--tags", dest="tags",
717 default=None, help="Comma-separated list of instance"
# --from: read tag names from the given file ("-" conventionally means stdin)
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")
# --submit: queue the job and return its ID instead of waiting for completion
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))
# --sync: take locks during queries for more consistent (but slower) results
SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
733 DRY_RUN_OPT = cli_option("--dry-run", default=False,
735 help=("Do not execute the operation, just run the"
736 " check steps and verify it it could be"
739 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
741 help="Increase the verbosity of the operation")
# --debug-simulate-errors: debugging aid, makes most runtime checks fail
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")
# --no-wait-for-sync: return before disk mirrors have synced (dangerous)
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")
# --wait-for-sync: opposite default — explicitly wait for disk sync
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")
# --online: mark an offline instance as online again
# NOTE(review): help text "Enable offline instance" means "bring an offline
# instance back online" — confirm wording against the man pages
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")
# --offline: mark a (down) instance as offline
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")
# -t/--disk-template: choose one of the cluster's supported disk templates;
# choices are validated by optparse against constants.DISK_TEMPLATES
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))
770 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
771 help="Do not create any network cards for"
# --file-storage-dir: per-instance subdirectory (relative to the cluster-wide
# file storage directory) for file-based disks
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")
# --file-driver: backend driver for file-based disk images; validated
# against constants.FILE_DRIVER, defaults to the loopback driver
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))
# -I/--iallocator: let the named allocator plugin pick the target node(s);
# completion_suggest enables shell completion of allocator names
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
790 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
792 help="Set the default instance allocator plugin",
793 default=None, type="string",
794 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
796 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
798 completion_suggest=OPT_COMPL_ONE_OS)
# -O/--os-parameters: key=value[,key=value...] OS parameters, parsed by the
# custom "keyval" option type into a dict
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")
# --force-variant: accept an OS variant the cluster does not know about
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")
808 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
809 action="store_true", default=False,
810 help="Do not install the OS (will"
# --no-runtime-changes: forbid changes that would affect the running instance
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")
# -B/--backend-parameters: backend (be*) parameters as a key=value dict
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")
# -H/--hypervisor-parameters (plain keyval form): hypervisor parameters only
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")
# -D/--disk-parameters: repeatable "template:key=value,..." option; the
# "identkeyval" type yields (template, {key: value}) tuples
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])
# --specs-mem-size: instance policy memory-size bounds (min/max/std)
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")
# --specs-cpu-count: instance policy CPU-count bounds (min/max/std)
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")
# --specs-disk-count: instance policy disk-count bounds (min/max/std)
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")
# --specs-disk-size: instance policy disk-size bounds (min/max/std)
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")
# --specs-nic-count: instance policy NIC-count bounds (min/max/std)
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")
# --ipolicy-disk-templates: restrict which disk templates the policy allows
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")
# --ipolicy-vcpu-ratio: max vcpu-to-cpu ratio; "maybefloat" also accepts the
# special "default" value
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")
870 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
871 dest="ipolicy_spindle_ratio",
872 type="maybefloat", default=None,
873 help=("The maximum allowed instances to"
# -H/--hypervisor-parameters (single "hv:key=val" form): one hypervisor with
# its options; mutually exclusive in use with HVOPTS_OPT/HVLIST_OPT, which
# reuse the same switch for different commands
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")
# -H/--hypervisor-parameters (repeatable form): accumulates a list of
# (hypervisor, options) tuples
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
886 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
887 action="store_false",
888 help="Don't check that the instance's IP"
891 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
892 default=True, action="store_false",
893 help="Don't check that the instance's name"
# --net: repeatable NIC specification ("idx:key=val,...") per network card
NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")
# --disk: repeatable disk specification ("idx:key=val,...") per disk
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")
# --disks: comma-separated disk *indices* to act on (None = all disks)
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")
# -s/--os-size: single-disk shortcut; the "unit" type parses size suffixes
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")
913 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
914 dest="ignore_consistency",
915 action="store_true", default=False,
916 help="Ignore the consistency of the disks on"
919 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
920 dest="allow_failover",
921 action="store_true", default=False,
922 help="If migration is not possible fallback to"
# --non-live: request a non-live migration (freeze, save state, transfer,
# resume on the secondary); default is live migration
NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")
931 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
933 choices=list(constants.HT_MIGRATION_MODES),
934 help="Override default migration mode (choose"
935 " either live or non-live")
# -n/--node: explicit primary (and optional secondary) node placement
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
# -n/--node (repeatable form): restrict an operation to the listed nodes
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)
# Long-form switch name shared so callers can reference it consistently
NODEGROUP_OPT_NAME = "--node-group"
949 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
951 help="Node group (name or uuid)",
952 metavar="<nodegroup>",
953 default=None, type="string",
954 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
956 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
958 completion_suggest=OPT_COMPL_ONE_NODE)
# --no-start: create the instance but leave it stopped
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")
# --show-cmd: print the command that would run instead of executing it
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")
968 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
969 default=False, action="store_true",
970 help="Instead of performing the migration, try to"
971 " recover from a failed cleanup. This is safe"
972 " to run even if the instance is healthy, but it"
973 " will create extra replication traffic and "
974 " disrupt briefly the replication (like during the"
# -s/--static: show only configured data, skip (slow) runtime queries
STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")
# --all: expand info output to every instance on the cluster
ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")
# --select-os: interactively pick an OS template for reinstallation
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")
# --ignore-failures: drop the instance from the configuration even if some
# removal steps fail
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")
997 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
998 dest="ignore_remove_failures",
999 action="store_true", default=False,
1000 help="Remove the instance from the"
1001 " cluster configuration even if there"
1002 " are failures during the removal"
# --remove-instance: remove the instance from the cluster after export
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")
# -n/--target-node: destination node (e.g. for export/failover)
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)
# -n/--new-secondary: replacement secondary node (same dest as DST_NODE_OPT)
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)
# -p/--on-primary: replace disks on the primary node (mirrored templates only)
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))
# -s/--on-secondary: replace disks on the secondary node (mirrored templates)
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))
1033 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1034 default=False, action="store_true",
1035 help="Lock all nodes and auto-promote as needed"
# -a/--auto: automatically replace faulty disks (mirrored templates only)
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))
# --ignore-size: skip the recorded-size check when activating disks
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")
1051 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1053 completion_suggest=OPT_COMPL_ONE_NODE)
1055 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
# -s/--secondary-ip: secondary (replication) IP address for a node
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)
# --readd: re-add a previously removed node
READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")
# --no-ssh-key-check: skip SSH host key fingerprint verification
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")
# --force-join: join the node even if checks would normally prevent it
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")
# -C/--master-candidate: tristate yes/no flag parsed by the "bool" type;
# default None means "leave unchanged"
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")
1078 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1079 type="bool", default=None,
1080 help=("Set the offline flag on the node"
1081 " (cluster does not communicate with offline"
# -D/--drained: tristate flag excluding the node from allocations
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))
# --master-capable: tristate flag, whether the node may become master
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")
# --vm-capable: tristate flag, whether the node may host instances
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")
# --allocatable: tristate flag on a storage volume
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")
1101 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1102 help="Disable support for lvm based instances"
1104 action="store_false", default=True)
# --enabled-hypervisors: comma-separated list of hypervisors to enable
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)
# -N/--nic-parameters: cluster-level default NIC parameters (key=value dict)
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")
# -C/--candidate-pool-size: number of nodes kept as master candidates
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")
# --vg-name: enable LVM and set the cluster-wide volume group name
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)
# --yes-do-it: explicit, hard-to-type-by-accident confirmation for destroying
# the cluster ("--ya-rly" is an intentional joke alias)
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")
# --no-voting: skip the node agreement (voting) check — dangerous
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)
1132 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1133 help="Specify the mac prefix for the instance IP"
1134 " addresses, in the format XX:XX:XX",
1138 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1139 help="Specify the node interface (cluster-wide)"
1140 " on which the master IP address will be added"
1141 " (cluster init default: %s)" %
1142 constants.DEFAULT_BRIDGE,
1146 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1147 help="Specify the netmask of the master IP",
# --use-external-mip-script: tristate flag, run a user-provided script for
# master IP turnup/turndown instead of the built-in handling
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a user-provided"
                                     " script for the master IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)
1158 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1159 help="Specify the default directory (cluster-"
1160 "wide) for storing the file-based disks [%s]" %
1161 constants.DEFAULT_FILE_STORAGE_DIR,
1163 default=constants.DEFAULT_FILE_STORAGE_DIR)
1165 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1166 dest="shared_file_storage_dir",
1167 help="Specify the default directory (cluster-"
1168 "wide) for storing the shared file-based"
1170 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1171 metavar="SHAREDDIR",
1172 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
# --no-etc-hosts: leave /etc/hosts untouched during cluster init/join
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)
# --no-ssh-init: skip SSH key initialization during cluster init/join
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)
# --error-codes: emit machine-parseable error messages (cluster verify)
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)
# --no-nplus1-mem: skip the N+1 memory redundancy check (cluster verify)
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)
1190 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1191 help="Type of reboot: soft/hard/full",
1192 default=constants.INSTANCE_REBOOT_HARD,
1194 choices=list(constants.REBOOT_TYPES))
# --ignore-secondaries: proceed despite errors on secondary nodes (reboot)
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")
# --noshutdown: skip the clean shutdown step (unsafe for the instance)
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")
# --timeout: generic wait limit in seconds
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")
# --shutdown-timeout: wait limit specifically for instance shutdown
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance shutdown")
1214 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1216 help=("Number of seconds between repetions of the"
1219 EARLY_RELEASE_OPT = cli_option("--early-release",
1220 dest="early_release", default=False,
1221 action="store_true",
1222 help="Release the locks on the secondary"
# --new-cluster-certificate: regenerate the cluster SSL certificate
# (renew-crypto)
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")
1230 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1232 help="File containing new RAPI certificate")
1234 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1235 default=None, action="store_true",
1236 help=("Generate a new self-signed RAPI"
1239 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1241 help="File containing new SPICE certificate")
1243 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1245 help="File containing the certificate of the CA"
1246 " which signed the SPICE certificate")
1248 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1249 dest="new_spice_cert", default=None,
1250 action="store_true",
1251 help=("Generate a new self-signed SPICE"
1254 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1255 dest="new_confd_hmac_key",
1256 default=False, action="store_true",
1257 help=("Create a new HMAC key for %s" %
1260 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1261 dest="cluster_domain_secret",
1263 help=("Load new new cluster domain"
1264 " secret from file"))
1266 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1267 dest="new_cluster_domain_secret",
1268 default=False, action="store_true",
1269 help=("Create a new cluster domain"
1272 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1273 dest="use_replication_network",
1274 help="Whether to use the replication network"
1275 " for talking to the nodes",
1276 action="store_true", default=False)
# Cluster-wide behaviour options
MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

# User-id pool management (comma-separated ids or ranges)
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

# DRBD-related options
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

# Opcode priority; valid names come from the _PRIORITY_NAMES table defined
# earlier in this module
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")
# OS flags (gnt-os modify)
HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

# NOTE(review): truncated statement — help continuation and closing
# parentheses are missing in this copy.
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

# Node and node-group options
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

# Out-of-band management timing
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

# NOTE(review): truncated statement — help continuation missing.
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

# Evacuation scope selectors
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

# NOTE(review): truncated statement — the metavar argument and closing
# parenthesis are missing.
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            help=("Specify disk state information in the"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),

# NOTE(review): truncated statement — the metavar argument and closing
# parenthesis are missing.
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# NOTE(review): this list appears truncated in this copy (no closing
# bracket and only one element visible) — restore from upstream.
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,

# common instance policy options
# NOTE(review): this list also appears truncated (no closing bracket).
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_SPINDLE_RATIO,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of env variables allowed for default args
  @return: a tuple of (function, options, args); all three are C{None}
      when no valid command was given (usage was printed instead)

  """
  # every env-overridable command must be a real command
  assert not (env_override - set(commands))

  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen)  # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command!" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    # default arguments can be injected through an environment variable
    # named after the binary and the command, e.g. GNT_CLUSTER_VERIFY
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1568 def _CheckArguments(cmd, args_def, args):
1569 """Verifies the arguments using the argument definition.
1573 1. Abort with error if values specified by user but none expected.
1575 1. For each argument in definition
1577 1. Keep running count of minimum number of values (min_count)
1578 1. Keep running count of maximum number of values (max_count)
1579 1. If it has an unlimited number of values
1581 1. Abort with error if it's not the last argument in the definition
1583 1. If last argument has limited number of values
1585 1. Abort with error if number of values doesn't match or is too large
1587 1. Abort with error if user didn't pass enough values (min_count)
1590 if args and not args_def:
1591 ToStderr("Error: Command %s expects no arguments", cmd)
1598 last_idx = len(args_def) - 1
1600 for idx, arg in enumerate(args_def):
1601 if min_count is None:
1603 elif arg.min is not None:
1604 min_count += arg.min
1606 if max_count is None:
1608 elif arg.max is not None:
1609 max_count += arg.max
1612 check_max = (arg.max is not None)
1614 elif arg.max is None:
1615 raise errors.ProgrammerError("Only the last argument can have max=None")
1618 # Command with exact number of arguments
1619 if (min_count is not None and max_count is not None and
1620 min_count == max_count and len(args) != min_count):
1621 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1624 # Command with limited number of arguments
1625 if max_count is not None and len(args) > max_count:
1626 ToStderr("Error: Command %s expects only %d argument(s)",
1630 # Command with some required arguments
1631 if min_count is not None and len(args) < min_count:
1632 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @type value: string or None
  @param value: the user-supplied value; either a plain node name or a
      C{"primary:secondary"} pair
  @return: a (primary, secondary) pair; the secondary element is C{None}
      when no colon-separated second node was given

  """
  if value and ":" in value:
    # only split on the first colon, the remainder is the secondary node
    return value.split(":", 1)
  else:
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    # no variants (None or empty list): the base name is the only name
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected, the default
      fields extended by the selection when it starts with "+", or the
      comma-split selection otherwise

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    # "+a,b" means: default fields plus a and b
    return default + selected[1:].split(",")

  return selected.split(",")
1684 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last entry is the fallback answer when interaction is impossible
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # use open() instead of the Python2-only file() builtin
    f = open("/dev/tty", "a+")
  except IOError:
    # no controlling terminal, return the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # print a small help text describing each choice
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the job ID assigned by the master

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  # NOTE(review): several lines of this function appear to have been lost
  # in this copy (the polling loop header, some call continuations and a
  # few branch bodies are missing); restore from upstream before relying
  # on this code.

  # state carried between poll iterations, used for change detection
  prev_job_info = None
  prev_logmsg_serial = None

  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # remember the highest serial seen so already-reported messages are
    # not requested again
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # the job has finished; fetch its final status and per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # report the first failed opcode, re-raising encapsulated errors when
  # possible
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      raise errors.OpExecError("partial failure (opcode %d): %s" %

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """Job-polling callbacks backed by a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: the luxi client used for all queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callback that forwards log messages to a caller-supplied
  # feedback function and ignores "not changed" notifications.
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable receiving (timestamp, log_type, log_msg)
        tuples for every job log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # deliberately ignored: the feedback function only cares about log
    # messages
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print to standard output/error.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # only notify once per state to avoid flooding the terminal
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  # non-message payloads may be arbitrary objects; coerce to string first
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: if given, messages are passed to this callable
      instead of being printed to stdio
  @param reporter: an explicit L{JobPollReportCbBase} instance; mutually
      exclusive with C{feedback_fn}

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @param cl: luxi client to use, created on demand when C{None}
  @param opts: command line options used to tune the opcode (debug level,
      dry-run, priority)
  @return: the result of the first (and only) opcode of the job

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # not an error: structured way to report the job ID and exit
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
def GetClient():
  """Connects to the master daemon and returns a luxi client.

  When no master is reachable, tries to produce a helpful error message
  (cluster not initialized, or command run on a non-master node).

  @raise errors.OpPrereqError: if the cluster is not initialized or this
      node is not the master

  """
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    # the diagnosis above did not apply; re-raise the original error
    raise
  return client
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE(review): the local setup of this function (the output buffer,
  # the "msg" string and the default return code) appears to be missing
  # from this copy, as are several branch bodies; restore from upstream.
  # The branches below write the formatted text into "obuf".

  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      obuf.write("  node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write("  node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # err.args[0] is the failing hostname
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # two-argument form carries (message, error code)
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: report the submitted job ID
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # NOTE(review): several lines of this function appear to have been lost
  # in this copy (the try/except statements around argument parsing and
  # command execution are missing their "try" headers, and some if/else
  # branches are collapsed); restore from upstream before relying on it.

  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0])
  binary = sys.argv[0]

  if len(sys.argv) >= 2:
    logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
  cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  binary = "<unknown program>"
  cmdline = "<unknown>"

  func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error
  if override is not None:
    # script-supplied values take precedence over command line options
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
2269 def ParseNicOption(optvalue):
2270 """Parses the value of the --net option(s).
2274 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2275 except (TypeError, ValueError), err:
2276 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2279 nics = [{}] * nic_max
2280 for nidx, ndict in optvalue:
2283 if not isinstance(ndict, dict):
2284 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2285 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2287 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  # NOTE(review): many lines of this function appear to have been lost in
  # this copy (instance-name extraction, the NIC defaulting branches,
  # several try headers and parts of the opcode construction); restore
  # from upstream before relying on this code.

  # --node may carry "pnode:snode"
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # Disk specification sanity checks
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
  if (not opts.disks and not opts.sd_size
      and mode == constants.INSTANCE_CREATE):
    raise errors.OpPrereqError("No disk information specified",
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
                               " '-s' option", errors.ECODE_INVAL)
  if opts.sd_size is not None:
    # convert the single-disk shortcut into the generic disk list form
    opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

  disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
  disks = [{}] * disk_max
  for didx, ddict in opts.disks:
    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
    elif constants.IDISK_SIZE in ddict:
      # "size" and "adopt" are mutually exclusive per disk
      if constants.IDISK_ADOPT in ddict:
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx, errors.ECODE_INVAL)
      ddict[constants.IDISK_SIZE] = \
        utils.ParseUnit(ddict[constants.IDISK_SIZE])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                   (didx, err), errors.ECODE_INVAL)
    elif constants.IDISK_ADOPT in ddict:
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                   " import", errors.ECODE_INVAL)
      ddict[constants.IDISK_SIZE] = 0
      raise errors.OpPrereqError("Missing size or adoption source for"
                                 " disk %d" % didx, errors.ECODE_INVAL)

  if opts.tags is not None:
    tags = opts.tags.split(",")

  # normalize the backend and hypervisor parameter types
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-specific settings
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
  raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
2433 class _RunWhileClusterStoppedHelper:
2434 """Helper class for L{RunWhileClusterStopped} to simplify state management
2437 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2438 """Initializes this class.
2440 @type feedback_fn: callable
2441 @param feedback_fn: Feedback function
2442 @type cluster_name: string
2443 @param cluster_name: Cluster name
2444 @type master_node: string
2445 @param master_node Master node name
2446 @type online_nodes: list
2447 @param online_nodes: List of names of online nodes
2450 self.feedback_fn = feedback_fn
2451 self.cluster_name = cluster_name
2452 self.master_node = master_node
2453 self.online_nodes = online_nodes
2455 self.ssh = ssh.SshRunner(self.cluster_name)
2457 self.nonmaster_nodes = [name for name in online_nodes
2458 if name != master_node]
2460 assert self.master_node not in self.nonmaster_nodes
2462 def _RunCmd(self, node_name, cmd):
2463 """Runs a command on the local or a remote machine.
2465 @type node_name: string
2466 @param node_name: Machine name
2471 if node_name is None or node_name == self.master_node:
2472 # No need to use SSH
2473 result = utils.RunCmd(cmd)
2475 result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2478 errmsg = ["Failed to run command %s" % result.cmd]
2480 errmsg.append("on node %s" % node_name)
2481 errmsg.append(": exitcode %s and error %s" %
2482 (result.exit_code, result.output))
2483 raise errors.OpExecError(" ".join(errmsg))
2485 def Call(self, fn, *args):
2486 """Call function while all daemons are stopped.
2489 @param fn: Function to be called
2492 # Pause watcher by acquiring an exclusive lock on watcher state file
2493 self.feedback_fn("Blocking watcher")
2494 watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2496 # TODO: Currently, this just blocks. There's no timeout.
2497 # TODO: Should it be a shared lock?
2498 watcher_block.Exclusive(blocking=True)
2500 # Stop master daemons, so that no new jobs can come in and all running
2502 self.feedback_fn("Stopping master daemons")
2503 self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2505 # Stop daemons on all nodes
2506 for node_name in self.online_nodes:
2507 self.feedback_fn("Stopping daemons on %s" % node_name)
2508 self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2510 # All daemons are shut down now
2512 return fn(self, *args)
2513 except Exception, err:
2514 _, errmsg = FormatError(err)
2515 logging.exception("Caught exception")
2516 self.feedback_fn(errmsg)
2519 # Start cluster again, master node last
2520 for node_name in self.nonmaster_nodes + [self.master_node]:
2521 self.feedback_fn("Starting daemons on %s" % node_name)
2522 self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2525 watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # no need to pad the last column if left-aligned
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2664 def _FormatBool(value):
2665 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side
  @raise NotImplementedError: if the field kind has no known formatter

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    @param data: tuple of (status, value) as returned by the query

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status
  @raise NotImplementedError: if the status has no description

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple
  @return: (overall status, list of formatted table lines)

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Collects the unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list of L{objects.QueryFieldDefinition}
  @return: the definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @rtype: int
  @return: L{constants.EXIT_UNKNOWN_FIELD} if unknown fields were queried,
    L{constants.EXIT_SUCCESS} otherwise

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: L{constants.EXIT_UNKNOWN_FIELD} if unknown fields were queried,
    L{constants.EXIT_SUCCESS} otherwise

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2969 """Describes a column for L{FormatTable}.
2972 def __init__(self, title, fn, align_right):
2973 """Initializes this class.
2976 @param title: Column title
2978 @param fn: Formatting function
2979 @type align_right: bool
2980 @param align_right: Whether to align values on the right-hand side
2985 self.align_right = align_right
2988 def _GetColFormatString(width, align_right):
2989 """Returns the format string for a field.
2997 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted table lines

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or C{"?"} if the input
    is not a two-element tuple/list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the parsed value in seconds
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3189 def _ToStream(stream, txt, *args):
3190 """Write a message to a stream, bypassing the logging system
3192 @type stream: file object
3193 @param stream: the file to which we should write
3195 @param txt: the message
3201 stream.write(txt % args)
3206 except IOError, err:
3207 if err.errno == errno.EPIPE:
3208 # our terminal went away, we'll exit
3209 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional arguments interpolated into C{txt}

  """
  out = sys.stdout
  _ToStream(out, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional arguments interpolated into C{txt}

  """
  err_stream = sys.stderr
  _ToStream(err_stream, txt, *args)
3238 class JobExecutor(object):
3239 """Class which manages the submission and execution of multiple jobs.
3241 Note that instances of this class should not be reused between
3245 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3250 self.verbose = verbose
3253 self.feedback_fn = feedback_fn
3254 self._counter = itertools.count()
3257 def _IfName(name, fmt):
3258 """Helper function for formatting name.
3266 def QueueJob(self, name, *ops):
3267 """Record a job for later submit.
3270 @param name: a description of the job, will be used in WaitJobSet
3273 SetGenericOpcodeOpts(ops, self.opts)
3274 self.queue.append((self._counter.next(), name, ops))
3276 def AddJobId(self, name, status, job_id):
3277 """Adds a job ID to the internal queue.
3280 self.jobs.append((self._counter.next(), status, job_id, name))
3282 def SubmitPending(self, each=False):
3283 """Submit all pending jobs.
3288 for (_, _, ops) in self.queue:
3289 # SubmitJob will remove the success status, but raise an exception if
3290 # the submission fails, so we'll notice that anyway.
3291 results.append([True, self.cl.SubmitJob(ops)[0]])
3293 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3294 for ((status, data), (idx, name, _)) in zip(results, self.queue):
3295 self.jobs.append((idx, status, data, name))
3297 def _ChooseJob(self):
3298 """Choose a non-waiting/queued job to poll next.
3301 assert self.jobs, "_ChooseJob called with empty job list"
3303 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3307 for job_data, status in zip(self.jobs, result):
3308 if (isinstance(status, list) and status and
3309 status[0] in (constants.JOB_STATUS_QUEUED,
3310 constants.JOB_STATUS_WAITING,
3311 constants.JOB_STATUS_CANCELING)):
3312 # job is still present and waiting
3314 # good candidate found (either running job or lost job)
3315 self.jobs.remove(job_data)
3319 return self.jobs.pop(0)
3321 def GetResults(self):
3322 """Wait for and return the results of all jobs.
3325 @return: list of tuples (success, job results), in the same order
3326 as the submitted jobs; if a job has failed, instead of the result
3327 there will be the error message
3331 self.SubmitPending()
3334 ok_jobs = [row[2] for row in self.jobs if row[1]]
3336 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3338 # first, remove any non-submitted jobs
3339 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3340 for idx, _, jid, name in failures:
3341 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3342 results.append((idx, False, jid))
3345 (idx, _, jid, name) = self._ChooseJob()
3346 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3348 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3350 except errors.JobLost, err:
3351 _, job_result = FormatError(err)
3352 ToStderr("Job %s%s has been archived, cannot check its result",
3353 jid, self._IfName(name, " for %s"))
3355 except (errors.GenericError, luxi.ProtocolError), err:
3356 _, job_result = FormatError(err)
3358 # the error message will always be shown, verbose or not
3359 ToStderr("Job %s%s has failed: %s",
3360 jid, self._IfName(name, " for %s"), job_result)
3362 results.append((idx, success, job_result))
3364 # sort based on the index, then drop it
3366 results = [i[1:] for i in results]
3370 def WaitOrShow(self, wait):
3371 """Wait for job results or only print the job IDs.
3374 @param wait: whether to wait or not
3378 return self.GetResults()
3381 self.SubmitPending()
3382 for _, status, result, name in self.jobs:
3384 ToStdout("%s: %s", result, name)
3386 ToStderr("Failure for %s: %s", name, result)
3387 return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      # nested parameter group: recurse with increased indentation
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      # plain value: show the own value, or the default from "actual"
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # for long lists only offer to view the affected items on demand
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  @type elements: dict
  @param elements: mapping of keys to values, where values are either
    L{constants.VALUE_DEFAULT} (kept as-is) or strings parseable by
    L{utils.ParseUnit}
  @rtype: dict
  @return: mapping with the same keys and parsed values

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      # "default" is passed through unmodified
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
3469 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3470 ispecs_cpu_count=None,
3471 ispecs_disk_count=None,
3472 ispecs_disk_size=None,
3473 ispecs_nic_count=None,
3474 ipolicy_disk_templates=None,
3475 ipolicy_vcpu_ratio=None,
3476 ipolicy_spindle_ratio=None,
3477 group_ipolicy=False,
3478 allowed_values=None,
3480 """Creation of instance policy based on command line options.
3482 @param fill_all: whether for cluster policies we should ensure that
3483 all values are filled
3489 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3490 if ispecs_disk_size:
3491 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3492 except (TypeError, ValueError, errors.UnitParseError), err:
3493 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3495 (ispecs_disk_size, ispecs_mem_size, err),
3498 # prepare ipolicy dict
3499 ipolicy_transposed = {
3500 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3501 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3502 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3503 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3504 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3507 # first, check that the values given are correct
3509 forced_type = TISPECS_GROUP_TYPES
3511 forced_type = TISPECS_CLUSTER_TYPES
3513 for specs in ipolicy_transposed.values():
3514 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3517 ipolicy_out = objects.MakeEmptyIPolicy()
3518 for name, specs in ipolicy_transposed.iteritems():
3519 assert name in constants.ISPECS_PARAMETERS
3520 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3521 ipolicy_out[key][name] = val
3523 # no filldict for non-dicts
3524 if not group_ipolicy and fill_all:
3525 if ipolicy_disk_templates is None:
3526 ipolicy_disk_templates = constants.DISK_TEMPLATES
3527 if ipolicy_vcpu_ratio is None:
3528 ipolicy_vcpu_ratio = \
3529 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3530 if ipolicy_spindle_ratio is None:
3531 ipolicy_spindle_ratio = \
3532 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
3533 if ipolicy_disk_templates is not None:
3534 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3535 if ipolicy_vcpu_ratio is not None:
3536 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3537 if ipolicy_spindle_ratio is not None:
3538 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3540 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)