4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
54 # Command line options
57 "ADD_RESERVED_IPS_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
87 "FILESTORE_DRIVER_OPT",
95 "GLOBAL_SHARED_FILEDIR_OPT",
100 "DEFAULT_IALLOCATOR_OPT",
101 "IDENTIFY_DEFAULTS_OPT",
102 "IGNORE_CONSIST_OPT",
104 "IGNORE_FAILURES_OPT",
105 "IGNORE_OFFLINE_OPT",
106 "IGNORE_REMOVE_FAILURES_OPT",
107 "IGNORE_SECONDARIES_OPT",
111 "MAINTAIN_NODE_HEALTH_OPT",
113 "MASTER_NETMASK_OPT",
115 "MIGRATION_MODE_OPT",
120 "NEW_CLUSTER_CERT_OPT",
121 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
122 "NEW_CONFD_HMAC_KEY_OPT",
125 "NEW_SPICE_CERT_OPT",
127 "NOCONFLICTSCHECK_OPT",
128 "NODE_FORCE_JOIN_OPT",
130 "NODE_PLACEMENT_OPT",
134 "NODRBD_STORAGE_OPT",
140 "NOMODIFY_ETCHOSTS_OPT",
141 "NOMODIFY_SSH_SETUP_OPT",
145 "NORUNTIME_CHGS_OPT",
148 "NOSSH_KEYCHECK_OPT",
162 "PREALLOC_WIPE_DISKS_OPT",
163 "PRIMARY_IP_VERSION_OPT",
169 "REMOVE_INSTANCE_OPT",
170 "REMOVE_RESERVED_IPS_OPT",
176 "SECONDARY_ONLY_OPT",
181 "SHUTDOWN_TIMEOUT_OPT",
183 "SPECS_CPU_COUNT_OPT",
184 "SPECS_DISK_COUNT_OPT",
185 "SPECS_DISK_SIZE_OPT",
186 "SPECS_MEM_SIZE_OPT",
187 "SPECS_NIC_COUNT_OPT",
188 "IPOLICY_DISK_TEMPLATES",
189 "IPOLICY_VCPU_RATIO",
195 "STARTUP_PAUSED_OPT",
204 "USE_EXTERNAL_MIP_SCRIPT",
212 "IGNORE_IPOLICY_OPT",
213 "INSTANCE_POLICY_OPTS",
214 # Generic functions for CLI programs
216 "CreateIPolicyFromOpts",
218 "GenericInstanceCreate",
224 "JobSubmittedException",
226 "RunWhileClusterStopped",
230 # Formatting functions
231 "ToStderr", "ToStdout",
234 "FormatParameterDict",
243 # command line options support infrastructure
244 "ARGS_MANY_INSTANCES",
247 "ARGS_MANY_NETWORKS",
266 "OPT_COMPL_INST_ADD_NODES",
267 "OPT_COMPL_MANY_NODES",
268 "OPT_COMPL_ONE_IALLOCATOR",
269 "OPT_COMPL_ONE_INSTANCE",
270 "OPT_COMPL_ONE_NODE",
271 "OPT_COMPL_ONE_NODEGROUP",
272 "OPT_COMPL_ONE_NETWORK",
278 "COMMON_CREATE_OPTS",
284 #: Priorities (sorted)
286 ("low", constants.OP_PRIO_LOW),
287 ("normal", constants.OP_PRIO_NORMAL),
288 ("high", constants.OP_PRIO_HIGH),
291 #: Priority dictionary for easier lookup
292 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
293 # we migrate to Python 2.6
294 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
296 # Query result status for clients
299 QR_INCOMPLETE) = range(3)
301 #: Maximum batch size for ChooseJob
305 # constants used to create InstancePolicy dictionary
306 TISPECS_GROUP_TYPES = {
307 constants.ISPECS_MIN: constants.VTYPE_INT,
308 constants.ISPECS_MAX: constants.VTYPE_INT,
311 TISPECS_CLUSTER_TYPES = {
312 constants.ISPECS_MIN: constants.VTYPE_INT,
313 constants.ISPECS_MAX: constants.VTYPE_INT,
314 constants.ISPECS_STD: constants.VTYPE_INT,
319 def __init__(self, min=0, max=None): # pylint: disable=W0622
324 return ("<%s min=%s max=%s>" %
325 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # choices: candidate values, presumably offered for shell completion
    # rather than enforced -- see ArgChoice for the enforcing variant.
    self.choices = choices

    # NOTE(review): the "def __repr__(self):" line appears to be missing
    # from this copy of the file.
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
365 class ArgNode(_Argument):
371 class ArgNetwork(_Argument):
class ArgGroup(_Argument):
  """Node group argument.

  """
383 class ArgJobId(_Argument):
class ArgFile(_Argument):
  """File path argument.

  """
395 class ArgCommand(_Argument):
401 class ArgHost(_Argument):
407 class ArgOs(_Argument):
# Canned positional-argument specifications, shared by the command tables
# of the individual CLI scripts.  "MANY" variants accept any number of
# arguments; "ONE" variants require exactly one.
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command-line options; must carry a C{tag_type}
      attribute naming the kind of tagged object
  @param args: positional arguments; the object name, where one is
      needed, comes from here
  @return: a (kind, name) tuple -- see the callers L{ListTags},
      L{AddTags} and L{RemoveTags}

  """
  # tag_type is injected by the per-script command definitions, so its
  # absence is a programming error rather than bad user input.
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  if kind == constants.TAG_CLUSTER:
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
      # These tag kinds name a specific object, hence an argument is
      # required.
      raise errors.OpPrereqError("no arguments passed to the command",
  # NOTE(review): several lines of this function (e.g. the assignment of
  # 'kind' and the branch bodies) appear to be missing from this copy.
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command-line options; C{tags_source} (set via the
      --from option, TAG_SRC_OPT) names the file to read, if any
  @param args: tag list, extended in place with one stripped tag per line

  """
  fname = opts.tags_source
    new_fh = open(fname, "r")
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    line = new_fh.readline()
      new_data.append(line.strip())
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @param opts: parsed command-line options (must carry C{tag_type})
  @param args: positional arguments, i.e. the name of the tagged object

  """
  kind, name = _ExtractTagsObject(opts, args)
  # Listing is read-only, so the query client is sufficient.
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  # Materialize the result; QueryTags may return a non-list container.
  result = list(result)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @param opts: parsed command-line options (must carry C{tag_type})
  @param args: the object name followed by the tags to add; a file
      given via --from, if any, contributes additional tags

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  # NOTE(review): the guard for the no-tags case (presumably
  # "if not args:") is not visible in this copy.
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  @param opts: parsed command-line options (must carry C{tag_type})
  @param args: the object name followed by the tags to remove; a file
      given via --from, if any, contributes additional tags

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  # NOTE(review): the guard for the no-tags case (presumably
  # "if not args:") is not visible in this copy.
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  Delegates to L{utils.ParseUnit}; any UnitParseError is re-raised as an
  optparse OptionValueError naming the offending option.

  """
    # NOTE(review): the enclosing "try:" line is missing from this copy.
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True; keys carrying the UN_PREFIX are mapped to None.

  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @param data: a string of the format key=val,key=val,...
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
    # Commas inside values can be escaped; UnescapeAndSplit handles that.
    for elem in utils.UnescapeAndSplit(data, sep=","):
        key, val = elem.split("=", 1)
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
          key, val = elem, True
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  @param opt: option name, used in error messages
  @param value: the "ident:key=val,..." string to parse
  @return: (ident, kv_dict); or (stripped_ident, False) for a NO_PREFIX
      ident, (stripped_ident, None) for an UN_PREFIX ident

  """
  # No ":" means an ident with an empty parameter list.
  ident, rest = value, ""
  ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
      # "no_<ident>" is a removal request, so options make no sense here.
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
      # UN_PREFIX not followed by a digit: reset-to-default request,
      # again incompatible with passing options.
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @param opt: option name, used by L{_SplitKeyVal} in error messages
  @param value: the "key=val,key=val,..." string to parse
  @return: dict of parsed key/value pairs

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raises errors.ParameterError: for values that are neither false-ish
      (VALUE_FALSE, "no") nor true-ish (VALUE_TRUE, "yes")

  """
  # Comparison is case-insensitive.
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
  elif value == constants.VALUE_TRUE or value == "yes":
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  Splits the value on (unescaped) commas via L{utils.UnescapeAndSplit}.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
    return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  Accepts the special C{constants.VALUE_DEFAULT} string as well as
  (presumably, per the name) a plain float value -- the remainder of the
  body is not visible in this copy.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
657 # completion_suggestion is normally a list. Using numeric values not evaluating
658 # to False for dynamic completion.
659 (OPT_COMPL_MANY_NODES,
661 OPT_COMPL_ONE_INSTANCE,
663 OPT_COMPL_ONE_IALLOCATOR,
664 OPT_COMPL_ONE_NETWORK,
665 OPT_COMPL_INST_ADD_NODES,
666 OPT_COMPL_ONE_NODEGROUP) = range(100, 108)
668 OPT_COMPL_ALL = frozenset([
669 OPT_COMPL_MANY_NODES,
671 OPT_COMPL_ONE_INSTANCE,
673 OPT_COMPL_ONE_IALLOCATOR,
674 OPT_COMPL_ONE_NETWORK,
675 OPT_COMPL_INST_ADD_NODES,
676 OPT_COMPL_ONE_NODEGROUP,
class CliOption(Option):
  """Custom option class for optparse.

  Extends the stock optparse Option with a C{completion_suggest}
  attribute (used for dynamic shell completion, see the OPT_COMPL_*
  constants above) and registers the extra value types implemented by
  the check_* helpers in this module.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
  TYPES = Option.TYPES + (
  # Map each custom type name to its converter; optparse calls these to
  # turn the raw command-line string into the stored value.
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
# (scripts use the cli_option name rather than the class directly)
cli_option = CliOption
# Generic output-formatting and confirmation options, shared by most of
# the command-line scripts.

DEBUG_OPT = cli_option("-d", "--debug", action="count", default=0,
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", dest="no_headers",
                       action="store_true", default=False,
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", dest="separator",
                     action="store", default=None,
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", dest="units",
                          choices=("h", "m", "g", "t"), default=None,
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output",
                        action="store", type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force",
                       action="store_true", default=False,
                       help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm",
                         action="store_true", default=False,
                         help="Do not require confirmation")
736 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
737 action="store_true", default=False,
738 help=("Ignore offline nodes and do as much"
741 TAG_ADD_OPT = cli_option("--tags", dest="tags",
742 default=None, help="Comma-separated list of instance"
745 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
746 default=None, help="File with tag names")
# Job-submission and locking behaviour shared by several scripts.

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        action="store_true", default=False,
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      action="store_true", default=False,
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
758 DRY_RUN_OPT = cli_option("--dry-run", default=False,
760 help=("Do not execute the operation, just run the"
761 " check steps and verify if it could be"
764 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
766 help="Increase the verbosity of the operation")
768 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
769 action="store_true", dest="simulate_errors",
770 help="Debugging option that makes the operation"
771 " treat most runtime checks as failed")
773 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
774 default=True, action="store_false",
775 help="Don't wait for sync (DANGEROUS!)")
777 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
778 default=False, action="store_true",
779 help="Wait for disks to sync")
781 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
782 action="store_true", default=False,
783 help="Enable offline instance")
785 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
786 action="store_true", default=False,
787 help="Disable down instance")
789 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
790 help=("Custom disk setup (%s)" %
791 utils.CommaJoin(constants.DISK_TEMPLATES)),
792 default=None, metavar="TEMPL",
793 choices=list(constants.DISK_TEMPLATES))
795 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
796 help="Do not create any network cards for"
799 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
800 help="Relative path under default cluster-wide"
801 " file storage dir to store file-based disks",
802 default=None, metavar="<DIR>")
804 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
805 help="Driver to use for image files",
806 default="loop", metavar="<DRIVER>",
807 choices=list(constants.FILE_DRIVER))
809 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
810 help="Select nodes for the instance automatically"
811 " using the <NAME> iallocator plugin",
812 default=None, type="string",
813 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
815 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
817 help="Set the default instance"
819 default=None, type="string",
820 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
822 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
824 completion_suggest=OPT_COMPL_ONE_OS)
826 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
827 type="keyval", default={},
828 help="OS parameters")
830 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
831 action="store_true", default=False,
832 help="Force an unknown variant")
834 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
835 action="store_true", default=False,
836 help="Do not install the OS (will"
839 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
840 dest="allow_runtime_chgs",
841 default=True, action="store_false",
842 help="Don't allow runtime changes")
844 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
845 type="keyval", default={},
846 help="Backend parameters")
848 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
849 default={}, dest="hvparams",
850 help="Hypervisor parameters")
852 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
853 help="Disk template parameters, in the format"
854 " template:option=value,option=value,...",
855 type="identkeyval", action="append", default=[])
857 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
858 type="keyval", default={},
859 help="Memory size specs: list of key=value,"
860 " where key is one of min, max, std"
861 " (in MB or using a unit)")
863 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
864 type="keyval", default={},
865 help="CPU count specs: list of key=value,"
866 " where key is one of min, max, std")
868 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
869 dest="ispecs_disk_count",
870 type="keyval", default={},
871 help="Disk count specs: list of key=value,"
872 " where key is one of min, max, std")
874 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
875 type="keyval", default={},
876 help="Disk size specs: list of key=value,"
877 " where key is one of min, max, std"
878 " (in MB or using a unit)")
880 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
881 type="keyval", default={},
882 help="NIC count specs: list of key=value,"
883 " where key is one of min, max, std")
885 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
886 dest="ipolicy_disk_templates",
887 type="list", default=None,
888 help="Comma-separated list of"
889 " enabled disk templates")
891 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
892 dest="ipolicy_vcpu_ratio",
893 type="maybefloat", default=None,
894 help="The maximum allowed vcpu-to-cpu ratio")
896 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
897 dest="ipolicy_spindle_ratio",
898 type="maybefloat", default=None,
899 help=("The maximum allowed instances to"
902 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
903 help="Hypervisor and hypervisor options, in the"
904 " format hypervisor:option=value,option=value,...",
905 default=None, type="identkeyval")
907 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
908 help="Hypervisor and hypervisor options, in the"
909 " format hypervisor:option=value,option=value,...",
910 default=[], action="append", type="identkeyval")
912 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
913 action="store_false",
914 help="Don't check that the instance's IP"
917 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
918 default=True, action="store_false",
919 help="Don't check that the instance's name"
922 NET_OPT = cli_option("--net",
923 help="NIC parameters", default=[],
924 dest="nics", action="append", type="identkeyval")
926 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
927 dest="disks", action="append", type="identkeyval")
929 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
930 help="Comma-separated list of disks"
931 " indices to act on (e.g. 0,2) (optional,"
932 " defaults to all disks)")
934 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
935 help="Enforces a single-disk configuration using the"
936 " given disk size, in MiB unless a suffix is used",
937 default=None, type="unit", metavar="<size>")
939 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
940 dest="ignore_consistency",
941 action="store_true", default=False,
942 help="Ignore the consistency of the disks on"
945 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
946 dest="allow_failover",
947 action="store_true", default=False,
948 help="If migration is not possible fallback to"
951 NONLIVE_OPT = cli_option("--non-live", dest="live",
952 default=True, action="store_false",
953 help="Do a non-live migration (this usually means"
954 " freeze the instance, save the state, transfer and"
955 " only then resume running on the secondary node)")
# --migration-mode: let the user override the hypervisor's default
# migration mode.  Fixes the unbalanced parenthesis in the help text
# ("(choose ... non-live" was never closed).
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
963 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
964 help="Target node and optional secondary node",
965 metavar="<pnode>[:<snode>]",
966 completion_suggest=OPT_COMPL_INST_ADD_NODES)
968 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
969 action="append", metavar="<node>",
970 help="Use only this node (can be used multiple"
971 " times, if not given defaults to all nodes)",
972 completion_suggest=OPT_COMPL_ONE_NODE)
974 NODEGROUP_OPT_NAME = "--node-group"
975 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
977 help="Node group (name or uuid)",
978 metavar="<nodegroup>",
979 default=None, type="string",
980 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
982 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
984 completion_suggest=OPT_COMPL_ONE_NODE)
986 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
987 action="store_false",
988 help="Don't start the instance after creation")
990 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
991 action="store_true", default=False,
992 help="Show command instead of executing it")
994 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
995 default=False, action="store_true",
996 help="Instead of performing the migration, try to"
997 " recover from a failed cleanup. This is safe"
998 " to run even if the instance is healthy, but it"
999 " will create extra replication traffic and "
1000 " disrupt briefly the replication (like during the"
1003 STATIC_OPT = cli_option("-s", "--static", dest="static",
1004 action="store_true", default=False,
1005 help="Only show configuration data, not runtime data")
1007 ALL_OPT = cli_option("--all", dest="show_all",
1008 default=False, action="store_true",
1009 help="Show info on all instances on the cluster."
1010 " This can take a long time to run, use wisely")
1012 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1013 action="store_true", default=False,
1014 help="Interactive OS reinstall, lists available"
1015 " OS templates for selection")
1017 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1018 action="store_true", default=False,
1019 help="Remove the instance from the cluster"
1020 " configuration even if there are failures"
1021 " during the removal process")
1023 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1024 dest="ignore_remove_failures",
1025 action="store_true", default=False,
1026 help="Remove the instance from the"
1027 " cluster configuration even if there"
1028 " are failures during the removal"
1031 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1032 action="store_true", default=False,
1033 help="Remove the instance from the cluster")
1035 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1036 help="Specifies the new node for the instance",
1037 metavar="NODE", default=None,
1038 completion_suggest=OPT_COMPL_ONE_NODE)
1040 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1041 help="Specifies the new secondary node",
1042 metavar="NODE", default=None,
1043 completion_suggest=OPT_COMPL_ONE_NODE)
1045 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1046 default=False, action="store_true",
1047 help="Replace the disk(s) on the primary"
1048 " node (applies only to internally mirrored"
1049 " disk templates, e.g. %s)" %
1050 utils.CommaJoin(constants.DTS_INT_MIRROR))
1052 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1053 default=False, action="store_true",
1054 help="Replace the disk(s) on the secondary"
1055 " node (applies only to internally mirrored"
1056 " disk templates, e.g. %s)" %
1057 utils.CommaJoin(constants.DTS_INT_MIRROR))
1059 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1060 default=False, action="store_true",
1061 help="Lock all nodes and auto-promote as needed"
1064 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1065 default=False, action="store_true",
1066 help="Automatically replace faulty disks"
1067 " (applies only to internally mirrored"
1068 " disk templates, e.g. %s)" %
1069 utils.CommaJoin(constants.DTS_INT_MIRROR))
1071 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1072 default=False, action="store_true",
1073 help="Ignore current recorded size"
1074 " (useful for forcing activation when"
1075 " the recorded size is wrong)")
1077 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1079 completion_suggest=OPT_COMPL_ONE_NODE)
1081 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1084 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1085 help="Specify the secondary ip for the node",
1086 metavar="ADDRESS", default=None)
1088 READD_OPT = cli_option("--readd", dest="readd",
1089 default=False, action="store_true",
1090 help="Readd old node after replacing it")
1092 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1093 default=True, action="store_false",
1094 help="Disable SSH key fingerprint checking")
1096 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1097 default=False, action="store_true",
1098 help="Force the joining of a node")
1100 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1101 type="bool", default=None, metavar=_YORNO,
1102 help="Set the master_candidate flag on the node")
1104 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1105 type="bool", default=None,
1106 help=("Set the offline flag on the node"
1107 " (cluster does not communicate with offline"
1110 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1111 type="bool", default=None,
1112 help=("Set the drained flag on the node"
1113 " (excluded from allocation operations)"))
1115 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1116 type="bool", default=None, metavar=_YORNO,
1117 help="Set the master_capable flag on the node")
1119 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1120 type="bool", default=None, metavar=_YORNO,
1121 help="Set the vm_capable flag on the node")
1123 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1124 type="bool", default=None, metavar=_YORNO,
1125 help="Set the allocatable flag on a volume")
1127 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1128 help="Disable support for lvm based instances"
1130 action="store_false", default=True)
1132 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1133 dest="enabled_hypervisors",
1134 help="Comma-separated list of hypervisors",
1135 type="string", default=None)
1137 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1138 type="keyval", default={},
1139 help="NIC parameters")
1141 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1142 dest="candidate_pool_size", type="int",
1143 help="Set the candidate pool size")
1145 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1146 help=("Enables LVM and specifies the volume group"
1147 " name (cluster-wide) for disk allocation"
1148 " [%s]" % constants.DEFAULT_VG),
1149 metavar="VG", default=None)
1151 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1152 help="Destroy cluster", action="store_true")
1154 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1155 help="Skip node agreement check (dangerous)",
1156 action="store_true", default=False)
1158 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1159 help="Specify the mac prefix for the instance IP"
1160 " addresses, in the format XX:XX:XX",
1164 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1165 help="Specify the node interface (cluster-wide)"
1166 " on which the master IP address will be added"
1167 " (cluster init default: %s)" %
1168 constants.DEFAULT_BRIDGE,
1172 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1173 help="Specify the netmask of the master IP",
1177 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1178 dest="use_external_mip_script",
1179 help="Specify whether to run a"
1180 " user-provided script for the master"
1181 " IP address turnup and"
1182 " turndown operations",
1183 type="bool", metavar=_YORNO, default=None)
1185 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1186 help="Specify the default directory (cluster-"
1187 "wide) for storing the file-based disks [%s]" %
1188 pathutils.DEFAULT_FILE_STORAGE_DIR,
1190 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1192 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1193 "--shared-file-storage-dir",
1194 dest="shared_file_storage_dir",
1195 help="Specify the default directory (cluster-wide) for storing the"
1196 " shared file-based disks [%s]" %
1197 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1198 metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1200 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1201 help="Don't modify %s" % pathutils.ETC_HOSTS,
1202 action="store_false", default=True)
1204 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1205 help="Don't initialize SSH keys",
1206 action="store_false", default=True)
1208 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1209 help="Enable parseable error messages",
1210 action="store_true", default=False)
1212 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1213 help="Skip N+1 memory redundancy tests",
1214 action="store_true", default=False)
1216 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1217 help="Type of reboot: soft/hard/full",
1218 default=constants.INSTANCE_REBOOT_HARD,
1220 choices=list(constants.REBOOT_TYPES))
1222 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1223 dest="ignore_secondaries",
1224 default=False, action="store_true",
1225 help="Ignore errors from secondaries")
1227 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1228 action="store_false", default=True,
1229 help="Don't shutdown the instance (unsafe)")
1231 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1232 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1233 help="Maximum time to wait")
1235 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1236 dest="shutdown_timeout", type="int",
1237 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1238 help="Maximum time to wait for instance"
1241 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1243 help=("Number of seconds between repetions of the"
1246 EARLY_RELEASE_OPT = cli_option("--early-release",
1247 dest="early_release", default=False,
1248 action="store_true",
1249 help="Release the locks on the secondary"
1252 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1253 dest="new_cluster_cert",
1254 default=False, action="store_true",
1255 help="Generate a new cluster certificate")
1257 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1259 help="File containing new RAPI certificate")
1261 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1262 default=None, action="store_true",
1263 help=("Generate a new self-signed RAPI"
1266 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1268 help="File containing new SPICE certificate")
1270 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1272 help="File containing the certificate of the CA"
1273 " which signed the SPICE certificate")
1275 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1276 dest="new_spice_cert", default=None,
1277 action="store_true",
1278 help=("Generate a new self-signed SPICE"
1281 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1282 dest="new_confd_hmac_key",
1283 default=False, action="store_true",
1284 help=("Create a new HMAC key for %s" %
# Load a replacement cluster domain secret from the given file
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       help=("Load new cluster domain"
                                             " secret from file"))
1293 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1294 dest="new_cluster_domain_secret",
1295 default=False, action="store_true",
1296 help=("Create a new cluster domain"
1299 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1300 dest="use_replication_network",
1301 help="Whether to use the replication network"
1302 " for talking to the nodes",
1303 action="store_true", default=False)
1305 MAINTAIN_NODE_HEALTH_OPT = \
1306 cli_option("--maintain-node-health", dest="maintain_node_health",
1307 metavar=_YORNO, default=None, type="bool",
1308 help="Configure the cluster to automatically maintain node"
1309 " health, by shutting down unknown instances, shutting down"
1310 " unknown DRBD devices, etc.")
1312 IDENTIFY_DEFAULTS_OPT = \
1313 cli_option("--identify-defaults", dest="identify_defaults",
1314 default=False, action="store_true",
1315 help="Identify which saved instance parameters are equal to"
1316 " the current cluster defaults and set them as such, instead"
1317 " of marking them as overridden")
1319 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1320 action="store", dest="uid_pool",
1321 help=("A list of user-ids or user-id"
1322 " ranges separated by commas"))
1324 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1325 action="store", dest="add_uids",
1326 help=("A list of user-ids or user-id"
1327 " ranges separated by commas, to be"
1328 " added to the user-id pool"))
1330 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1331 action="store", dest="remove_uids",
1332 help=("A list of user-ids or user-id"
1333 " ranges separated by commas, to be"
1334 " removed from the user-id pool"))
1336 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1337 action="store", dest="reserved_lvs",
1338 help=("A comma-separated list of reserved"
1339 " logical volumes names, that will be"
1340 " ignored by cluster verify"))
1342 ROMAN_OPT = cli_option("--roman",
1343 dest="roman_integers", default=False,
1344 action="store_true",
1345 help="Use roman numbers for positive integers")
1347 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1348 action="store", default=None,
1349 help="Specifies usermode helper for DRBD")
1351 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1352 action="store_false", default=True,
1353 help="Disable support for DRBD")
1355 PRIMARY_IP_VERSION_OPT = \
1356 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1357 action="store", dest="primary_ip_version",
1358 metavar="%d|%d" % (constants.IP4_VERSION,
1359 constants.IP6_VERSION),
1360 help="Cluster-wide IP version for primary IP")
1362 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1363 action="store_true",
1364 help="Show machine name for every line in output")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name into its numeric value (via
  C{_PRIONAME_TO_VALUE}) and stores it on the parsed options.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)
# Symbolic opcode priority; the choice string is converted to its numeric
# value by _PriorityOptionCb before being stored on the options object.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")
1383 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1384 type="bool", default=None, metavar=_YORNO,
1385 help="Sets the hidden flag on the OS")
1387 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1388 type="bool", default=None, metavar=_YORNO,
1389 help="Sets the blacklisted flag on the OS")
1391 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1392 type="bool", metavar=_YORNO,
1393 dest="prealloc_wipe_disks",
1394 help=("Wipe disks prior to instance"
1397 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1398 type="keyval", default=None,
1399 help="Node parameters")
1401 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1402 action="store", metavar="POLICY", default=None,
1403 help="Allocation policy for the node group")
1405 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1406 type="bool", metavar=_YORNO,
1407 dest="node_powered",
1408 help="Specify if the SoR for node is powered")
1410 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1411 default=constants.OOB_TIMEOUT,
1412 help="Maximum time to wait for out-of-band helper")
1414 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1415 default=constants.OOB_POWER_DELAY,
1416 help="Time in seconds to wait between power-ons")
1418 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1419 action="store_true", default=False,
1420 help=("Whether command argument should be treated"
1423 NO_REMEMBER_OPT = cli_option("--no-remember",
1425 action="store_true", default=False,
1426 help="Perform but do not record the change"
1427 " in the configuration")
1429 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1430 default=False, action="store_true",
1431 help="Evacuate primary instances only")
1433 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1434 default=False, action="store_true",
1435 help="Evacuate secondary instances only"
1436 " (applies only to internally mirrored"
1437 " disk templates, e.g. %s)" %
1438 utils.CommaJoin(constants.DTS_INT_MIRROR))
1440 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1441 action="store_true", default=False,
1442 help="Pause instance at startup")
1444 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1445 help="Destination node group (name or uuid)",
1446 default=None, action="append",
1447 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1449 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1450 action="append", dest="ignore_errors",
1451 choices=list(constants.CV_ALL_ECODES_STRINGS),
1452 help="Error code to be ignored")
1454 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1456 help=("Specify disk state information in the"
1458 " storage_type/identifier:option=value,...;"
1459 " note this is unused for now"),
1462 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1464 help=("Specify hypervisor state information in the"
1465 " format hypervisor:option=value,...;"
1466 " note this is unused for now"),
1469 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1470 action="store_true", default=False,
1471 help="Ignore instance policy violations")
1473 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1474 help="Sets the instance's runtime memory,"
1475 " ballooning it up or down to the new value",
1476 default=None, type="unit", metavar="<size>")
1478 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1479 action="store_true", default=False,
1480 help="Marks the grow as absolute instead of the"
1481 " (default) relative mode")
1483 NETWORK_OPT = cli_option("--network",
1484 action="store", default=None, dest="network",
1485 help="IP network in CIDR notation")
1487 GATEWAY_OPT = cli_option("--gateway",
1488 action="store", default=None, dest="gateway",
1489 help="IP address of the router (gateway)")
1491 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1492 action="store", default=None,
1493 dest="add_reserved_ips",
1494 help="Comma-separated list of"
1495 " reserved IPs to add")
1497 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1498 action="store", default=None,
1499 dest="remove_reserved_ips",
1500 help="Comma-delimited list of"
1501 " reserved IPs to remove")
1503 NETWORK_TYPE_OPT = cli_option("--network-type",
1504 action="store", default=None, dest="network_type",
1505 help="Network type: private, public, None")
1507 NETWORK6_OPT = cli_option("--network6",
1508 action="store", default=None, dest="network6",
1509 help="IP network in CIDR notation")
1511 GATEWAY6_OPT = cli_option("--gateway6",
1512 action="store", default=None, dest="gateway6",
1513 help="IP6 address of the router (gateway)")
1515 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1516 dest="conflicts_check",
1518 action="store_false",
1519 help="Don't check for conflicting IPs")
1521 #: Options provided by all commands
1522 COMMON_OPTS = [DEBUG_OPT]
1524 # common options for creating instances. add and import then add their own
1526 COMMON_CREATE_OPTS = [
1531 FILESTORE_DRIVER_OPT,
1537 NOCONFLICTSCHECK_OPT,
1549 # common instance policy options
1550 INSTANCE_POLICY_OPTS = [
1551 SPECS_CPU_COUNT_OPT,
1552 SPECS_DISK_COUNT_OPT,
1553 SPECS_DISK_SIZE_OPT,
1555 SPECS_NIC_COUNT_OPT,
1556 IPOLICY_DISK_TEMPLATES,
1558 IPOLICY_SPINDLE_RATIO,
class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  Raised when the usage description should be shown to the user.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error
class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  Raised when the program version should be shown.

  """
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  # env_override entries must name real commands, and aliases may not
  # shadow commands
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  # No option or command given
  raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # Default arguments may be supplied via an environment variable named
  # after binary and command, e.g. "GNT_INSTANCE_LIST"; they are inserted
  # before the user-supplied arguments.
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  Yields the usage text line by line.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    # wrap the help text so each line fits next to the command column
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s %s" % (mlen, "", line)
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)

      1. Keep running count of maximum number of values (max_count)

      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

    # only the final argument definition may be unbounded (max=None)
    check_max = (arg.max is not None)
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "pnode:snode" is split on the first colon; any
  other value is returned unchanged with a C{None} secondary part.

  """
  if not value or ":" not in value:
    return (value, None)

  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # with variants, each name is "<os>+<variant>"; without any, the base
  # name alone is valid
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @param default: Default fields
  @return: the default fields when nothing was selected, the defaults
    plus the extra fields for a "+a,b,c" selection, or just the selected
    fields otherwise

  """
  if selected is None:
    return default

  # a leading "+" means "append to the defaults"
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
1783 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
    description); if not given, it will default to: [('y', True,
    'Perform the operation'), ('n', False, 'Do not do the operation')];
    note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
    not possible (i.e. not running with a tty, we return the last

  """
  choices = [("y", True, "Perform the operation"),
             ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  # every choice must be a (char, value, description) tuple, and the
  # "?" character is reserved for requesting help
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last entry's value is used as the default answer (per the
  # docstring, also returned when no tty is available)
  answer = choices[-1][1]

  # re-wrap the question to 70 columns, keeping existing line breaks
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)

  # talk to the controlling terminal directly (Python 2 file() builtin)
  f = file("/dev/tty", "a+")

  chars = [entry[0] for entry in choices]
  # mark the default (last) choice with brackets in the prompt
  chars[-1] = "[%s]" % chars[-1]
  maps = dict([(entry[0], entry[1]) for entry in choices])

  f.write("/".join(chars))
  line = f.readline(2).strip().lower()

  # help requested: list each choice with its description
  for entry in choices:
    f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
    if None, a new client will be created
  @return: the job ID assigned by the master

  """
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Waits for job changes via the data callbacks, forwarding log entries
  and status updates to the reporting callbacks, until the job reaches
  a final status; then collects and checks the per-opcode results.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,

  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # remember the highest log serial seen so far
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # job finished: fetch the per-opcode status and results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # job failed: find and re-raise the first opcode error
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)
      raise errors.OpExecError("partial failure (opcode %d): %s" %
      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation forwarding to a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used by the callback methods

    """
    JobPollCbBase.__init__(self)
    # keep the client; both callback methods below use it
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked for every log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks printing job progress to stdout/stderr.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # remember which one-time notifications were already printed
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Payloads of any type other than C{constants.ELOG_MESSAGE} are
  stringified first; the result is always passed through
  L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
    if None, a new client will be created

  """
  if reporter is None:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
    reporter = StdioJobPollReportCb()
  # reporter and feedback_fn are mutually exclusive
  raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  # wait for the job and collect its opcode results
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  # a single opcode was submitted, so return its single result
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # structured exit: the caller prints the job ID and stops
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  # "options" may legitimately be None (per the contract above); in that
  # case there is nothing to propagate
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
def GetClient(query=False):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
    used for queries; if the build-time parameter
    enable-split-queries is enabled, then the client will be
    connected to the query socket instead of the masterd socket

  """
  if query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  # TODO: Cache object?
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
  except errors.ConfigurationError:
    raise errors.OpPrereqError("Cluster not initialized or this machine is"
                               " not part of a cluster",
    # the cluster exists, so the connection failure is likely due to us
    # not being the master node
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # map each known exception type to a user-friendly message; anything
  # unrecognized falls through to the generic "unhandled" cases below
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      obuf.write(" node: %s, script: %s, output: %s\n" %
                 (node, script, out))
      obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # two-argument form carries a structured (message, error code) pair
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
    for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
    override command line options; this can be used to pass
    options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
    default args for commands
  @return: the exit code of the command

  """
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0])
  binary = sys.argv[0]

  if len(sys.argv) >= 2:
    logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
  cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  binary = "<unknown program>"
  cmdline = "<unknown>"

  (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
    return constants.EXIT_FAILURE
    return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error
  # apply caller-supplied option overrides on top of the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
2394 def ParseNicOption(optvalue):
2395 """Parses the value of the --net option(s).
2399 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2400 except (TypeError, ValueError), err:
2401 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2404 nics = [{}] * nic_max
2405 for nidx, ndict in optvalue:
2408 if not isinstance(ndict, dict):
2409 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2410 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2412 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  # NOTE(review): this extract is missing several lines (try:/else:
  # branches, some assignments); verify against upstream before editing.
  (pnode, snode) = SplitNodeOption(opts.node)
  hypervisor, hvparams = opts.hypervisor
  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
  # Disk option sanity checks: diskless templates must not get any disk
  # information, and --disk / -s are mutually exclusive
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
  if (not opts.disks and not opts.sd_size
      and mode == constants.INSTANCE_CREATE):
    raise errors.OpPrereqError("No disk information specified",
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
                               " '-s' option", errors.ECODE_INVAL)
  if opts.sd_size is not None:
    # -s is shorthand for a single disk 0 of the given size
    opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
  disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
  disks = [{}] * disk_max
  for didx, ddict in opts.disks:
    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
    elif constants.IDISK_SIZE in ddict:
      # 'size' and 'adopt' are mutually exclusive per disk
      if constants.IDISK_ADOPT in ddict:
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx, errors.ECODE_INVAL)
      ddict[constants.IDISK_SIZE] = \
        utils.ParseUnit(ddict[constants.IDISK_SIZE])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                   (didx, err), errors.ECODE_INVAL)
    elif constants.IDISK_ADOPT in ddict:
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                   " import", errors.ECODE_INVAL)
      ddict[constants.IDISK_SIZE] = 0
      raise errors.OpPrereqError("Missing size or adoption source for"
                                 " disk %d" % didx, errors.ECODE_INVAL)
  if opts.tags is not None:
    tags = opts.tags.split(",")
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  # mode-specific parameters for the opcode
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)
  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  # NOTE(review): some lines are missing from this extract (else branches,
  # try/finally around Call); compare with upstream before editing.
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    self.ssh = ssh.SshRunner(self.cluster_name)
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]
    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      # (elided else branch: remote execution over SSH)
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))
    # build a descriptive error message on failure
    errmsg = ["Failed to run command %s" % result.cmd]
    errmsg.append("on node %s" % node_name)
    errmsg.append(": exitcode %s and error %s" %
                  (result.exit_code, result.output))
    raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)
    # Stop master daemons, so that no new jobs can come in and all running
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
    # All daemons are shut down now
    return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)
    # Start cluster again, master node last
    for node_name in self.nonmaster_nodes + [self.master_node]:
      self.feedback_fn("Starting daemons on %s" % node_name)
      self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")
  # This ensures we're running on the master daemon
  # NOTE(review): the client construction line is elided in this extract;
  # a luxi client ("cl") is presumably created here — confirm upstream.
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])
  online_nodes = GetOnlineNodes([], cl=cl)
  # Don't keep a reference to the client. The master daemon will go away.
  assert master_node in online_nodes
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @param headers: dictionary mapping field names to headers for
      the table
  @param fields: the field names corresponding to each row in
      the data
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  # NOTE(review): this extract is missing lines (defaults for numfields/
  # unitfields, loop headers, try: statements); verify against upstream.
  if numfields is None:
  if unitfields is None:
  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
  # build per-field printf-style format specifiers
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
      format_fields.append("%-*s")
  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    # escape literal "%" in the separator before joining format specs
    format_str = separator.replace("%", "%%").join(format_fields)
  # first pass: stringify values (with unit formatting) and track widths
  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))
  # header row: widths must also cover the header text
  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
    args.append(mlens[idx])
  result.append(format_str % tuple(args))
  if separator is None:
    assert len(mlens) == len(fields)
    if fields and not numfields.Matches(fields[-1]):
  # data rows
  line = ["-" for _ in fields]
  for idx in range(len(fields)):
    if separator is None:
      args.append(mlens[idx])
      args.append(line[idx])
  result.append(format_str % tuple(args))
2791 def _FormatBool(value):
2792 """Formats a boolean value as a string.
#: Default formatting per query field type, indexed by field kind; each
#: entry is a (formatter callback, align-right) pair.
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
      boolean for aligning the value on the right-hand side

  """
  # NOTE(review): the early-return lines for a found override/format are
  # elided in this extract; verify against upstream.
  fmt = override.get(fdef.name, None)
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    # NOTE(review): assignment of self._fn is elided in this extract.
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    # data is a (status, value) pair as returned by query operations
    (status, value) = data
    # record this field's status for overall result statistics
    self._status_fn(status)
    if status == constants.RS_NORMAL:
      return self._fn(value)
    assert value is None, \
      "Found value %r for abnormal status %s" % (value, status)
    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  if verbose:
    return verbose_text
  return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
      see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  # NOTE(review): some lines (unit defaulting, _RecordStatus body, columns
  # initialization, status assignments) are elided in this extract.
  if format_override is None:
    format_override = {}
  # per-status counters for deciding the overall result status below
  stats = dict.fromkeys(constants.RS_ALL, 0)
  def _RecordStatus(status):
  # build one TableColumn per field, wiring in the status recorder
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
  table = FormatTable(result.data, columns, header, separator)
  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())
  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown_kind = constants.QFT_UNKNOWN
  return [fdef for fdef in fdefs if fdef.kind == unknown_kind]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
      None for automatic choice (human-readable for non-separator usage,
      otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
      L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
      any simple filters built by namefield should use integer values to

  """
  # NOTE(review): client creation and output lines are elided in this
  # extract; verify against upstream before editing.
  # combine the name-based filter with any caller-provided filter
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)
  qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]
  response = cl.Query(resource, fields, qfilter)
  found_unknown = _WarnUnknownFields(response.fields)
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     format_override=format_override,
  # consistency check: unknown-field warning and status must agree
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))
  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD
  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to show header row

  """
  # NOTE(review): client creation, columns list header and the
  # unknown-fields check are elided in this extract.
  response = cl.QueryFields(resource, fields)
  found_unknown = _WarnUnknownFields(response.fields)
  # three fixed columns describing each available field
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
  for line in FormatTable(rows, columns, header, separator):
  return constants.EXIT_UNKNOWN_FIELD
  return constants.EXIT_SUCCESS
  """Describes a column for L{FormatTable}.

  """
  # NOTE(review): the enclosing "class TableColumn" header line and the
  # assignments of the title/format attributes are elided in this extract;
  # FormatTable reads col.title, col.format and col.align_right.
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @param title: Column title
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.align_right = align_right
3120 def _GetColFormatString(width, align_right):
3121 """Returns the format string for a field.
3129 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  # NOTE(review): "if header:"/"else:" and the row loop header are elided
  # in this extract; verify against upstream.
  data = [[col.title for col in columns]]
  colwidth = [len(col.title) for col in columns]
  colwidth = [0 for _ in columns]
  assert len(row) == len(columns)
  # format each cell with its column's formatting function
  formatted = [col.format(value) for value, col in zip(row, columns)]
  if separator is None:
    # Update column widths
    for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
      # Modifying a list's items while iterating is fine
      colwidth[idx] = max(oldwidth, len(value))
  data.append(formatted)
  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]
  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])
  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # Anything that is not a (seconds, microseconds) pair is unformattable
  valid = isinstance(ts, (tuple, list)) and len(ts) == 2
  if not valid:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

  Without any suffix, the value will be taken to be in seconds.

  """
  # NOTE(review): the suffix_map definition and try: lines are elided in
  # this extract; verify against upstream.
  raise errors.OpPrereqError("Empty time specification passed",
  if value[-1] not in suffix_map:
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value,
  # strip the recognized suffix and apply its multiplier
  multiplier = suffix_map[value[-1]]
  if not value: # no data left after stripping the suffix
    raise errors.OpPrereqError("Invalid time specification (only"
                               " suffix passed)", errors.ECODE_INVAL)
  value = int(value) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value,
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  # NOTE(review): client creation, qfilter initialization and the final
  # "fn" selection lines are elided in this extract.
  # build the query filter from the requested constraints
  qfilter.append(qlang.MakeSimpleFilter("name", nodes))
  if nodegroup is not None:
    # match the group either by name or by UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])
  qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
  if len(qfilter) > 1:
    final_filter = [qlang.OP_AND] + qfilter
  assert len(qfilter) == 1
  final_filter = qfilter[0]
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
  # Accessors into each result row: ((..., name), (..., offline), (..., sip))
  def _IsOffline(row):
    (_, (_, offline), _) = row
    ((_, name), _, _) = row
    (_, _, (_, sip)) = row
  (offline, online) = compat.partition(result.data, _IsOffline)
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))
  return map(fn, online)
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @param txt: the message

  """
  # NOTE(review): the try:, the no-args write branch and the flush are
  # elided in this extract; verify against upstream.
  stream.write(txt % args)
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  out = sys.stdout
  _ToStream(out, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  err_stream = sys.stderr
  _ToStream(err_stream, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  # NOTE(review): multiple lines are elided throughout this class
  # (attribute initializations, if/else branches, try: statements);
  # verify against upstream before editing.
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.verbose = verbose
    self.feedback_fn = feedback_fn
    # monotonically increasing index to preserve submission order
    self._counter = itertools.count()

  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    # each=True: one SubmitJob call per queued job (elided branch);
    # otherwise one SubmitManyJobs call for the whole queue
    for (_, _, ops) in self.queue:
      # SubmitJob will remove the success status, but raise an exception if
      # the submission fails, so we'll notice that anyway.
      results.append([True, self.cl.SubmitJob(ops)[0]])
    results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
    # no candidate found: fall back to the first queued job
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    self.SubmitPending()
    ok_jobs = [row[2] for row in self.jobs if row[1]]
    ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))
    (idx, _, jid, name) = self._ChooseJob()
    ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
    job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
    except errors.JobLost, err:
      _, job_result = FormatError(err)
      ToStderr("Job %s%s has been archived, cannot check its result",
               jid, self._IfName(name, " for %s"))
    except (errors.GenericError, luxi.ProtocolError), err:
      _, job_result = FormatError(err)
      # the error message will always be shown, verbose or not
      ToStderr("Job %s%s has failed: %s",
               jid, self._IfName(name, " for %s"), job_result)
    results.append((idx, success, job_result))
    # sort based on the index, then drop it
    results = [i[1:] for i in results]

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @param wait: whether to wait or not

    """
    return self.GetResults()
    self.SubmitPending()
    for _, status, result, name in self.jobs:
      ToStdout("%s: %s", result, name)
      ToStderr("Failure for %s: %s", name, result)
    return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  # NOTE(review): the "data = actual[key]" assignment and if/else lines
  # around the recursion are elided in this extract.
  indent = " " * level
  for key in sorted(actual):
    buf.write("%s- %s:" % (indent, key))
    if isinstance(data, dict) and data:
      # nested parameter group: recurse with increased indent
      FormatParameterDict(buf, param_dict.get(key, {}), data,
    # scalar: show the explicit value, or mark the inherited default
    val = param_dict.get(key, "default (%s)" % data)
    buf.write(" %s\n" % val)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @param text: the operation that the user should confirm
  @return: True or False depending on user's confirmation.

  """
  # NOTE(review): the count assignment, the short/long list decision and
  # the "view list" loop are elided in this extract.
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join([" %s" % name for name in names]))
  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]
  # for long lists, offer a third choice to view the affected items first
  choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
  question = msg + affected
  choice = AskUser(question, choices)
  choice = AskUser(msg + affected, choices)
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      # keep the "default" marker untouched
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed
3601 def CreateIPolicyFromOpts(ispecs_mem_size=None,
3602 ispecs_cpu_count=None,
3603 ispecs_disk_count=None,
3604 ispecs_disk_size=None,
3605 ispecs_nic_count=None,
3606 ipolicy_disk_templates=None,
3607 ipolicy_vcpu_ratio=None,
3608 ipolicy_spindle_ratio=None,
3609 group_ipolicy=False,
3610 allowed_values=None,
3612 """Creation of instance policy based on command line options.
3614 @param fill_all: whether for cluster policies we should ensure that
3615 all values are filled
3621 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3622 if ispecs_disk_size:
3623 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3624 except (TypeError, ValueError, errors.UnitParseError), err:
3625 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3627 (ispecs_disk_size, ispecs_mem_size, err),
3630 # prepare ipolicy dict
3631 ipolicy_transposed = {
3632 constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3633 constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3634 constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3635 constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3636 constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3639 # first, check that the values given are correct
3641 forced_type = TISPECS_GROUP_TYPES
3643 forced_type = TISPECS_CLUSTER_TYPES
3645 for specs in ipolicy_transposed.values():
3646 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3649 ipolicy_out = objects.MakeEmptyIPolicy()
3650 for name, specs in ipolicy_transposed.iteritems():
3651 assert name in constants.ISPECS_PARAMETERS
3652 for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3653 ipolicy_out[key][name] = val
3655 # no filldict for non-dicts
3656 if not group_ipolicy and fill_all:
3657 if ipolicy_disk_templates is None:
3658 ipolicy_disk_templates = constants.DISK_TEMPLATES
3659 if ipolicy_vcpu_ratio is None:
3660 ipolicy_vcpu_ratio = \
3661 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
3662 if ipolicy_spindle_ratio is None:
3663 ipolicy_spindle_ratio = \
3664 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
3665 if ipolicy_disk_templates is not None:
3666 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3667 if ipolicy_vcpu_ratio is not None:
3668 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3669 if ipolicy_spindle_ratio is not None:
3670 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3672 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)