X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/8f9069e5a1c176a416e144b1e6fb8bfbb0fa85c0..247ee81f3e3970506f082530f1eaf8f407bfa85f:/lib/cli.py diff --git a/lib/cli.py b/lib/cli.py index 435b400..4aeb162 100644 --- a/lib/cli.py +++ b/lib/cli.py @@ -1,7 +1,7 @@ # # -# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc. +# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -27,6 +27,8 @@ import textwrap import os.path import time import logging +import errno +import itertools from cStringIO import StringIO from ganeti import utils @@ -39,6 +41,7 @@ from ganeti import rpc from ganeti import ssh from ganeti import compat from ganeti import netutils +from ganeti import qlang from optparse import (OptionParser, TitledHelpFormatter, Option, OptionValueError) @@ -48,10 +51,15 @@ __all__ = [ # Command line options "ADD_UIDS_OPT", "ALLOCATABLE_OPT", + "ALLOC_POLICY_OPT", "ALL_OPT", + "ALLOW_FAILOVER_OPT", "AUTO_PROMOTE_OPT", "AUTO_REPLACE_OPT", "BACKEND_OPT", + "BLK_OS_OPT", + "CAPAB_MASTER_OPT", + "CAPAB_VM_OPT", "CLEANUP_OPT", "CLUSTER_DOMAIN_SECRET_OPT", "CONFIRM_OPT", @@ -64,15 +72,19 @@ __all__ = [ "DRAINED_OPT", "DRY_RUN_OPT", "DRBD_HELPER_OPT", + "DST_NODE_OPT", "EARLY_RELEASE_OPT", "ENABLED_HV_OPT", "ERROR_CODES_OPT", "FIELDS_OPT", "FILESTORE_DIR_OPT", "FILESTORE_DRIVER_OPT", + "FORCE_FILTER_OPT", "FORCE_OPT", "FORCE_VARIANT_OPT", "GLOBAL_FILEDIR_OPT", + "HID_OS_OPT", + "GLOBAL_SHARED_FILEDIR_OPT", "HVLIST_OPT", "HVOPTS_OPT", "HYPERVISOR_OPT", @@ -81,9 +93,11 @@ __all__ = [ "IDENTIFY_DEFAULTS_OPT", "IGNORE_CONSIST_OPT", "IGNORE_FAILURES_OPT", + "IGNORE_OFFLINE_OPT", "IGNORE_REMOVE_FAILURES_OPT", "IGNORE_SECONDARIES_OPT", "IGNORE_SIZE_OPT", + "INTERVAL_OPT", "MAC_PREFIX_OPT", "MAINTAIN_NODE_HEALTH_OPT", "MASTER_NETDEV_OPT", @@ -96,8 +110,12 @@ __all__ = [ "NEW_RAPI_CERT_OPT", 
"NEW_SECONDARY_OPT", "NIC_PARAMS_OPT", + "NODE_FORCE_JOIN_OPT", "NODE_LIST_OPT", "NODE_PLACEMENT_OPT", + "NODEGROUP_OPT", + "NODE_PARAMS_OPT", + "NODE_POWERED_OPT", "NODRBD_STORAGE_OPT", "NOHDR_OPT", "NOIPCHECK_OPT", @@ -113,6 +131,7 @@ __all__ = [ "NOSTART_OPT", "NOSSH_KEYCHECK_OPT", "NOVOTING_OPT", + "NO_REMEMBER_OPT", "NWSYNC_OPT", "ON_PRIMARY_OPT", "ON_SECONDARY_OPT", @@ -120,7 +139,12 @@ __all__ = [ "OSPARAMS_OPT", "OS_OPT", "OS_SIZE_OPT", + "OOB_TIMEOUT_OPT", + "POWER_DELAY_OPT", + "PREALLOC_WIPE_DISKS_OPT", "PRIMARY_IP_VERSION_OPT", + "PRIMARY_ONLY_OPT", + "PRIORITY_OPT", "RAPI_CERT_OPT", "READD_OPT", "REBOOT_TYPE_OPT", @@ -129,6 +153,7 @@ __all__ = [ "RESERVED_LVS_OPT", "ROMAN_OPT", "SECONDARY_IP_OPT", + "SECONDARY_ONLY_OPT", "SELECT_OS_OPT", "SEP_OPT", "SHOWCMD_OPT", @@ -137,10 +162,13 @@ __all__ = [ "SRC_DIR_OPT", "SRC_NODE_OPT", "SUBMIT_OPT", + "STARTUP_PAUSED_OPT", "STATIC_OPT", "SYNC_OPT", + "TAG_ADD_OPT", "TAG_SRC_OPT", "TIMEOUT_OPT", + "TO_GROUP_OPT", "UIDPOOL_OPT", "USEUNITS_OPT", "USE_REPL_NET_OPT", @@ -148,8 +176,11 @@ __all__ = [ "VG_NAME_OPT", "YES_DOIT_OPT", # Generic functions for CLI programs + "ConfirmOperation", "GenericMain", "GenericInstanceCreate", + "GenericList", + "GenericListFields", "GetClient", "GetOnlineNodes", "JobExecutor", @@ -162,6 +193,8 @@ __all__ = [ # Formatting functions "ToStderr", "ToStdout", "FormatError", + "FormatQueryResult", + "FormatParameterDict", "GenerateTable", "AskUser", "FormatTimestamp", @@ -173,13 +206,16 @@ __all__ = [ # command line options support infrastructure "ARGS_MANY_INSTANCES", "ARGS_MANY_NODES", + "ARGS_MANY_GROUPS", "ARGS_NONE", "ARGS_ONE_INSTANCE", "ARGS_ONE_NODE", + "ARGS_ONE_GROUP", "ARGS_ONE_OS", "ArgChoice", "ArgCommand", "ArgFile", + "ArgGroup", "ArgHost", "ArgInstance", "ArgJobId", @@ -192,15 +228,35 @@ __all__ = [ "OPT_COMPL_ONE_IALLOCATOR", "OPT_COMPL_ONE_INSTANCE", "OPT_COMPL_ONE_NODE", + "OPT_COMPL_ONE_NODEGROUP", "OPT_COMPL_ONE_OS", "cli_option", "SplitNodeOption", 
"CalculateOSNames", + "ParseFields", + "COMMON_CREATE_OPTS", ] NO_PREFIX = "no_" UN_PREFIX = "-" +#: Priorities (sorted) +_PRIORITY_NAMES = [ + ("low", constants.OP_PRIO_LOW), + ("normal", constants.OP_PRIO_NORMAL), + ("high", constants.OP_PRIO_HIGH), + ] + +#: Priority dictionary for easier lookup +# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once +# we migrate to Python 2.6 +_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES) + +# Query result status for clients +(QR_NORMAL, + QR_UNKNOWN, + QR_INCOMPLETE) = range(3) + class _Argument: def __init__(self, min=0, max=None): # pylint: disable-msg=W0622 @@ -254,6 +310,13 @@ class ArgNode(_Argument): """ + +class ArgGroup(_Argument): + """Node group argument. + + """ + + class ArgJobId(_Argument): """Job ID argument. @@ -287,8 +350,11 @@ class ArgOs(_Argument): ARGS_NONE = [] ARGS_MANY_INSTANCES = [ArgInstance()] ARGS_MANY_NODES = [ArgNode()] +ARGS_MANY_GROUPS = [ArgGroup()] ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)] ARGS_ONE_NODE = [ArgNode(min=1, max=1)] +# TODO +ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)] ARGS_ONE_OS = [ArgOs(min=1, max=1)] @@ -303,7 +369,9 @@ def _ExtractTagsObject(opts, args): kind = opts.tag_type if kind == constants.TAG_CLUSTER: retval = kind, kind - elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE: + elif kind in (constants.TAG_NODEGROUP, + constants.TAG_NODE, + constants.TAG_INSTANCE): if not args: raise errors.OpPrereqError("no arguments passed to the command") name = args.pop(0) @@ -373,8 +441,8 @@ def AddTags(opts, args): _ExtendTags(opts, args) if not args: raise errors.OpPrereqError("No tags to be added") - op = opcodes.OpAddTags(kind=kind, name=name, tags=args) - SubmitOpCode(op) + op = opcodes.OpTagsSet(kind=kind, name=name, tags=args) + SubmitOpCode(op, opts=opts) def RemoveTags(opts, args): @@ -390,8 +458,8 @@ def RemoveTags(opts, args): _ExtendTags(opts, args) if not args: raise errors.OpPrereqError("No tags to be removed") - op = 
opcodes.OpDelTags(kind=kind, name=name, tags=args) - SubmitOpCode(op) + op = opcodes.OpTagsDel(kind=kind, name=name, tags=args) + SubmitOpCode(op, opts=opts) def check_unit(option, opt, value): # pylint: disable-msg=W0613 @@ -449,7 +517,7 @@ def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613 """ if ":" not in value: - ident, rest = value, '' + ident, rest = value, "" else: ident, rest = value.split(":", 1) @@ -500,7 +568,8 @@ def check_bool(option, opt, value): # pylint: disable-msg=W0613 OPT_COMPL_ONE_INSTANCE, OPT_COMPL_ONE_OS, OPT_COMPL_ONE_IALLOCATOR, - OPT_COMPL_INST_ADD_NODES) = range(100, 106) + OPT_COMPL_INST_ADD_NODES, + OPT_COMPL_ONE_NODEGROUP) = range(100, 107) OPT_COMPL_ALL = frozenset([ OPT_COMPL_MANY_NODES, @@ -509,6 +578,7 @@ OPT_COMPL_ALL = frozenset([ OPT_COMPL_ONE_OS, OPT_COMPL_ONE_IALLOCATOR, OPT_COMPL_INST_ADD_NODES, + OPT_COMPL_ONE_NODEGROUP, ]) @@ -551,8 +621,8 @@ SEP_OPT = cli_option("--separator", default=None, " (defaults to one space)")) USEUNITS_OPT = cli_option("--units", default=None, - dest="units", choices=('h', 'm', 'g', 't'), - help="Specify units for output (one of hmgt)") + dest="units", choices=("h", "m", "g", "t"), + help="Specify units for output (one of h/m/g/t)") FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store", type="string", metavar="FIELDS", @@ -564,6 +634,15 @@ FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true", CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true", default=False, help="Do not require confirmation") +IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline", + action="store_true", default=False, + help=("Ignore offline nodes and do as much" + " as possible")) + +TAG_ADD_OPT = cli_option("--tags", dest="tags", + default=None, help="Comma-separated list of instance" + " tags") + TAG_SRC_OPT = cli_option("--from", dest="tags_source", default=None, help="File with tag names") @@ -597,8 +676,8 @@ NWSYNC_OPT = 
cli_option("--no-wait-for-sync", dest="wait_for_sync", help="Don't wait for sync (DANGEROUS!)") DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template", - help="Custom disk setup (diskless, file," - " plain or drbd)", + help=("Custom disk setup (%s)" % + utils.CommaJoin(constants.DISK_TEMPLATES)), default=None, metavar="TEMPL", choices=list(constants.DISK_TEMPLATES)) @@ -696,6 +775,12 @@ IGNORE_CONSIST_OPT = cli_option("--ignore-consistency", help="Ignore the consistency of the disks on" " the secondary") +ALLOW_FAILOVER_OPT = cli_option("--allow-failover", + dest="allow_failover", + action="store_true", default=False, + help="If migration is not possible fallback to" + " failover") + NONLIVE_OPT = cli_option("--non-live", dest="live", default=True, action="store_false", help="Do a non-live migration (this usually means" @@ -719,6 +804,14 @@ NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[], " times, if not given defaults to all nodes)", completion_suggest=OPT_COMPL_ONE_NODE) +NODEGROUP_OPT_NAME = "--node-group" +NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME, + dest="nodegroup", + help="Node group (name or uuid)", + metavar="", + default=None, type="string", + completion_suggest=OPT_COMPL_ONE_NODEGROUP) + SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node", metavar="", completion_suggest=OPT_COMPL_ONE_NODE) @@ -772,6 +865,11 @@ REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance", action="store_true", default=False, help="Remove the instance from the cluster") +DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node", + help="Specifies the new node for the instance", + metavar="NODE", default=None, + completion_suggest=OPT_COMPL_ONE_NODE) + NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node", help="Specifies the new secondary node", metavar="NODE", default=None, @@ -780,12 +878,16 @@ NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", 
dest="dst_node", ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary", default=False, action="store_true", help="Replace the disk(s) on the primary" - " node (only for the drbd template)") + " node (applies only to internally mirrored" + " disk templates, e.g. %s)" % + utils.CommaJoin(constants.DTS_INT_MIRROR)) ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary", default=False, action="store_true", help="Replace the disk(s) on the secondary" - " node (only for the drbd template)") + " node (applies only to internally mirrored" + " disk templates, e.g. %s)" % + utils.CommaJoin(constants.DTS_INT_MIRROR)) AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote", default=False, action="store_true", @@ -795,7 +897,9 @@ AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote", AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto", default=False, action="store_true", help="Automatically replace faulty disks" - " (only for the drbd template)") + " (applies only to internally mirrored" + " disk templates, e.g. 
%s)" % + utils.CommaJoin(constants.DTS_INT_MIRROR)) IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size", default=False, action="store_true", @@ -822,6 +926,9 @@ NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check", default=True, action="store_false", help="Disable SSH key fingerprint checking") +NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join", + default=False, action="store_true", + help="Force the joining of a node") MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", type="bool", default=None, metavar=_YORNO, @@ -829,11 +936,22 @@ MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO, type="bool", default=None, - help="Set the offline flag on the node") + help=("Set the offline flag on the node" + " (cluster does not communicate with offline" + " nodes)")) DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO, type="bool", default=None, - help="Set the drained flag on the node") + help=("Set the drained flag on the node" + " (excluded from allocation operations)")) + +CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable", + type="bool", default=None, metavar=_YORNO, + help="Set the master_capable flag on the node") + +CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable", + type="bool", default=None, metavar=_YORNO, + help="Set the vm_capable flag on the node") ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable", type="bool", default=None, metavar=_YORNO, @@ -857,12 +975,13 @@ CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None, dest="candidate_pool_size", type="int", help="Set the candidate pool size") -VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name", - help="Enables LVM and specifies the volume group" - " name (cluster-wide) for disk allocation [xenvg]", +VG_NAME_OPT = cli_option("--vg-name", dest="vg_name", + 
help=("Enables LVM and specifies the volume group" + " name (cluster-wide) for disk allocation" + " [%s]" % constants.DEFAULT_VG), metavar="VG", default=None) -YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it", +YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it", help="Destroy cluster", action="store_true") NOVOTING_OPT = cli_option("--no-voting", dest="no_voting", @@ -877,10 +996,11 @@ MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix", MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev", help="Specify the node interface (cluster-wide)" - " on which the master IP address will be added " - " [%s]" % constants.DEFAULT_BRIDGE, + " on which the master IP address will be added" + " (cluster init default: %s)" % + constants.DEFAULT_BRIDGE, metavar="NETDEV", - default=constants.DEFAULT_BRIDGE) + default=None) GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", help="Specify the default directory (cluster-" @@ -889,6 +1009,15 @@ GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", metavar="DIR", default=constants.DEFAULT_FILE_STORAGE_DIR) +GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir", + dest="shared_file_storage_dir", + help="Specify the default directory (cluster-" + "wide) for storing the shared file-based" + " disks [%s]" % + constants.DEFAULT_SHARED_FILE_STORAGE_DIR, + metavar="SHAREDDIR", + default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR) + NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts", help="Don't modify /etc/hosts", action="store_false", default=True) @@ -929,6 +1058,11 @@ SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout", default=constants.DEFAULT_SHUTDOWN_TIMEOUT, help="Maximum time to wait for instance shutdown") +INTERVAL_OPT = cli_option("--interval", dest="interval", type="int", + default=None, + help=("Number of seconds between repetions of the" + " command")) + EARLY_RELEASE_OPT = 
cli_option("--early-release", dest="early_release", default=False, action="store_true", @@ -1030,6 +1164,105 @@ PRIMARY_IP_VERSION_OPT = \ constants.IP6_VERSION), help="Cluster-wide IP version for primary IP") +PRIORITY_OPT = cli_option("--priority", default=None, dest="priority", + metavar="|".join(name for name, _ in _PRIORITY_NAMES), + choices=_PRIONAME_TO_VALUE.keys(), + help="Priority for opcode processing") + +HID_OS_OPT = cli_option("--hidden", dest="hidden", + type="bool", default=None, metavar=_YORNO, + help="Sets the hidden flag on the OS") + +BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted", + type="bool", default=None, metavar=_YORNO, + help="Sets the blacklisted flag on the OS") + +PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None, + type="bool", metavar=_YORNO, + dest="prealloc_wipe_disks", + help=("Wipe disks prior to instance" + " creation")) + +NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams", + type="keyval", default=None, + help="Node parameters") + +ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy", + action="store", metavar="POLICY", default=None, + help="Allocation policy for the node group") + +NODE_POWERED_OPT = cli_option("--node-powered", default=None, + type="bool", metavar=_YORNO, + dest="node_powered", + help="Specify if the SoR for node is powered") + +OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int", + default=constants.OOB_TIMEOUT, + help="Maximum time to wait for out-of-band helper") + +POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float", + default=constants.OOB_POWER_DELAY, + help="Time in seconds to wait between power-ons") + +FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter", + action="store_true", default=False, + help=("Whether command argument should be treated" + " as filter")) + +NO_REMEMBER_OPT = cli_option("--no-remember", + dest="no_remember", + action="store_true", default=False, 
+ help="Perform but do not record the change" + " in the configuration") + +PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only", + default=False, action="store_true", + help="Evacuate primary instances only") + +SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only", + default=False, action="store_true", + help="Evacuate secondary instances only" + " (applies only to internally mirrored" + " disk templates, e.g. %s)" % + utils.CommaJoin(constants.DTS_INT_MIRROR)) + +STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused", + action="store_true", default=False, + help="Pause instance at startup") + +TO_GROUP_OPT = cli_option("--to", dest="to", metavar="", + help="Destination node group (name or uuid)", + default=None, action="append", + completion_suggest=OPT_COMPL_ONE_NODEGROUP) + + +#: Options provided by all commands +COMMON_OPTS = [DEBUG_OPT] + +# common options for creating instances. add and import then add their own +# specific ones. +COMMON_CREATE_OPTS = [ + BACKEND_OPT, + DISK_OPT, + DISK_TEMPLATE_OPT, + FILESTORE_DIR_OPT, + FILESTORE_DRIVER_OPT, + HYPERVISOR_OPT, + IALLOCATOR_OPT, + NET_OPT, + NODE_PLACEMENT_OPT, + NOIPCHECK_OPT, + NONAMECHECK_OPT, + NONICS_OPT, + NWSYNC_OPT, + OSPARAMS_OPT, + OS_SIZE_OPT, + SUBMIT_OPT, + TAG_ADD_OPT, + DRY_RUN_OPT, + PRIORITY_OPT, + ] + def _ParseArgs(argv, commands, aliases): """Parser for the command line arguments. @@ -1097,7 +1330,7 @@ def _ParseArgs(argv, commands, aliases): cmd = aliases[cmd] func, args_def, parser_opts, usage, description = commands[cmd] - parser = OptionParser(option_list=parser_opts + [DEBUG_OPT], + parser = OptionParser(option_list=parser_opts + COMMON_OPTS, description=description, formatter=TitledHelpFormatter(), usage="%%prog %s %s" % (cmd, usage)) @@ -1185,8 +1418,8 @@ def SplitNodeOption(value): """Splits the value of a --node option. 
""" - if value and ':' in value: - return value.split(':', 1) + if value and ":" in value: + return value.split(":", 1) else: return (value, None) @@ -1203,11 +1436,29 @@ def CalculateOSNames(os_name, os_variants): """ if os_variants: - return ['%s+%s' % (os_name, v) for v in os_variants] + return ["%s+%s" % (os_name, v) for v in os_variants] else: return [os_name] +def ParseFields(selected, default): + """Parses the values of "--field"-like options. + + @type selected: string or None + @param selected: User-selected options + @type default: list + @param default: Default fields + + """ + if selected is None: + return default + + if selected.startswith("+"): + return default + selected[1:].split(",") + + return selected.split(",") + + UsesRPC = rpc.RunWithRPC @@ -1227,12 +1478,12 @@ def AskUser(text, choices=None): """ if choices is None: - choices = [('y', True, 'Perform the operation'), - ('n', False, 'Do not perform the operation')] + choices = [("y", True, "Perform the operation"), + ("n", False, "Do not perform the operation")] if not choices or not isinstance(choices, list): raise errors.ProgrammerError("Invalid choices argument to AskUser") for entry in choices: - if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?': + if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?": raise errors.ProgrammerError("Invalid choices element to AskUser") answer = choices[-1][1] @@ -1247,18 +1498,18 @@ def AskUser(text, choices=None): try: chars = [entry[0] for entry in choices] chars[-1] = "[%s]" % chars[-1] - chars.append('?') + chars.append("?") maps = dict([(entry[0], entry[1]) for entry in choices]) while True: f.write(text) - f.write('\n') + f.write("\n") f.write("/".join(chars)) f.write(": ") line = f.readline(2).strip().lower() if line in maps: answer = maps[line] break - elif line == '?': + elif line == "?": for entry in choices: f.write(" %s - %s\n" % (entry[0], entry[2])) f.write("\n") @@ -1505,7 +1756,7 @@ class 
StdioJobPollReportCb(JobPollReportCbBase): ToStderr("Job %s is waiting in queue", job_id) self.notified_queued = True - elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock: + elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock: ToStderr("Job %s is trying to acquire all necessary locks", job_id) self.notified_waitlock = True @@ -1600,9 +1851,11 @@ def SetGenericOpcodeOpts(opcode_list, options): if not options: return for op in opcode_list: + op.debug_level = options.debug if hasattr(options, "dry_run"): op.dry_run = options.dry_run - op.debug_level = options.debug + if getattr(options, "priority", None) is not None: + op.priority = _PRIONAME_TO_VALUE[options.priority] def GetClient(): @@ -1690,8 +1943,11 @@ def FormatError(err): obuf.write("Cannot communicate with the master daemon.\nIs it running" " and listening for connections?") elif isinstance(err, luxi.TimeoutError): - obuf.write("Timeout while talking to the master daemon. Error:\n" - "%s" % msg) + obuf.write("Timeout while talking to the master daemon. Jobs might have" + " been submitted and will continue to run even if the call" + " timed out. Useful commands in this situation are \"gnt-job" + " list\", \"gnt-job cancel\" and \"gnt-job watch\". 
Error:\n") + obuf.write(msg) elif isinstance(err, luxi.PermissionError): obuf.write("It seems you don't have permissions to connect to the" " master daemon.\nPlease retry as a different user.") @@ -1700,6 +1956,9 @@ def FormatError(err): "%s" % msg) elif isinstance(err, errors.JobLost): obuf.write("Error checking job status: %s" % msg) + elif isinstance(err, errors.QueryFilterParseError): + obuf.write("Error while parsing query filter: %s\n" % err.args[0]) + obuf.write("\n".join(err.GetDetails())) elif isinstance(err, errors.GenericError): obuf.write("Unhandled Ganeti error: %s" % msg) elif isinstance(err, JobSubmittedException): @@ -1707,7 +1966,7 @@ def FormatError(err): retcode = 0 else: obuf.write("Unhandled exception: %s" % msg) - return retcode, obuf.getvalue().rstrip('\n') + return retcode, obuf.getvalue().rstrip("\n") def GenericMain(commands, override=None, aliases=None): @@ -1751,8 +2010,8 @@ def GenericMain(commands, override=None, aliases=None): for key, val in override.iteritems(): setattr(options, key, val) - utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug, - stderr_logging=True, program=binary) + utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug, + stderr_logging=True) if old_cmdline: logging.info("run with arguments '%s'", old_cmdline) @@ -1766,10 +2025,45 @@ def GenericMain(commands, override=None, aliases=None): result, err_msg = FormatError(err) logging.exception("Error during command processing") ToStderr(err_msg) + except KeyboardInterrupt: + result = constants.EXIT_FAILURE + ToStderr("Aborted. Note that if the operation created any jobs, they" + " might have been submitted and" + " will continue to run in the background.") + except IOError, err: + if err.errno == errno.EPIPE: + # our terminal went away, we'll exit + sys.exit(constants.EXIT_FAILURE) + else: + raise return result +def ParseNicOption(optvalue): + """Parses the value of the --net option(s). 
+ + """ + try: + nic_max = max(int(nidx[0]) + 1 for nidx in optvalue) + except (TypeError, ValueError), err: + raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err)) + + nics = [{}] * nic_max + for nidx, ndict in optvalue: + nidx = int(nidx) + + if not isinstance(ndict, dict): + raise errors.OpPrereqError("Invalid nic/%d value: expected dict," + " got %s" % (nidx, ndict)) + + utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES) + + nics[nidx] = ndict + + return nics + + def GenericInstanceCreate(mode, opts, args): """Add an instance to the cluster via either creation or import. @@ -1791,17 +2085,7 @@ def GenericInstanceCreate(mode, opts, args): hypervisor, hvparams = opts.hypervisor if opts.nics: - try: - nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics) - except ValueError, err: - raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err)) - nics = [{}] * nic_max - for nidx, ndict in opts.nics: - nidx = int(nidx) - if not isinstance(ndict, dict): - msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict) - raise errors.OpPrereqError(msg) - nics[nidx] = ndict + nics = ParseNicOption(opts.nics) elif opts.no_nics: # no nics nics = [] @@ -1825,7 +2109,7 @@ def GenericInstanceCreate(mode, opts, args): raise errors.OpPrereqError("Please use either the '--disk' or" " '-s' option") if opts.sd_size is not None: - opts.disks = [(0, {"size": opts.sd_size})] + opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})] if opts.disks: try: @@ -1840,25 +2124,31 @@ def GenericInstanceCreate(mode, opts, args): if not isinstance(ddict, dict): msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict) raise errors.OpPrereqError(msg) - elif "size" in ddict: - if "adopt" in ddict: + elif constants.IDISK_SIZE in ddict: + if constants.IDISK_ADOPT in ddict: raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed" " (disk %d)" % didx) try: - ddict["size"] = utils.ParseUnit(ddict["size"]) + ddict[constants.IDISK_SIZE] = \ + 
utils.ParseUnit(ddict[constants.IDISK_SIZE]) except ValueError, err: raise errors.OpPrereqError("Invalid disk size for disk %d: %s" % (didx, err)) - elif "adopt" in ddict: + elif constants.IDISK_ADOPT in ddict: if mode == constants.INSTANCE_IMPORT: raise errors.OpPrereqError("Disk adoption not allowed for instance" " import") - ddict["size"] = 0 + ddict[constants.IDISK_SIZE] = 0 else: raise errors.OpPrereqError("Missing size or adoption source for" " disk %d" % didx) disks[didx] = ddict + if opts.tags is not None: + tags = opts.tags.split(",") + else: + tags = [] + utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES) utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES) @@ -1881,7 +2171,7 @@ def GenericInstanceCreate(mode, opts, args): else: raise errors.ProgrammerError("Invalid creation mode %s" % mode) - op = opcodes.OpCreateInstance(instance_name=instance, + op = opcodes.OpInstanceCreate(instance_name=instance, disks=disks, disk_template=opts.disk_template, nics=nics, @@ -1902,6 +2192,7 @@ def GenericInstanceCreate(mode, opts, args): force_variant=force_variant, src_node=src_node, src_path=src_path, + tags=tags, no_install=no_install, identify_defaults=identify_defaults) @@ -1970,7 +2261,7 @@ class _RunWhileClusterStoppedHelper: """ # Pause watcher by acquiring an exclusive lock on watcher state file self.feedback_fn("Blocking watcher") - watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE) + watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE) try: # TODO: Currently, this just blocks. There's no timeout. # TODO: Should it be a shared lock? 
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if cl is None:
    cl = GetClient()

  if not names:
    names = None

  filter_ = qlang.MakeFilter(names, force_filter)

  response = cl.Query(resource, fields, filter_)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
sec, usec = ts return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec @@ -2174,11 +2839,11 @@ def ParseTimespec(value): if not value: raise errors.OpPrereqError("Empty time specification passed") suffix_map = { - 's': 1, - 'm': 60, - 'h': 3600, - 'd': 86400, - 'w': 604800, + "s": 1, + "m": 60, + "h": 3600, + "d": 86400, + "w": 604800, } if value[-1] not in suffix_map: try: @@ -2199,7 +2864,7 @@ def ParseTimespec(value): def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False, - filter_master=False): + filter_master=False, nodegroup=None): """Returns the names of online nodes. This function will also log a warning on stderr with the names of @@ -2220,28 +2885,60 @@ def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False, @param filter_master: if True, do not return the master node in the list (useful in coordination with secondary_ips where we cannot check our node name against the list) + @type nodegroup: string + @param nodegroup: If set, only return nodes in this node group """ if cl is None: cl = GetClient() - if secondary_ips: - name_idx = 2 - else: - name_idx = 0 + filter_ = [] + + if nodes: + filter_.append(qlang.MakeSimpleFilter("name", nodes)) + + if nodegroup is not None: + filter_.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup], + [qlang.OP_EQUAL, "group.uuid", nodegroup]]) if filter_master: - master_node = cl.QueryConfigValues(["master_node"])[0] - filter_fn = lambda x: x != master_node + filter_.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]]) + + if filter_: + if len(filter_) > 1: + final_filter = [qlang.OP_AND] + filter_ + else: + assert len(filter_) == 1 + final_filter = filter_[0] else: - filter_fn = lambda _: True + final_filter = None + + result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter) + + def _IsOffline(row): + (_, (_, offline), _) = row + return offline + + def _GetName(row): + ((_, name), _, _) = row + return name + + def _GetSip(row): + (_, _, (_, sip)) = row + 
return sip + + (offline, online) = compat.partition(result.data, _IsOffline) - result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"], - use_locking=False) - offline = [row[0] for row in result if row[1]] if offline and not nowarn: - ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline)) - return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])] + ToStderr("Note: skipping offline node(s): %s" % + utils.CommaJoin(map(_GetName, offline))) + + if secondary_ips: + fn = _GetSip + else: + fn = _GetName + + return map(fn, online) def _ToStream(stream, txt, *args): @@ -2253,13 +2950,20 @@ def _ToStream(stream, txt, *args): @param txt: the message """ - if args: - args = tuple(args) - stream.write(txt % args) - else: - stream.write(txt) - stream.write('\n') - stream.flush() + try: + if args: + args = tuple(args) + stream.write(txt % args) + else: + stream.write(txt) + stream.write("\n") + stream.flush() + except IOError, err: + if err.errno == errno.EPIPE: + # our terminal went away, we'll exit + sys.exit(constants.EXIT_FAILURE) + else: + raise def ToStdout(txt, *args): @@ -2302,15 +3006,33 @@ class JobExecutor(object): self.jobs = [] self.opts = opts self.feedback_fn = feedback_fn + self._counter = itertools.count() + + @staticmethod + def _IfName(name, fmt): + """Helper function for formatting name. + + """ + if name: + return fmt % name + + return "" def QueueJob(self, name, *ops): """Record a job for later submit. @type name: string @param name: a description of the job, will be used in WaitJobSet + """ SetGenericOpcodeOpts(ops, self.opts) - self.queue.append((name, ops)) + self.queue.append((self._counter.next(), name, ops)) + + def AddJobId(self, name, status, job_id): + """Adds a job ID to the internal queue. + + """ + self.jobs.append((self._counter.next(), status, job_id, name)) def SubmitPending(self, each=False): """Submit all pending jobs. 
@@ -2318,14 +3040,13 @@ class JobExecutor(object): """ if each: results = [] - for row in self.queue: + for (_, _, ops) in self.queue: # SubmitJob will remove the success status, but raise an exception if # the submission fails, so we'll notice that anyway. - results.append([True, self.cl.SubmitJob(row[1])]) + results.append([True, self.cl.SubmitJob(ops)]) else: - results = self.cl.SubmitManyJobs([row[1] for row in self.queue]) - for (idx, ((status, data), (name, _))) in enumerate(zip(results, - self.queue)): + results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue]) + for ((status, data), (idx, name, _)) in zip(results, self.queue): self.jobs.append((idx, status, data, name)) def _ChooseJob(self): @@ -2340,7 +3061,7 @@ class JobExecutor(object): for job_data, status in zip(self.jobs, result): if (isinstance(status, list) and status and status[0] in (constants.JOB_STATUS_QUEUED, - constants.JOB_STATUS_WAITLOCK, + constants.JOB_STATUS_WAITING, constants.JOB_STATUS_CANCELING)): # job is still present and waiting continue @@ -2371,25 +3092,26 @@ class JobExecutor(object): # first, remove any non-submitted jobs self.jobs, failures = compat.partition(self.jobs, lambda x: x[1]) for idx, _, jid, name in failures: - ToStderr("Failed to submit job for %s: %s", name, jid) + ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid) results.append((idx, False, jid)) while self.jobs: (idx, _, jid, name) = self._ChooseJob() - ToStdout("Waiting for job %s for %s...", jid, name) + ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s")) try: job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn) success = True except errors.JobLost, err: _, job_result = FormatError(err) - ToStderr("Job %s for %s has been archived, cannot check its result", - jid, name) + ToStderr("Job %s%s has been archived, cannot check its result", + jid, self._IfName(name, " for %s")) success = False except (errors.GenericError, luxi.ProtocolError), 
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  Writes one "- key: value" line per key in C{actual} (sorted); keys not
  present in C{param_dict} are shown as "default (<value>)" so the reader
  can tell explicitly-set parameters from inherited defaults.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  # For long lists, don't dump all names into the prompt; offer a "view"
  # choice instead, which re-asks with the full list appended.
  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice