X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/b705c7a61b3e3ba5d6a93c83380b66c4b1fd7890..e3303a4ebcdec1fb257a41b6fbf7031cab810553:/lib/cli.py diff --git a/lib/cli.py b/lib/cli.py index 9d86555..806d35c 100644 --- a/lib/cli.py +++ b/lib/cli.py @@ -1,7 +1,7 @@ # # -# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc. +# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -39,6 +39,7 @@ from ganeti import rpc from ganeti import ssh from ganeti import compat from ganeti import netutils +from ganeti import qlang from optparse import (OptionParser, TitledHelpFormatter, Option, OptionValueError) @@ -48,10 +49,15 @@ __all__ = [ # Command line options "ADD_UIDS_OPT", "ALLOCATABLE_OPT", + "ALLOC_POLICY_OPT", "ALL_OPT", + "ALLOW_FAILOVER_OPT", "AUTO_PROMOTE_OPT", "AUTO_REPLACE_OPT", "BACKEND_OPT", + "BLK_OS_OPT", + "CAPAB_MASTER_OPT", + "CAPAB_VM_OPT", "CLEANUP_OPT", "CLUSTER_DOMAIN_SECRET_OPT", "CONFIRM_OPT", @@ -62,16 +68,21 @@ __all__ = [ "DISK_OPT", "DISK_TEMPLATE_OPT", "DRAINED_OPT", + "DRY_RUN_OPT", "DRBD_HELPER_OPT", + "DST_NODE_OPT", "EARLY_RELEASE_OPT", "ENABLED_HV_OPT", "ERROR_CODES_OPT", "FIELDS_OPT", "FILESTORE_DIR_OPT", "FILESTORE_DRIVER_OPT", + "FORCE_FILTER_OPT", "FORCE_OPT", "FORCE_VARIANT_OPT", "GLOBAL_FILEDIR_OPT", + "HID_OS_OPT", + "GLOBAL_SHARED_FILEDIR_OPT", "HVLIST_OPT", "HVOPTS_OPT", "HYPERVISOR_OPT", @@ -80,9 +91,11 @@ __all__ = [ "IDENTIFY_DEFAULTS_OPT", "IGNORE_CONSIST_OPT", "IGNORE_FAILURES_OPT", + "IGNORE_OFFLINE_OPT", "IGNORE_REMOVE_FAILURES_OPT", "IGNORE_SECONDARIES_OPT", "IGNORE_SIZE_OPT", + "INTERVAL_OPT", "MAC_PREFIX_OPT", "MAINTAIN_NODE_HEALTH_OPT", "MASTER_NETDEV_OPT", @@ -95,8 +108,12 @@ __all__ = [ "NEW_RAPI_CERT_OPT", "NEW_SECONDARY_OPT", "NIC_PARAMS_OPT", + "NODE_FORCE_JOIN_OPT", "NODE_LIST_OPT", "NODE_PLACEMENT_OPT", + "NODEGROUP_OPT", + "NODE_PARAMS_OPT", + "NODE_POWERED_OPT", "NODRBD_STORAGE_OPT", "NOHDR_OPT", "NOIPCHECK_OPT", @@ -119,6 +136,11 @@ __all__ = [ "OSPARAMS_OPT", "OS_OPT", "OS_SIZE_OPT", + "OOB_TIMEOUT_OPT", + "POWER_DELAY_OPT", + "PREALLOC_WIPE_DISKS_OPT", + "PRIMARY_IP_VERSION_OPT", + "PRIORITY_OPT", "RAPI_CERT_OPT", "READD_OPT", "REBOOT_TYPE_OPT", @@ -146,8 +168,11 @@ __all__ = [ "VG_NAME_OPT", "YES_DOIT_OPT", # Generic functions for CLI programs + "ConfirmOperation", "GenericMain", "GenericInstanceCreate", + "GenericList", + "GenericListFields", "GetClient", "GetOnlineNodes", "JobExecutor", @@ -160,6 +185,8 @@ __all__ = [ # Formatting functions "ToStderr", "ToStdout", "FormatError", + "FormatQueryResult", + "FormatParameterDict", "GenerateTable", "AskUser", "FormatTimestamp", @@ -171,13 +198,16 @@ __all__ = [ # command line options support infrastructure "ARGS_MANY_INSTANCES", "ARGS_MANY_NODES", + "ARGS_MANY_GROUPS", "ARGS_NONE", "ARGS_ONE_INSTANCE", "ARGS_ONE_NODE", + "ARGS_ONE_GROUP", "ARGS_ONE_OS", "ArgChoice", "ArgCommand", "ArgFile", + "ArgGroup", "ArgHost", "ArgInstance", "ArgJobId", @@ -190,15 +220,35 @@ __all__ = [ "OPT_COMPL_ONE_IALLOCATOR", "OPT_COMPL_ONE_INSTANCE", "OPT_COMPL_ONE_NODE", + "OPT_COMPL_ONE_NODEGROUP", "OPT_COMPL_ONE_OS", "cli_option", "SplitNodeOption", "CalculateOSNames", + "ParseFields", + "COMMON_CREATE_OPTS", ] NO_PREFIX = "no_" UN_PREFIX = "-" +#: Priorities (sorted) +_PRIORITY_NAMES = [ + ("low", constants.OP_PRIO_LOW), + ("normal", constants.OP_PRIO_NORMAL), + ("high", constants.OP_PRIO_HIGH), + ] + +#: Priority dictionary for easier 
lookup +# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once +# we migrate to Python 2.6 +_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES) + +# Query result status for clients +(QR_NORMAL, + QR_UNKNOWN, + QR_INCOMPLETE) = range(3) + class _Argument: def __init__(self, min=0, max=None): # pylint: disable-msg=W0622 @@ -252,6 +302,13 @@ class ArgNode(_Argument): """ + +class ArgGroup(_Argument): + """Node group argument. + + """ + + class ArgJobId(_Argument): """Job ID argument. @@ -285,8 +342,11 @@ class ArgOs(_Argument): ARGS_NONE = [] ARGS_MANY_INSTANCES = [ArgInstance()] ARGS_MANY_NODES = [ArgNode()] +ARGS_MANY_GROUPS = [ArgGroup()] ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)] ARGS_ONE_NODE = [ArgNode(min=1, max=1)] +# TODO +ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)] ARGS_ONE_OS = [ArgOs(min=1, max=1)] @@ -301,7 +361,9 @@ def _ExtractTagsObject(opts, args): kind = opts.tag_type if kind == constants.TAG_CLUSTER: retval = kind, kind - elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE: + elif kind in (constants.TAG_NODEGROUP, + constants.TAG_NODE, + constants.TAG_INSTANCE): if not args: raise errors.OpPrereqError("no arguments passed to the command") name = args.pop(0) @@ -371,8 +433,8 @@ def AddTags(opts, args): _ExtendTags(opts, args) if not args: raise errors.OpPrereqError("No tags to be added") - op = opcodes.OpAddTags(kind=kind, name=name, tags=args) - SubmitOpCode(op) + op = opcodes.OpTagsSet(kind=kind, name=name, tags=args) + SubmitOpCode(op, opts=opts) def RemoveTags(opts, args): @@ -388,8 +450,8 @@ def RemoveTags(opts, args): _ExtendTags(opts, args) if not args: raise errors.OpPrereqError("No tags to be removed") - op = opcodes.OpDelTags(kind=kind, name=name, tags=args) - SubmitOpCode(op) + op = opcodes.OpTagsDel(kind=kind, name=name, tags=args) + SubmitOpCode(op, opts=opts) def check_unit(option, opt, value): # pylint: disable-msg=W0613 @@ -498,7 +560,8 @@ def check_bool(option, opt, value): # pylint: disable-msg=W0613 OPT_COMPL_ONE_INSTANCE, OPT_COMPL_ONE_OS, OPT_COMPL_ONE_IALLOCATOR, - OPT_COMPL_INST_ADD_NODES) = range(100, 106) + OPT_COMPL_INST_ADD_NODES, + OPT_COMPL_ONE_NODEGROUP) = range(100, 107) OPT_COMPL_ALL = frozenset([ OPT_COMPL_MANY_NODES, @@ -507,6 +570,7 @@ OPT_COMPL_ALL = frozenset([ OPT_COMPL_ONE_OS, OPT_COMPL_ONE_IALLOCATOR, OPT_COMPL_INST_ADD_NODES, + OPT_COMPL_ONE_NODEGROUP, ]) @@ -550,7 +614,7 @@ SEP_OPT = cli_option("--separator", default=None, USEUNITS_OPT = cli_option("--units", default=None, dest="units", choices=('h', 'm', 'g', 't'), - help="Specify units for output (one of hmgt)") + help="Specify units for output (one of h/m/g/t)") FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store", type="string", metavar="FIELDS", @@ -562,6 +626,11 @@ FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true", CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true", default=False, help="Do not require confirmation") +IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline", + action="store_true", default=False, + help=("Ignore offline nodes and do as much" + " as possible")) + TAG_SRC_OPT = cli_option("--from", dest="tags_source", default=None, help="File with tag names") @@ -575,11 +644,11 @@ SYNC_OPT = cli_option("--sync", dest="do_locking", help=("Grab locks while doing the queries" " in order to ensure more consistent results")) -_DRY_RUN_OPT = cli_option("--dry-run", default=False, - action="store_true", - help=("Do not execute the operation, just run the" - " 
check steps and verify it it could be" - " executed")) +DRY_RUN_OPT = cli_option("--dry-run", default=False, + action="store_true", + help=("Do not execute the operation, just run the" + " check steps and verify it it could be" + " executed")) VERBOSE_OPT = cli_option("-v", "--verbose", default=False, action="store_true", @@ -694,6 +763,12 @@ IGNORE_CONSIST_OPT = cli_option("--ignore-consistency", help="Ignore the consistency of the disks on" " the secondary") +ALLOW_FAILOVER_OPT = cli_option("--allow-failover", + dest="allow_failover", + action="store_true", default=False, + help="If migration is not possible fallback to" + " failover") + NONLIVE_OPT = cli_option("--non-live", dest="live", default=True, action="store_false", help="Do a non-live migration (this usually means" @@ -717,6 +792,13 @@ NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[], " times, if not given defaults to all nodes)", completion_suggest=OPT_COMPL_ONE_NODE) +NODEGROUP_OPT = cli_option("-g", "--node-group", + dest="nodegroup", + help="Node group (name or uuid)", + metavar="", + default=None, type="string", + completion_suggest=OPT_COMPL_ONE_NODEGROUP) + SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node", metavar="", completion_suggest=OPT_COMPL_ONE_NODE) @@ -770,6 +852,11 @@ REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance", action="store_true", default=False, help="Remove the instance from the cluster") +DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node", + help="Specifies the new node for the instance", + metavar="NODE", default=None, + completion_suggest=OPT_COMPL_ONE_NODE) + NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node", help="Specifies the new secondary node", metavar="NODE", default=None, @@ -820,6 +907,9 @@ NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check", default=True, action="store_false", help="Disable SSH key fingerprint checking") +NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join", + default=False, action="store_true", + help="Force the joining of a node") MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", type="bool", default=None, metavar=_YORNO, @@ -827,11 +917,22 @@ MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO, type="bool", default=None, - help="Set the offline flag on the node") + help=("Set the offline flag on the node" + " (cluster does not communicate with offline" + " nodes)")) DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO, type="bool", default=None, - help="Set the drained flag on the node") + help=("Set the drained flag on the node" + " (excluded from allocation operations)")) + +CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable", + type="bool", default=None, metavar=_YORNO, + help="Set the master_capable flag on the node") + +CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable", + type="bool", default=None, metavar=_YORNO, + help="Set the vm_capable flag on the node") ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable", type="bool", default=None, metavar=_YORNO, @@ -855,9 +956,10 @@ CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None, dest="candidate_pool_size", type="int", help="Set the candidate pool size") -VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name", - help="Enables LVM and specifies the volume group" - " name 
(cluster-wide) for disk allocation [xenvg]", +VG_NAME_OPT = cli_option("--vg-name", dest="vg_name", + help=("Enables LVM and specifies the volume group" + " name (cluster-wide) for disk allocation" + " [%s]" % constants.DEFAULT_VG), metavar="VG", default=None) YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it", @@ -875,10 +977,11 @@ MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix", MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev", help="Specify the node interface (cluster-wide)" - " on which the master IP address will be added " - " [%s]" % constants.DEFAULT_BRIDGE, + " on which the master IP address will be added" + " (cluster init default: %s)" % + constants.DEFAULT_BRIDGE, metavar="NETDEV", - default=constants.DEFAULT_BRIDGE) + default=None) GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", help="Specify the default directory (cluster-" @@ -887,6 +990,15 @@ GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", metavar="DIR", default=constants.DEFAULT_FILE_STORAGE_DIR) +GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir", + dest="shared_file_storage_dir", + help="Specify the default directory (cluster-" + "wide) for storing the shared file-based" + " disks [%s]" % + constants.DEFAULT_SHARED_FILE_STORAGE_DIR, + metavar="SHAREDDIR", + default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR) + NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts", help="Don't modify /etc/hosts", action="store_false", default=True) @@ -927,6 +1039,11 @@ SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout", default=constants.DEFAULT_SHUTDOWN_TIMEOUT, help="Maximum time to wait for instance shutdown") +INTERVAL_OPT = cli_option("--interval", dest="interval", type="int", + default=None, + help=("Number of seconds between repetions of the" + " command")) + EARLY_RELEASE_OPT = cli_option("--early-release", dest="early_release", default=False, action="store_true", @@ -1021,6 +1138,85 @@ NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage", action="store_false", default=True, help="Disable support for DRBD") +PRIMARY_IP_VERSION_OPT = \ + cli_option("--primary-ip-version", default=constants.IP4_VERSION, + action="store", dest="primary_ip_version", + metavar="%d|%d" % (constants.IP4_VERSION, + constants.IP6_VERSION), + help="Cluster-wide IP version for primary IP") + +PRIORITY_OPT = cli_option("--priority", default=None, dest="priority", + metavar="|".join(name for name, _ in _PRIORITY_NAMES), + choices=_PRIONAME_TO_VALUE.keys(), + help="Priority for opcode processing") + +HID_OS_OPT = cli_option("--hidden", dest="hidden", + type="bool", default=None, metavar=_YORNO, + help="Sets the hidden flag on the OS") + +BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted", + type="bool", default=None, metavar=_YORNO, + help="Sets the blacklisted flag on the OS") + +PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None, + type="bool", metavar=_YORNO, + dest="prealloc_wipe_disks", + help=("Wipe disks prior to instance" + " creation")) + +NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams", + type="keyval", default=None, + help="Node parameters") + +ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy", + action="store", metavar="POLICY", default=None, + help="Allocation policy for the node group") + +NODE_POWERED_OPT = cli_option("--node-powered", default=None, + type="bool", metavar=_YORNO, + dest="node_powered", + 
help="Specify if the SoR for node is powered") + +OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int", + default=constants.OOB_TIMEOUT, + help="Maximum time to wait for out-of-band helper") + +POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float", + default=constants.OOB_POWER_DELAY, + help="Time in seconds to wait between power-ons") + +FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter", + action="store_true", default=False, + help=("Whether command argument should be treated" + " as filter")) + + +#: Options provided by all commands +COMMON_OPTS = [DEBUG_OPT] + +# common options for creating instances. add and import then add their own +# specific ones. +COMMON_CREATE_OPTS = [ + BACKEND_OPT, + DISK_OPT, + DISK_TEMPLATE_OPT, + FILESTORE_DIR_OPT, + FILESTORE_DRIVER_OPT, + HYPERVISOR_OPT, + IALLOCATOR_OPT, + NET_OPT, + NODE_PLACEMENT_OPT, + NOIPCHECK_OPT, + NONAMECHECK_OPT, + NONICS_OPT, + NWSYNC_OPT, + OSPARAMS_OPT, + OS_SIZE_OPT, + SUBMIT_OPT, + DRY_RUN_OPT, + PRIORITY_OPT, + ] + def _ParseArgs(argv, commands, aliases): """Parser for the command line arguments. @@ -1088,7 +1284,7 @@ def _ParseArgs(argv, commands, aliases): cmd = aliases[cmd] func, args_def, parser_opts, usage, description = commands[cmd] - parser = OptionParser(option_list=parser_opts + [_DRY_RUN_OPT, DEBUG_OPT], + parser = OptionParser(option_list=parser_opts + COMMON_OPTS, description=description, formatter=TitledHelpFormatter(), usage="%%prog %s %s" % (cmd, usage)) @@ -1199,6 +1395,24 @@ def CalculateOSNames(os_name, os_variants): return [os_name] +def ParseFields(selected, default): + """Parses the values of "--field"-like options. + + @type selected: string or None + @param selected: User-selected options + @type default: list + @param default: Default fields + + """ + if selected is None: + return default + + if selected.startswith("+"): + return default + selected[1:].split(",") + + return selected.split(",") + + UsesRPC = rpc.RunWithRPC @@ -1591,8 +1805,11 @@ def SetGenericOpcodeOpts(opcode_list, options): if not options: return for op in opcode_list: - op.dry_run = options.dry_run op.debug_level = options.debug + if hasattr(options, "dry_run"): + op.dry_run = options.dry_run + if getattr(options, "priority", None) is not None: + op.priority = _PRIONAME_TO_VALUE[options.priority] def GetClient(): @@ -1680,8 +1897,11 @@ def FormatError(err): obuf.write("Cannot communicate with the master daemon.\nIs it running" " and listening for connections?") elif isinstance(err, luxi.TimeoutError): - obuf.write("Timeout while talking to the master daemon. Error:\n" - "%s" % msg) + obuf.write("Timeout while talking to the master daemon. Jobs might have" + " been submitted and will continue to run even if the call" + " timed out. Useful commands in this situation are \"gnt-job" + " list\", \"gnt-job cancel\" and \"gnt-job watch\". 
Error:\n") + obuf.write(msg) elif isinstance(err, luxi.PermissionError): obuf.write("It seems you don't have permissions to connect to the" " master daemon.\nPlease retry as a different user.") @@ -1690,6 +1910,9 @@ def FormatError(err): "%s" % msg) elif isinstance(err, errors.JobLost): obuf.write("Error checking job status: %s" % msg) + elif isinstance(err, errors.QueryFilterParseError): + obuf.write("Error while parsing query filter: %s\n" % err.args[0]) + obuf.write("\n".join(err.GetDetails())) elif isinstance(err, errors.GenericError): obuf.write("Unhandled Ganeti error: %s" % msg) elif isinstance(err, JobSubmittedException): @@ -1741,8 +1964,8 @@ def GenericMain(commands, override=None, aliases=None): for key, val in override.iteritems(): setattr(options, key, val) - utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug, - stderr_logging=True, program=binary) + utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug, + stderr_logging=True) if old_cmdline: logging.info("run with arguments '%s'", old_cmdline) @@ -1756,10 +1979,39 @@ def GenericMain(commands, override=None, aliases=None): result, err_msg = FormatError(err) logging.exception("Error during command processing") ToStderr(err_msg) + except KeyboardInterrupt: + result = constants.EXIT_FAILURE + ToStderr("Aborted. Note that if the operation created any jobs, they" + " might have been submitted and" + " will continue to run in the background.") return result +def ParseNicOption(optvalue): + """Parses the value of the --net option(s). + + """ + try: + nic_max = max(int(nidx[0]) + 1 for nidx in optvalue) + except (TypeError, ValueError), err: + raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err)) + + nics = [{}] * nic_max + for nidx, ndict in optvalue: + nidx = int(nidx) + + if not isinstance(ndict, dict): + raise errors.OpPrereqError("Invalid nic/%d value: expected dict," + " got %s" % (nidx, ndict)) + + utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES) + + nics[nidx] = ndict + + return nics + + def GenericInstanceCreate(mode, opts, args): """Add an instance to the cluster via either creation or import. 
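
For context, the ParseNicOption helper added in the hunk above expects the parsed value of repeated --net options, i.e. a list of (index, settings-dict) pairs, and returns a dense list of NIC dictionaries with unspecified indices left as empty dicts. A minimal, illustrative sketch follows; the sample values are hypothetical and assume "mac" and "link" are valid keys in constants.INIC_PARAMS_TYPES:

# Illustrative sketch only; assumes ganeti.cli is importable and that the
# sample NIC parameters pass utils.ForceDictType against
# constants.INIC_PARAMS_TYPES.
from ganeti import cli

optvalue = [("0", {"mac": "auto"}), ("2", {"link": "br0"})]
nics = cli.ParseNicOption(optvalue)
# Index 1 was not given on the command line, so it stays an empty dict:
#   nics == [{"mac": "auto"}, {}, {"link": "br0"}]
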
@@ -1781,17 +2033,7 @@ def GenericInstanceCreate(mode, opts, args): hypervisor, hvparams = opts.hypervisor if opts.nics: - try: - nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics) - except ValueError, err: - raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err)) - nics = [{}] * nic_max - for nidx, ndict in opts.nics: - nidx = int(nidx) - if not isinstance(ndict, dict): - msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict) - raise errors.OpPrereqError(msg) - nics[nidx] = ndict + nics = ParseNicOption(opts.nics) elif opts.no_nics: # no nics nics = [] @@ -1815,7 +2057,7 @@ def GenericInstanceCreate(mode, opts, args): raise errors.OpPrereqError("Please use either the '--disk' or" " '-s' option") if opts.sd_size is not None: - opts.disks = [(0, {"size": opts.sd_size})] + opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})] if opts.disks: try: @@ -1830,20 +2072,21 @@ def GenericInstanceCreate(mode, opts, args): if not isinstance(ddict, dict): msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict) raise errors.OpPrereqError(msg) - elif "size" in ddict: - if "adopt" in ddict: + elif constants.IDISK_SIZE in ddict: + if constants.IDISK_ADOPT in ddict: raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed" " (disk %d)" % didx) try: - ddict["size"] = utils.ParseUnit(ddict["size"]) + ddict[constants.IDISK_SIZE] = \ + utils.ParseUnit(ddict[constants.IDISK_SIZE]) except ValueError, err: raise errors.OpPrereqError("Invalid disk size for disk %d: %s" % (didx, err)) - elif "adopt" in ddict: + elif constants.IDISK_ADOPT in ddict: if mode == constants.INSTANCE_IMPORT: raise errors.OpPrereqError("Disk adoption not allowed for instance" " import") - ddict["size"] = 0 + ddict[constants.IDISK_SIZE] = 0 else: raise errors.OpPrereqError("Missing size or adoption source for" " disk %d" % didx) @@ -1871,7 +2114,7 @@ def GenericInstanceCreate(mode, opts, args): else: raise errors.ProgrammerError("Invalid creation mode %s" % mode) - op = opcodes.OpCreateInstance(instance_name=instance, + op = opcodes.OpInstanceCreate(instance_name=instance, disks=disks, disk_template=opts.disk_template, nics=nics, @@ -2130,6 +2373,391 @@ def GenerateTable(headers, fields, separator, data, return result +def _FormatBool(value): + """Formats a boolean value as a string. + + """ + if value: + return "Y" + return "N" + + +#: Default formatting for query results; (callback, align right) +_DEFAULT_FORMAT_QUERY = { + constants.QFT_TEXT: (str, False), + constants.QFT_BOOL: (_FormatBool, False), + constants.QFT_NUMBER: (str, True), + constants.QFT_TIMESTAMP: (utils.FormatTime, False), + constants.QFT_OTHER: (str, False), + constants.QFT_UNKNOWN: (str, False), + } + + +def _GetColumnFormatter(fdef, override, unit): + """Returns formatting function for a field. 
+ + @type fdef: L{objects.QueryFieldDefinition} + @type override: dict + @param override: Dictionary for overriding field formatting functions, + indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} + @type unit: string + @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} + @rtype: tuple; (callable, bool) + @return: Returns the function to format a value (takes one parameter) and a + boolean for aligning the value on the right-hand side + + """ + fmt = override.get(fdef.name, None) + if fmt is not None: + return fmt + + assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY + + if fdef.kind == constants.QFT_UNIT: + # Can't keep this information in the static dictionary + return (lambda value: utils.FormatUnit(value, unit), True) + + fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None) + if fmt is not None: + return fmt + + raise NotImplementedError("Can't format column type '%s'" % fdef.kind) + + +class _QueryColumnFormatter: + """Callable class for formatting fields of a query. + + """ + def __init__(self, fn, status_fn, verbose): + """Initializes this class. + + @type fn: callable + @param fn: Formatting function + @type status_fn: callable + @param status_fn: Function to report fields' status + @type verbose: boolean + @param verbose: whether to use verbose field descriptions or not + + """ + self._fn = fn + self._status_fn = status_fn + self._verbose = verbose + + def __call__(self, data): + """Returns a field's string representation. + + """ + (status, value) = data + + # Report status + self._status_fn(status) + + if status == constants.RS_NORMAL: + return self._fn(value) + + assert value is None, \ + "Found value %r for abnormal status %s" % (value, status) + + return FormatResultError(status, self._verbose) + + +def FormatResultError(status, verbose): + """Formats result status other than L{constants.RS_NORMAL}. + + @param status: The result status + @type verbose: boolean + @param verbose: Whether to return the verbose text + @return: Text of result status + + """ + assert status != constants.RS_NORMAL, \ + "FormatResultError called with status equal to constants.RS_NORMAL" + try: + (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status] + except KeyError: + raise NotImplementedError("Unknown status %s" % status) + else: + if verbose: + return verbose_text + return normal_text + + +def FormatQueryResult(result, unit=None, format_override=None, separator=None, + header=False, verbose=False): + """Formats data in L{objects.QueryResponse}. 
+ + @type result: L{objects.QueryResponse} + @param result: result of query operation + @type unit: string + @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}, + see L{utils.text.FormatUnit} + @type format_override: dict + @param format_override: Dictionary for overriding field formatting functions, + indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} + @type separator: string or None + @param separator: String used to separate fields + @type header: bool + @param header: Whether to output header row + @type verbose: boolean + @param verbose: whether to use verbose field descriptions or not + + """ + if unit is None: + if separator: + unit = "m" + else: + unit = "h" + + if format_override is None: + format_override = {} + + stats = dict.fromkeys(constants.RS_ALL, 0) + + def _RecordStatus(status): + if status in stats: + stats[status] += 1 + + columns = [] + for fdef in result.fields: + assert fdef.title and fdef.name + (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit) + columns.append(TableColumn(fdef.title, + _QueryColumnFormatter(fn, _RecordStatus, + verbose), + align_right)) + + table = FormatTable(result.data, columns, header, separator) + + # Collect statistics + assert len(stats) == len(constants.RS_ALL) + assert compat.all(count >= 0 for count in stats.values()) + + # Determine overall status. If there was no data, unknown fields must be + # detected via the field definitions. + if (stats[constants.RS_UNKNOWN] or + (not result.data and _GetUnknownFields(result.fields))): + status = QR_UNKNOWN + elif compat.any(count > 0 for key, count in stats.items() + if key != constants.RS_NORMAL): + status = QR_INCOMPLETE + else: + status = QR_NORMAL + + return (status, table) + + +def _GetUnknownFields(fdefs): + """Returns list of unknown fields included in C{fdefs}. + + @type fdefs: list of L{objects.QueryFieldDefinition} + + """ + return [fdef for fdef in fdefs + if fdef.kind == constants.QFT_UNKNOWN] + + +def _WarnUnknownFields(fdefs): + """Prints a warning to stderr if a query included unknown fields. + + @type fdefs: list of L{objects.QueryFieldDefinition} + + """ + unknown = _GetUnknownFields(fdefs) + if unknown: + ToStderr("Warning: Queried for unknown fields %s", + utils.CommaJoin(fdef.name for fdef in unknown)) + return True + + return False + + +def GenericList(resource, fields, names, unit, separator, header, cl=None, + format_override=None, verbose=False, force_filter=False): + """Generic implementation for listing all items of a resource. 
+ + @param resource: One of L{constants.QR_VIA_LUXI} + @type fields: list of strings + @param fields: List of fields to query for + @type names: list of strings + @param names: Names of items to query for + @type unit: string or None + @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or + None for automatic choice (human-readable for non-separator usage, + otherwise megabytes); this is a one-letter string + @type separator: string or None + @param separator: String used to separate fields + @type header: bool + @param header: Whether to show header row + @type force_filter: bool + @param force_filter: Whether to always treat names as filter + @type format_override: dict + @param format_override: Dictionary for overriding field formatting functions, + indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} + @type verbose: boolean + @param verbose: whether to use verbose field descriptions or not + + """ + if cl is None: + cl = GetClient() + + if not names: + names = None + + if (force_filter or + (names and len(names) == 1 and qlang.MaybeFilter(names[0]))): + try: + (filter_text, ) = names + except ValueError: + raise errors.OpPrereqError("Exactly one argument must be given as a" + " filter") + + logging.debug("Parsing '%s' as filter", filter_text) + filter_ = qlang.ParseFilter(filter_text) + else: + filter_ = qlang.MakeSimpleFilter("name", names) + + response = cl.Query(resource, fields, filter_) + + found_unknown = _WarnUnknownFields(response.fields) + + (status, data) = FormatQueryResult(response, unit=unit, separator=separator, + header=header, + format_override=format_override, + verbose=verbose) + + for line in data: + ToStdout(line) + + assert ((found_unknown and status == QR_UNKNOWN) or + (not found_unknown and status != QR_UNKNOWN)) + + if status == QR_UNKNOWN: + return constants.EXIT_UNKNOWN_FIELD + + # TODO: Should the list command fail if not all data could be collected? + return constants.EXIT_SUCCESS + + +def GenericListFields(resource, fields, separator, header, cl=None): + """Generic implementation for listing fields for a resource. + + @param resource: One of L{constants.QR_VIA_LUXI} + @type fields: list of strings + @param fields: List of fields to query for + @type separator: string or None + @param separator: String used to separate fields + @type header: bool + @param header: Whether to show header row + + """ + if cl is None: + cl = GetClient() + + if not fields: + fields = None + + response = cl.QueryFields(resource, fields) + + found_unknown = _WarnUnknownFields(response.fields) + + columns = [ + TableColumn("Name", str, False), + TableColumn("Title", str, False), + TableColumn("Description", str, False), + ] + + rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields] + + for line in FormatTable(rows, columns, header, separator): + ToStdout(line) + + if found_unknown: + return constants.EXIT_UNKNOWN_FIELD + + return constants.EXIT_SUCCESS + + +class TableColumn: + """Describes a column for L{FormatTable}. + + """ + def __init__(self, title, fn, align_right): + """Initializes this class. + + @type title: string + @param title: Column title + @type fn: callable + @param fn: Formatting function + @type align_right: bool + @param align_right: Whether to align values on the right-hand side + + """ + self.title = title + self.format = fn + self.align_right = align_right + + +def _GetColFormatString(width, align_right): + """Returns the format string for a field. 
+ + """ + if align_right: + sign = "" + else: + sign = "-" + + return "%%%s%ss" % (sign, width) + + +def FormatTable(rows, columns, header, separator): + """Formats data as a table. + + @type rows: list of lists + @param rows: Row data, one list per row + @type columns: list of L{TableColumn} + @param columns: Column descriptions + @type header: bool + @param header: Whether to show header row + @type separator: string or None + @param separator: String used to separate columns + + """ + if header: + data = [[col.title for col in columns]] + colwidth = [len(col.title) for col in columns] + else: + data = [] + colwidth = [0 for _ in columns] + + # Format row data + for row in rows: + assert len(row) == len(columns) + + formatted = [col.format(value) for value, col in zip(row, columns)] + + if separator is None: + # Update column widths + for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)): + # Modifying a list's items while iterating is fine + colwidth[idx] = max(oldwidth, len(value)) + + data.append(formatted) + + if separator is not None: + # Return early if a separator is used + return [separator.join(row) for row in data] + + if columns and not columns[-1].align_right: + # Avoid unnecessary spaces at end of line + colwidth[-1] = 0 + + # Build format string + fmt = " ".join([_GetColFormatString(width, col.align_right) + for col, width in zip(columns, colwidth)]) + + return [fmt % tuple(row) for row in data] + + def FormatTimestamp(ts): """Formats a given timestamp. @@ -2407,3 +3035,60 @@ class JobExecutor(object): else: ToStderr("Failure for %s: %s", name, result) return [row[1:3] for row in self.jobs] + + +def FormatParameterDict(buf, param_dict, actual, level=1): + """Formats a parameter dictionary. + + @type buf: L{StringIO} + @param buf: the buffer into which to write + @type param_dict: dict + @param param_dict: the own parameters + @type actual: dict + @param actual: the current parameter set (including defaults) + @param level: Level of indent + + """ + indent = " " * level + for key in sorted(actual): + val = param_dict.get(key, "default (%s)" % actual[key]) + buf.write("%s- %s: %s\n" % (indent, key, val)) + + +def ConfirmOperation(names, list_type, text, extra=""): + """Ask the user to confirm an operation on a list of list_type. + + This function is used to request confirmation for doing an operation + on a given list of list_type. + + @type names: list + @param names: the list of names that we display when + we ask for confirmation + @type list_type: str + @param list_type: Human readable name for elements in the list (e.g. nodes) + @type text: str + @param text: the operation that the user should confirm + @rtype: boolean + @return: True or False depending on user's confirmation. + + """ + count = len(names) + msg = ("The %s will operate on %d %s.\n%s" + "Do you want to continue?" % (text, count, list_type, extra)) + affected = (("\nAffected %s:\n" % list_type) + + "\n".join([" %s" % name for name in names])) + + choices = [("y", True, "Yes, execute the %s" % text), + ("n", False, "No, abort the %s" % text)] + + if count > 20: + choices.insert(1, ("v", "v", "View the list of affected %s" % list_type)) + question = msg + else: + question = msg + affected + + choice = AskUser(question, choices) + if choice == "v": + choices.pop(1) + choice = AskUser(msg + affected, choices) + return choice
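
As a closing illustration, the generic query helpers added by this patch (ParseFields, GenericList, FormatQueryResult) are intended to be combined in client commands roughly as sketched below. This is a hedged sketch, not code from the patch: the resource constant, the default field names and the option destinations (output, units, separator, no_headers, verbose, force_filter) are assumptions based on the options defined in this module.

# Hypothetical list command built on the helpers added above.  Assumes
# constants.QR_NODE is one of constants.QR_VIA_LUXI and that "name" and
# "pinst_cnt" are valid node query fields.
from ganeti import cli
from ganeti import constants

def ListNodesExample(opts, args):
  # "-o field1,field2" replaces the default fields, "-o +field" appends to them
  fields = cli.ParseFields(opts.output, ["name", "pinst_cnt"])
  # GenericList treats args as item names, or as a query filter when the
  # --filter option was given, runs the query via LUXI and prints the table
  # produced by FormatQueryResult
  return cli.GenericList(constants.QR_NODE, fields, args, opts.units,
                         opts.separator, not opts.no_headers,
                         verbose=opts.verbose,
                         force_filter=opts.force_filter)
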