#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
import os.path
import time
import logging
+import errno
+import itertools
+import shlex
from cStringIO import StringIO
from ganeti import utils
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
+from ganeti import qlang
+from ganeti import objects
+from ganeti import pathutils
from optparse import (OptionParser, TitledHelpFormatter,
Option, OptionValueError)
__all__ = [
# Command line options
+ "ABSOLUTE_OPT",
"ADD_UIDS_OPT",
+ "ADD_RESERVED_IPS_OPT",
"ALLOCATABLE_OPT",
+ "ALLOC_POLICY_OPT",
"ALL_OPT",
+ "ALLOW_FAILOVER_OPT",
"AUTO_PROMOTE_OPT",
"AUTO_REPLACE_OPT",
"BACKEND_OPT",
"BLK_OS_OPT",
"CAPAB_MASTER_OPT",
+ "CAPAB_VM_OPT",
"CLEANUP_OPT",
"CLUSTER_DOMAIN_SECRET_OPT",
"CONFIRM_OPT",
"DEBUG_SIMERR_OPT",
"DISKIDX_OPT",
"DISK_OPT",
+ "DISK_PARAMS_OPT",
"DISK_TEMPLATE_OPT",
"DRAINED_OPT",
"DRY_RUN_OPT",
"DRBD_HELPER_OPT",
+ "DST_NODE_OPT",
"EARLY_RELEASE_OPT",
"ENABLED_HV_OPT",
+ "ENABLED_DISK_TEMPLATES_OPT",
"ERROR_CODES_OPT",
+ "FAILURE_ONLY_OPT",
"FIELDS_OPT",
"FILESTORE_DIR_OPT",
"FILESTORE_DRIVER_OPT",
+ "FORCE_FILTER_OPT",
"FORCE_OPT",
"FORCE_VARIANT_OPT",
+ "GATEWAY_OPT",
+ "GATEWAY6_OPT",
"GLOBAL_FILEDIR_OPT",
"HID_OS_OPT",
+ "GLOBAL_SHARED_FILEDIR_OPT",
+ "HOTPLUG_OPT",
"HVLIST_OPT",
"HVOPTS_OPT",
"HYPERVISOR_OPT",
"DEFAULT_IALLOCATOR_OPT",
"IDENTIFY_DEFAULTS_OPT",
"IGNORE_CONSIST_OPT",
+ "IGNORE_ERRORS_OPT",
"IGNORE_FAILURES_OPT",
"IGNORE_OFFLINE_OPT",
"IGNORE_REMOVE_FAILURES_OPT",
"IGNORE_SECONDARIES_OPT",
"IGNORE_SIZE_OPT",
+ "INCLUDEDEFAULTS_OPT",
"INTERVAL_OPT",
"MAC_PREFIX_OPT",
"MAINTAIN_NODE_HEALTH_OPT",
"MASTER_NETDEV_OPT",
+ "MASTER_NETMASK_OPT",
"MC_OPT",
"MIGRATION_MODE_OPT",
+ "MODIFY_ETCHOSTS_OPT",
"NET_OPT",
+ "NETWORK_OPT",
+ "NETWORK6_OPT",
"NEW_CLUSTER_CERT_OPT",
"NEW_CLUSTER_DOMAIN_SECRET_OPT",
"NEW_CONFD_HMAC_KEY_OPT",
"NEW_RAPI_CERT_OPT",
+ "NEW_PRIMARY_OPT",
"NEW_SECONDARY_OPT",
+ "NEW_SPICE_CERT_OPT",
"NIC_PARAMS_OPT",
+ "NOCONFLICTSCHECK_OPT",
+ "NODE_FORCE_JOIN_OPT",
"NODE_LIST_OPT",
"NODE_PLACEMENT_OPT",
"NODEGROUP_OPT",
+ "NODE_PARAMS_OPT",
+ "NODE_POWERED_OPT",
"NODRBD_STORAGE_OPT",
"NOHDR_OPT",
"NOIPCHECK_OPT",
"NONICS_OPT",
"NONLIVE_OPT",
"NONPLUS1_OPT",
+ "NORUNTIME_CHGS_OPT",
"NOSHUTDOWN_OPT",
"NOSTART_OPT",
"NOSSH_KEYCHECK_OPT",
"NOVOTING_OPT",
+ "NO_REMEMBER_OPT",
"NWSYNC_OPT",
+ "OFFLINE_INST_OPT",
+ "ONLINE_INST_OPT",
"ON_PRIMARY_OPT",
"ON_SECONDARY_OPT",
"OFFLINE_OPT",
"OSPARAMS_OPT",
"OS_OPT",
"OS_SIZE_OPT",
+ "OOB_TIMEOUT_OPT",
+ "POWER_DELAY_OPT",
"PREALLOC_WIPE_DISKS_OPT",
"PRIMARY_IP_VERSION_OPT",
+ "PRIMARY_ONLY_OPT",
"PRIORITY_OPT",
"RAPI_CERT_OPT",
"READD_OPT",
+ "REASON_OPT",
"REBOOT_TYPE_OPT",
"REMOVE_INSTANCE_OPT",
+ "REMOVE_RESERVED_IPS_OPT",
"REMOVE_UIDS_OPT",
"RESERVED_LVS_OPT",
+ "RUNTIME_MEM_OPT",
"ROMAN_OPT",
"SECONDARY_IP_OPT",
+ "SECONDARY_ONLY_OPT",
"SELECT_OS_OPT",
"SEP_OPT",
"SHOWCMD_OPT",
+ "SHOW_MACHINE_OPT",
"SHUTDOWN_TIMEOUT_OPT",
"SINGLE_NODE_OPT",
+ "SPECS_CPU_COUNT_OPT",
+ "SPECS_DISK_COUNT_OPT",
+ "SPECS_DISK_SIZE_OPT",
+ "SPECS_MEM_SIZE_OPT",
+ "SPECS_NIC_COUNT_OPT",
+ "SPLIT_ISPECS_OPTS",
+ "IPOLICY_STD_SPECS_OPT",
+ "IPOLICY_DISK_TEMPLATES",
+ "IPOLICY_VCPU_RATIO",
+ "SPICE_CACERT_OPT",
+ "SPICE_CERT_OPT",
"SRC_DIR_OPT",
"SRC_NODE_OPT",
"SUBMIT_OPT",
+ "STARTUP_PAUSED_OPT",
"STATIC_OPT",
"SYNC_OPT",
+ "TAG_ADD_OPT",
"TAG_SRC_OPT",
"TIMEOUT_OPT",
+ "TO_GROUP_OPT",
"UIDPOOL_OPT",
"USEUNITS_OPT",
+ "USE_EXTERNAL_MIP_SCRIPT",
"USE_REPL_NET_OPT",
"VERBOSE_OPT",
"VG_NAME_OPT",
+ "WFSYNC_OPT",
"YES_DOIT_OPT",
+ "DISK_STATE_OPT",
+ "HV_STATE_OPT",
+ "IGNORE_IPOLICY_OPT",
+ "INSTANCE_POLICY_OPTS",
# Generic functions for CLI programs
+ "ConfirmOperation",
+ "CreateIPolicyFromOpts",
"GenericMain",
"GenericInstanceCreate",
+ "GenericList",
+ "GenericListFields",
"GetClient",
"GetOnlineNodes",
"JobExecutor",
# Formatting functions
"ToStderr", "ToStdout",
"FormatError",
+ "FormatQueryResult",
+ "FormatParamsDictInfo",
+ "FormatPolicyInfo",
+ "PrintIPolicyCommand",
+ "PrintGenericInfo",
"GenerateTable",
"AskUser",
"FormatTimestamp",
# command line options support infrastructure
"ARGS_MANY_INSTANCES",
"ARGS_MANY_NODES",
+ "ARGS_MANY_GROUPS",
+ "ARGS_MANY_NETWORKS",
"ARGS_NONE",
"ARGS_ONE_INSTANCE",
"ARGS_ONE_NODE",
+ "ARGS_ONE_GROUP",
"ARGS_ONE_OS",
+ "ARGS_ONE_NETWORK",
"ArgChoice",
"ArgCommand",
"ArgFile",
+ "ArgGroup",
"ArgHost",
"ArgInstance",
"ArgJobId",
+ "ArgNetwork",
"ArgNode",
"ArgOs",
+ "ArgExtStorage",
"ArgSuggest",
"ArgUnknown",
"OPT_COMPL_INST_ADD_NODES",
"OPT_COMPL_ONE_INSTANCE",
"OPT_COMPL_ONE_NODE",
"OPT_COMPL_ONE_NODEGROUP",
+ "OPT_COMPL_ONE_NETWORK",
"OPT_COMPL_ONE_OS",
+ "OPT_COMPL_ONE_EXTSTORAGE",
"cli_option",
"SplitNodeOption",
"CalculateOSNames",
"ParseFields",
+ "COMMON_CREATE_OPTS",
]
NO_PREFIX = "no_"
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
# Query result status for clients: fully answered, contains unknown
# fields, or data could not be retrieved completely
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# Constants used to create the InstancePolicy dictionary: value types
# for the instance spec bounds. Group-level policies carry only min/max;
# cluster-level policies additionally carry the "std" (standard) spec.
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
+
class _Argument:
- def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
+ def __init__(self, min=0, max=None): # pylint: disable=W0622
self.min = min
self.max = max
Value can be any of the ones passed to the constructor.
"""
- # pylint: disable-msg=W0622
+ # pylint: disable=W0622
def __init__(self, min=0, max=None, choices=None):
_Argument.__init__(self, min=min, max=max)
self.choices = choices
"""
+
class ArgNetwork(_Argument):
  """Network argument.

  Marker subclass used in argument specifications (e.g. ARGS_ONE_NETWORK)
  to denote a positional network argument.

  """
+
+
class ArgGroup(_Argument):
  """Node group argument.

  Marker subclass used in argument specifications (e.g. ARGS_ONE_GROUP)
  to denote a positional node group argument.

  """
+
+
class ArgJobId(_Argument):
"""Job ID argument.
"""
class ArgExtStorage(_Argument):
  """ExtStorage argument.

  Marker subclass used in argument specifications to denote a positional
  ExtStorage provider argument.

  """
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
+ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
+ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
+ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
+# TODO
+ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
kind = opts.tag_type
if kind == constants.TAG_CLUSTER:
- retval = kind, kind
- elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
+ retval = kind, None
+ elif kind in (constants.TAG_NODEGROUP,
+ constants.TAG_NODE,
+ constants.TAG_NETWORK,
+ constants.TAG_INSTANCE):
if not args:
- raise errors.OpPrereqError("no arguments passed to the command")
+ raise errors.OpPrereqError("no arguments passed to the command",
+ errors.ECODE_INVAL)
name = args.pop(0)
retval = kind, name
else:
"""
kind, name = _ExtractTagsObject(opts, args)
- cl = GetClient()
+ cl = GetClient(query=True)
result = cl.QueryTags(kind, name)
result = list(result)
result.sort()
kind, name = _ExtractTagsObject(opts, args)
_ExtendTags(opts, args)
if not args:
- raise errors.OpPrereqError("No tags to be added")
- op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
- SubmitOpCode(op, opts=opts)
+ raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
+ op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
+ SubmitOrSend(op, opts)
def RemoveTags(opts, args):
kind, name = _ExtractTagsObject(opts, args)
_ExtendTags(opts, args)
if not args:
- raise errors.OpPrereqError("No tags to be removed")
- op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
- SubmitOpCode(op, opts=opts)
+ raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
+ op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
+ SubmitOrSend(op, opts)
-def check_unit(option, opt, value): # pylint: disable-msg=W0613
+def check_unit(option, opt, value): # pylint: disable=W0613
"""OptParsers custom converter for units.
"""
raise OptionValueError("option %s: %s" % (opt, err))
-def _SplitKeyVal(opt, data):
+def _SplitKeyVal(opt, data, parse_prefixes):
"""Convert a KeyVal string into a dict.
This function will convert a key=val[,...] string into a dict. Empty
values will be converted specially: keys which have the prefix 'no_'
- will have the value=False and the prefix stripped, the others will
+ will have the value=False and the prefix stripped, keys with the prefix
+ "-" will have value=None and the prefix stripped, and the others will
have value=True.
@type opt: string
data, used in building error messages
@type data: string
@param data: a string of the format key=val,key=val,...
+ @type parse_prefixes: bool
+ @param parse_prefixes: whether to handle prefixes specially
@rtype: dict
@return: {key=val, key=val}
@raises errors.ParameterError: if there are duplicate keys
for elem in utils.UnescapeAndSplit(data, sep=","):
if "=" in elem:
key, val = elem.split("=", 1)
- else:
+ elif parse_prefixes:
if elem.startswith(NO_PREFIX):
key, val = elem[len(NO_PREFIX):], False
elif elem.startswith(UN_PREFIX):
key, val = elem[len(UN_PREFIX):], None
else:
key, val = elem, True
+ else:
+ raise errors.ParameterError("Missing value for key '%s' in option %s" %
+ (elem, opt))
if key in kv_dict:
raise errors.ParameterError("Duplicate key '%s' in option %s" %
(key, opt))
return kv_dict
-def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
- """Custom parser for ident:key=val,key=val options.
+def _SplitIdentKeyVal(opt, value, parse_prefixes):
+ """Helper function to parse "ident:key=val,key=val" options.
- This will store the parsed values as a tuple (ident, {key: val}). As such,
- multiple uses of this option via action=append is possible.
+ @type opt: string
+ @param opt: option name, used in error messages
+ @type value: string
+ @param value: expected to be in the format "ident:key=val,key=val,..."
+ @type parse_prefixes: bool
+ @param parse_prefixes: whether to handle prefixes specially (see
+ L{_SplitKeyVal})
+ @rtype: tuple
+ @return: (ident, {key=val, key=val})
+ @raises errors.ParameterError: in case of duplicates or other parsing errors
"""
if ":" not in value:
- ident, rest = value, ''
+ ident, rest = value, ""
else:
ident, rest = value.split(":", 1)
- if ident.startswith(NO_PREFIX):
+ if parse_prefixes and ident.startswith(NO_PREFIX):
if rest:
msg = "Cannot pass options when removing parameter groups: %s" % value
raise errors.ParameterError(msg)
retval = (ident[len(NO_PREFIX):], False)
- elif ident.startswith(UN_PREFIX):
+ elif (parse_prefixes and ident.startswith(UN_PREFIX) and
+ (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
if rest:
msg = "Cannot pass options when removing parameter groups: %s" % value
raise errors.ParameterError(msg)
retval = (ident[len(UN_PREFIX):], None)
else:
- kv_dict = _SplitKeyVal(opt, rest)
+ kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
retval = (ident, kv_dict)
return retval
-def check_key_val(option, opt, value): # pylint: disable-msg=W0613
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  Parses the value into a (ident, {key: val}) tuple; since each call
  produces one tuple, the option can be supplied repeatedly via
  action=append.

  """
  # prefix handling ("no_"/"-") is enabled for this option type
  parsed = _SplitIdentKeyVal(opt, value, True)
  return parsed
+
+
+def check_key_val(option, opt, value): # pylint: disable=W0613
"""Custom parser class for key=val,key=val options.
This will store the parsed values as a dict {key: val}.
"""
- return _SplitKeyVal(opt, value)
+ return _SplitKeyVal(opt, value, True)
+
+
def _SplitListKeyVal(opt, value):
  """Helper function to parse "ident:key=val,key=val/ident:key=val" options.

  @type opt: string
  @param opt: option name, used in building error messages
  @type value: string
  @param value: expected to be in the format
      "ident:key=val,key=val/ident:key=val,..."
  @rtype: dict
  @return: {ident: {key: val, ...}, ident: {key: val, ...}}
  @raises errors.ParameterError: on empty sections or duplicate identifiers

  """
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    # prefix handling disabled: each section must be "ident:key=val,..."
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval
+
+
def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  # "//" separates the outer list elements, "/" the ident sections inside
  return [_SplitListKeyVal(opt, line) for line in value.split("//")]
-def check_bool(option, opt, value): # pylint: disable-msg=W0613
+def check_bool(option, opt, value): # pylint: disable=W0613
"""Custom parser for yes/no options.
This will store the parsed value as either True or False.
raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # An explicit check is needed here: "".split(",") yields [""], not the
  # empty list we want for an empty value.
  if not value:
    return []
  return utils.UnescapeAndSplit(value)
+
+
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  lowered = value.lower()
  if lowered == constants.VALUE_DEFAULT:
    # keep the literal "default" marker instead of converting to float
    return lowered
  return float(lowered)
+
+
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
OPT_COMPL_ONE_NODE,
OPT_COMPL_ONE_INSTANCE,
OPT_COMPL_ONE_OS,
+ OPT_COMPL_ONE_EXTSTORAGE,
OPT_COMPL_ONE_IALLOCATOR,
+ OPT_COMPL_ONE_NETWORK,
OPT_COMPL_INST_ADD_NODES,
- OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
+ OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
-OPT_COMPL_ALL = frozenset([
+OPT_COMPL_ALL = compat.UniqueFrozenset([
OPT_COMPL_MANY_NODES,
OPT_COMPL_ONE_NODE,
OPT_COMPL_ONE_INSTANCE,
OPT_COMPL_ONE_OS,
+ OPT_COMPL_ONE_EXTSTORAGE,
OPT_COMPL_ONE_IALLOCATOR,
+ OPT_COMPL_ONE_NETWORK,
OPT_COMPL_INST_ADD_NODES,
OPT_COMPL_ONE_NODEGROUP,
])
"completion_suggest",
]
TYPES = Option.TYPES + (
+ "multilistidentkeyval",
"identkeyval",
"keyval",
"unit",
"bool",
+ "list",
+ "maybefloat",
)
TYPE_CHECKER = Option.TYPE_CHECKER.copy()
+ TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
TYPE_CHECKER["identkeyval"] = check_ident_key_val
TYPE_CHECKER["keyval"] = check_key_val
TYPE_CHECKER["unit"] = check_unit
TYPE_CHECKER["bool"] = check_bool
+ TYPE_CHECKER["list"] = check_list
+ TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
" (defaults to one space)"))
USEUNITS_OPT = cli_option("--units", default=None,
- dest="units", choices=('h', 'm', 'g', 't'),
- help="Specify units for output (one of hmgt)")
+ dest="units", choices=("h", "m", "g", "t"),
+ help="Specify units for output (one of h/m/g/t)")
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
type="string", metavar="FIELDS",
help=("Ignore offline nodes and do as much"
" as possible"))
+TAG_ADD_OPT = cli_option("--tags", dest="tags",
+ default=None, help="Comma-separated list of instance"
+ " tags")
+
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
default=None, help="File with tag names")
DRY_RUN_OPT = cli_option("--dry-run", default=False,
action="store_true",
help=("Do not execute the operation, just run the"
- " check steps and verify it it could be"
+ " check steps and verify if it could be"
" executed"))
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
default=True, action="store_false",
help="Don't wait for sync (DANGEROUS!)")
+WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
+ default=False, action="store_true",
+ help="Wait for disks to sync")
+
+ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
+ action="store_true", default=False,
+ help="Enable offline instance")
+
+OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
+ action="store_true", default=False,
+ help="Disable down instance")
+
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
- help="Custom disk setup (diskless, file,"
- " plain or drbd)",
+ help=("Custom disk setup (%s)" %
+ utils.CommaJoin(constants.DISK_TEMPLATES)),
default=None, metavar="TEMPL",
choices=list(constants.DISK_TEMPLATES))
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
help="Driver to use for image files",
- default="loop", metavar="<DRIVER>",
+ default=None, metavar="<DRIVER>",
choices=list(constants.FILE_DRIVER))
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
- metavar="<NAME>",
- help="Set the default instance allocator plugin",
- default=None, type="string",
- completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
+ metavar="<NAME>",
+ help="Set the default instance"
+ " allocator plugin",
+ default=None, type="string",
+ completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
metavar="<os>",
completion_suggest=OPT_COMPL_ONE_OS)
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
- type="keyval", default={},
- help="OS parameters")
+ type="keyval", default={},
+ help="OS parameters")
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
action="store_true", default=False,
help="Do not install the OS (will"
" enable no-start)")
+NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
+ dest="allow_runtime_chgs",
+ default=True, action="store_false",
+ help="Don't allow runtime changes")
+
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
type="keyval", default={},
help="Backend parameters")
-HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
- default={}, dest="hvparams",
- help="Hypervisor parameters")
+HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
+ default={}, dest="hvparams",
+ help="Hypervisor parameters")
+
+DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
+ help="Disk template parameters, in the format"
+ " template:option=value,option=value,...",
+ type="identkeyval", action="append", default=[])
+
# Instance-spec options: each takes "key=value" pairs where the key is
# one of min, max or std (the instance policy bounds and standard value)
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")
+
+IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
+IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
+ dest="ipolicy_bounds_specs",
+ type="multilistidentkeyval", default=None,
+ help="Complete instance specs limits")
+
# Option for setting the policy's standard (desired) instance specs.
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   # fixed typo in user-visible help text
                                   # ("Complte" -> "Complete")
                                   help="Complete standard instance specs")
+
+IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
+ dest="ipolicy_disk_templates",
+ type="list", default=None,
+ help="Comma-separated list of"
+ " enabled disk templates")
+
+IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
+ dest="ipolicy_vcpu_ratio",
+ type="maybefloat", default=None,
+ help="The maximum allowed vcpu-to-cpu ratio")
+
+IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
+ dest="ipolicy_spindle_ratio",
+ type="maybefloat", default=None,
+ help=("The maximum allowed instances to"
+ " spindle ratio"))
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
help="Hypervisor and hypervisor options, in the"
help="Ignore the consistency of the disks on"
" the secondary")
+ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
+ dest="allow_failover",
+ action="store_true", default=False,
+ help="If migration is not possible fallback to"
+ " failover")
+
NONLIVE_OPT = cli_option("--non-live", dest="live",
default=True, action="store_false",
help="Do a non-live migration (this usually means"
" times, if not given defaults to all nodes)",
completion_suggest=OPT_COMPL_ONE_NODE)
-NODEGROUP_OPT = cli_option("-g", "--node-group",
+NODEGROUP_OPT_NAME = "--node-group"
+NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
dest="nodegroup",
help="Node group (name or uuid)",
metavar="<nodegroup>",
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
default=False, action="store_true",
- help="Instead of performing the migration, try to"
- " recover from a failed cleanup. This is safe"
+ help="Instead of performing the migration/failover,"
+ " try to recover from a failed cleanup. This is safe"
" to run even if the instance is healthy, but it"
" will create extra replication traffic and "
" disrupt briefly the replication (like during the"
- " migration")
+ " migration/failover")
STATIC_OPT = cli_option("-s", "--static", dest="static",
action="store_true", default=False,
action="store_true", default=False,
help="Remove the instance from the cluster")
+DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
+ help="Specifies the new node for the instance",
+ metavar="NODE", default=None,
+ completion_suggest=OPT_COMPL_ONE_NODE)
+
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
help="Specifies the new secondary node",
metavar="NODE", default=None,
completion_suggest=OPT_COMPL_ONE_NODE)
+NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
+ help="Specifies the new primary node",
+ metavar="<node>", default=None,
+ completion_suggest=OPT_COMPL_ONE_NODE)
+
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
default=False, action="store_true",
help="Replace the disk(s) on the primary"
- " node (only for the drbd template)")
+ " node (applies only to internally mirrored"
+ " disk templates, e.g. %s)" %
+ utils.CommaJoin(constants.DTS_INT_MIRROR))
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
default=False, action="store_true",
help="Replace the disk(s) on the secondary"
- " node (only for the drbd template)")
+ " node (applies only to internally mirrored"
+ " disk templates, e.g. %s)" %
+ utils.CommaJoin(constants.DTS_INT_MIRROR))
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
default=False, action="store_true",
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
default=False, action="store_true",
help="Automatically replace faulty disks"
- " (only for the drbd template)")
+ " (applies only to internally mirrored"
+ " disk templates, e.g. %s)" %
+ utils.CommaJoin(constants.DTS_INT_MIRROR))
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
default=False, action="store_true",
default=True, action="store_false",
help="Disable SSH key fingerprint checking")
+NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
+ default=False, action="store_true",
+ help="Force the joining of a node")
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
type="bool", default=None, metavar=_YORNO,
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
type="bool", default=None,
- help="Set the offline flag on the node")
+ help=("Set the offline flag on the node"
+ " (cluster does not communicate with offline"
+ " nodes)"))
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
type="bool", default=None,
- help="Set the drained flag on the node")
+ help=("Set the drained flag on the node"
+ " (excluded from allocation operations)"))
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
- type="bool", default=None, metavar=_YORNO,
- help="Set the master_capable flag on the node")
+ type="bool", default=None, metavar=_YORNO,
+ help="Set the master_capable flag on the node")
+
+CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
+ type="bool", default=None, metavar=_YORNO,
+ help="Set the vm_capable flag on the node")
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
type="bool", default=None, metavar=_YORNO,
help="Comma-separated list of hypervisors",
type="string", default=None)
+ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
+ dest="enabled_disk_templates",
+ help="Comma-separated list of "
+ "disk templates",
+ type="string", default=None)
+
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
type="keyval", default={},
help="NIC parameters")
dest="candidate_pool_size", type="int",
help="Set the candidate pool size")
-VG_NAME_OPT = cli_option("-g", "--vg-name", dest="vg_name",
- help="Enables LVM and specifies the volume group"
- " name (cluster-wide) for disk allocation [xenvg]",
+VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
+ help=("Enables LVM and specifies the volume group"
+ " name (cluster-wide) for disk allocation"
+ " [%s]" % constants.DEFAULT_VG),
metavar="VG", default=None)
-YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
+YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
help="Destroy cluster", action="store_true")
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
help="Specify the node interface (cluster-wide)"
- " on which the master IP address will be added "
- " [%s]" % constants.DEFAULT_BRIDGE,
+ " on which the master IP address will be added"
+ " (cluster init default: %s)" %
+ constants.DEFAULT_BRIDGE,
metavar="NETDEV",
- default=constants.DEFAULT_BRIDGE)
+ default=None)
+
+MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
+ help="Specify the netmask of the master IP",
+ metavar="NETMASK",
+ default=None)
+
+USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
+ dest="use_external_mip_script",
+ help="Specify whether to run a"
+ " user-provided script for the master"
+ " IP address turnup and"
+ " turndown operations",
+ type="bool", metavar=_YORNO, default=None)
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
help="Specify the default directory (cluster-"
"wide) for storing the file-based disks [%s]" %
- constants.DEFAULT_FILE_STORAGE_DIR,
+ pathutils.DEFAULT_FILE_STORAGE_DIR,
metavar="DIR",
- default=constants.DEFAULT_FILE_STORAGE_DIR)
+ default=pathutils.DEFAULT_FILE_STORAGE_DIR)
+
+GLOBAL_SHARED_FILEDIR_OPT = cli_option(
+ "--shared-file-storage-dir",
+ dest="shared_file_storage_dir",
+ help="Specify the default directory (cluster-wide) for storing the"
+ " shared file-based disks [%s]" %
+ pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
+ metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
- help="Don't modify /etc/hosts",
+ help="Don't modify %s" % pathutils.ETC_HOSTS,
action="store_false", default=True)
+MODIFY_ETCHOSTS_OPT = \
+ cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
+ default=None, type="bool",
+ help="Defines whether the cluster should autonomously modify"
+ " and keep in sync the /etc/hosts file of the nodes")
+
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
help="Don't initialize SSH keys",
action="store_false", default=True)
help="Maximum time to wait")
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
- dest="shutdown_timeout", type="int",
- default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
- help="Maximum time to wait for instance shutdown")
+ dest="shutdown_timeout", type="int",
+ default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
+ help="Maximum time to wait for instance"
+ " shutdown")
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
default=None,
help=("Generate a new self-signed RAPI"
" certificate"))
+SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
+ default=None,
+ help="File containing new SPICE certificate")
+
+SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
+ default=None,
+ help="File containing the certificate of the CA"
+ " which signed the SPICE certificate")
+
+NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
+ dest="new_spice_cert", default=None,
+ action="store_true",
+ help=("Generate a new self-signed SPICE"
+ " certificate"))
+
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
dest="new_confd_hmac_key",
default=False, action="store_true",
" removed from the user-id pool"))
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
- action="store", dest="reserved_lvs",
- help=("A comma-separated list of reserved"
- " logical volumes names, that will be"
- " ignored by cluster verify"))
+ action="store", dest="reserved_lvs",
+ help=("A comma-separated list of reserved"
+ " logical volumes names, that will be"
+ " ignored by cluster verify"))
ROMAN_OPT = cli_option("--roman",
dest="roman_integers", default=False,
constants.IP6_VERSION),
help="Cluster-wide IP version for primary IP")
+SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
+ action="store_true",
+ help="Show machine name for every line in output")
+
+FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
+ action="store_true",
+ help=("Hide successful results and show failures"
+ " only (determined by the exit code)"))
+
+REASON_OPT = cli_option("--reason", default=None,
+ help="The reason for executing the command")
+
+
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Maps the symbolic priority name given on the command line to its
  numeric value via L{_PRIONAME_TO_VALUE} and stores the result on
  C{parser.values} under the option's destination attribute.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)
+
+
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
metavar="|".join(name for name, _ in _PRIORITY_NAMES),
choices=_PRIONAME_TO_VALUE.keys(),
+ action="callback", type="choice",
+ callback=_PriorityOptionCb,
help="Priority for opcode processing")
HID_OS_OPT = cli_option("--hidden", dest="hidden",
help=("Wipe disks prior to instance"
" creation"))
+NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
+                             type="keyval", default=None,
+                             help="Node parameters")
+
+ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
+                              action="store", metavar="POLICY", default=None,
+                              help="Allocation policy for the node group")
+
+NODE_POWERED_OPT = cli_option("--node-powered", default=None,
+                              type="bool", metavar=_YORNO,
+                              dest="node_powered",
+                              help="Specify if the SoR for node is powered")
+
+OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
+                             default=constants.OOB_TIMEOUT,
+                             help="Maximum time to wait for out-of-band helper")
+
+POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
+                             default=constants.OOB_POWER_DELAY,
+                             help="Time in seconds to wait between power-ons")
+
+FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
+                              action="store_true", default=False,
+                              help=("Whether command argument should be treated"
+                                    " as filter"))
+
+NO_REMEMBER_OPT = cli_option("--no-remember",
+                             dest="no_remember",
+                             action="store_true", default=False,
+                             help="Perform but do not record the change"
+                             " in the configuration")
+
+PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
+                              default=False, action="store_true",
+                              help="Evacuate primary instances only")
+
+SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
+                                default=False, action="store_true",
+                                help="Evacuate secondary instances only"
+                                " (applies only to internally mirrored"
+                                " disk templates, e.g. %s)" %
+                                utils.CommaJoin(constants.DTS_INT_MIRROR))
+
+STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
+                                action="store_true", default=False,
+                                help="Pause instance at startup")
+
+TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
+                          help="Destination node group (name or uuid)",
+                          default=None, action="append",
+                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
+
+# NOTE(review): default=[] is a list shared by all parsers using this option
+# object; optparse appends to it in place -- confirm this is intentional
+IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
+                               action="append", dest="ignore_errors",
+                               choices=list(constants.CV_ALL_ECODES_STRINGS),
+                               help="Error code to be ignored")
+
+DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
+                            action="append",
+                            help=("Specify disk state information in the"
+                                  " format"
+                                  " storage_type/identifier:option=value,...;"
+                                  " note this is unused for now"),
+                            type="identkeyval")
+
+HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
+                          action="append",
+                          help=("Specify hypervisor state information in the"
+                                " format hypervisor:option=value,...;"
+                                " note this is unused for now"),
+                          type="identkeyval")
+
+IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
+                                action="store_true", default=False,
+                                help="Ignore instance policy violations")
+
+RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
+                             help="Sets the instance's runtime memory,"
+                             " ballooning it up or down to the new value",
+                             default=None, type="unit", metavar="<size>")
+
+ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
+                          action="store_true", default=False,
+                          help="Marks the grow as absolute instead of the"
+                          " (default) relative mode")
+
+# Network (IP pool) management options
+NETWORK_OPT = cli_option("--network",
+                         action="store", default=None, dest="network",
+                         help="IP network in CIDR notation")
+
+GATEWAY_OPT = cli_option("--gateway",
+                         action="store", default=None, dest="gateway",
+                         help="IP address of the router (gateway)")
+
+ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
+                                  action="store", default=None,
+                                  dest="add_reserved_ips",
+                                  help="Comma-separated list of"
+                                  " reserved IPs to add")
+
+REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
+                                     action="store", default=None,
+                                     dest="remove_reserved_ips",
+                                     help="Comma-delimited list of"
+                                     " reserved IPs to remove")
+
+NETWORK6_OPT = cli_option("--network6",
+                          action="store", default=None, dest="network6",
+                          help="IP network in CIDR notation")
+
+GATEWAY6_OPT = cli_option("--gateway6",
+                          action="store", default=None, dest="gateway6",
+                          help="IP6 address of the router (gateway)")
+
+# Note the inverted logic: the flag *disables* the check, so dest defaults
+# to True and the option stores False
+NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
+                                  dest="conflicts_check",
+                                  default=True,
+                                  action="store_false",
+                                  help="Don't check for conflicting IPs")
+
+INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
+                                 default=False, action="store_true",
+                                 help="Include default values")
+
+HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
+                         action="store_true", default=False,
+                         help="Try to hotplug device")
#: Options provided by all commands
-COMMON_OPTS = [DEBUG_OPT]
+COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
+
+# Common options for creating instances; the "add" and "import" commands
+# then add their own specific ones.
+COMMON_CREATE_OPTS = [
+  BACKEND_OPT,
+  DISK_OPT,
+  DISK_TEMPLATE_OPT,
+  FILESTORE_DIR_OPT,
+  FILESTORE_DRIVER_OPT,
+  HYPERVISOR_OPT,
+  IALLOCATOR_OPT,
+  NET_OPT,
+  NODE_PLACEMENT_OPT,
+  NOIPCHECK_OPT,
+  NOCONFLICTSCHECK_OPT,
+  NONAMECHECK_OPT,
+  NONICS_OPT,
+  NWSYNC_OPT,
+  OSPARAMS_OPT,
+  OS_SIZE_OPT,
+  SUBMIT_OPT,
+  TAG_ADD_OPT,
+  DRY_RUN_OPT,
+  PRIORITY_OPT,
+  ]
+
+# common instance policy options
+INSTANCE_POLICY_OPTS = [
+  IPOLICY_BOUNDS_SPECS_OPT,
+  IPOLICY_DISK_TEMPLATES,
+  IPOLICY_VCPU_RATIO,
+  IPOLICY_SPINDLE_RATIO,
+  ]
+
+# instance policy split specs options
+SPLIT_ISPECS_OPTS = [
+  SPECS_CPU_COUNT_OPT,
+  SPECS_DISK_COUNT_OPT,
+  SPECS_DISK_SIZE_OPT,
+  SPECS_MEM_SIZE_OPT,
+  SPECS_NIC_COUNT_OPT,
+  ]
+
+
+class _ShowUsage(Exception):
+  """Exception class for L{_ParseArgs}.
+
+  Raised to signal that the usage description should be printed.
+
+  """
+  def __init__(self, exit_error):
+    """Initializes instances of this class.
+
+    @type exit_error: bool
+    @param exit_error: Whether to report failure on exit
+
+    """
+    Exception.__init__(self)
+    self.exit_error = exit_error
+
+
+class _ShowVersion(Exception):
+  """Exception class for L{_ParseArgs}.
+
+  Raised to signal that version information should be printed.
+
+  """
-def _ParseArgs(argv, commands, aliases):
+def _ParseArgs(binary, argv, commands, aliases, env_override):
"""Parser for the command line arguments.
This function parses the arguments and returns the function which
must be executed together with its (modified) arguments.
- @param argv: the command line
- @param commands: dictionary with special contents, see the design
- doc for cmdline handling
- @param aliases: dictionary with command aliases {'alias': 'target, ...}
+ @param binary: Script name
+ @param argv: Command line arguments
+ @param commands: Dictionary containing command definitions
+ @param aliases: dictionary with command aliases {"alias": "target", ...}
+ @param env_override: list of env variables allowed for default args
+ @raise _ShowUsage: If usage description should be shown
+ @raise _ShowVersion: If version should be shown
"""
- if len(argv) == 0:
- binary = "<command>"
- else:
- binary = argv[0].split("/")[-1]
+ assert not (env_override - set(commands))
+ assert not (set(aliases.keys()) & set(commands.keys()))
- if len(argv) > 1 and argv[1] == "--version":
- ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
- constants.RELEASE_VERSION)
- # Quit right away. That way we don't have to care about this special
- # argument. optparse.py does it the same.
- sys.exit(0)
-
- if len(argv) < 2 or not (argv[1] in commands or
- argv[1] in aliases):
- # let's do a nice thing
- sortedcmds = commands.keys()
- sortedcmds.sort()
-
- ToStdout("Usage: %s {command} [options...] [argument...]", binary)
- ToStdout("%s <command> --help to see details, or man %s", binary, binary)
- ToStdout("")
-
- # compute the max line length for cmd + usage
- mlen = max([len(" %s" % cmd) for cmd in commands])
- mlen = min(60, mlen) # should not get here...
-
- # and format a nice command list
- ToStdout("Commands:")
- for cmd in sortedcmds:
- cmdstr = " %s" % (cmd,)
- help_text = commands[cmd][4]
- help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
- ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
- for line in help_lines:
- ToStdout("%-*s %s", mlen, "", line)
-
- ToStdout("")
+ if len(argv) > 1:
+ cmd = argv[1]
+ else:
+ # No option or command given
+ raise _ShowUsage(exit_error=True)
- return None, None, None
+ if cmd == "--version":
+ raise _ShowVersion()
+ elif cmd == "--help":
+ raise _ShowUsage(exit_error=False)
+ elif not (cmd in commands or cmd in aliases):
+ raise _ShowUsage(exit_error=True)
# get command, unalias it, and look it up in commands
- cmd = argv.pop(1)
if cmd in aliases:
- if cmd in commands:
- raise errors.ProgrammerError("Alias '%s' overrides an existing"
- " command" % cmd)
-
if aliases[cmd] not in commands:
raise errors.ProgrammerError("Alias '%s' maps to non-existing"
" command '%s'" % (cmd, aliases[cmd]))
cmd = aliases[cmd]
+ if cmd in env_override:
+ args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
+ env_args = os.environ.get(args_env_name)
+ if env_args:
+ argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
+
func, args_def, parser_opts, usage, description = commands[cmd]
parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
description=description,
formatter=TitledHelpFormatter(),
usage="%%prog %s %s" % (cmd, usage))
parser.disable_interspersed_args()
- options, args = parser.parse_args()
+ options, args = parser.parse_args(args=argv[2:])
if not _CheckArguments(cmd, args_def, args):
return None, None, None
return func, options, args
+def _FormatUsage(binary, commands):
+  """Generates a nice description of all commands.
+
+  @param binary: Script name
+  @param commands: Dictionary containing command definitions
+  @return: Generator yielding one line of usage text at a time
+
+  """
+  # compute the max line length for cmd + usage, capped at 60 columns
+  mlen = min(60, max(map(len, commands)))
+
+  yield "Usage: %s {command} [options...] [argument...]" % binary
+  yield "%s <command> --help to see details, or man %s" % (binary, binary)
+  yield ""
+  yield "Commands:"
+
+  # and format a nice command list, wrapping help text to terminal width
+  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
+    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
+    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
+    for line in help_lines:
+      yield " %-*s %s" % (mlen, "", line)
+
+  yield ""
+
+
def _CheckArguments(cmd, args_def, args):
"""Verifies the arguments using the argument definition.
"""Splits the value of a --node option.
"""
- if value and ':' in value:
- return value.split(':', 1)
+ if value and ":" in value:
+ return value.split(":", 1)
else:
return (value, None)
"""
if os_variants:
- return ['%s+%s' % (os_name, v) for v in os_variants]
+ return ["%s+%s" % (os_name, v) for v in os_variants]
else:
return [os_name]
"""
if choices is None:
- choices = [('y', True, 'Perform the operation'),
- ('n', False, 'Do not perform the operation')]
+ choices = [("y", True, "Perform the operation"),
+ ("n", False, "Do not perform the operation")]
if not choices or not isinstance(choices, list):
raise errors.ProgrammerError("Invalid choices argument to AskUser")
for entry in choices:
- if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
+ if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
raise errors.ProgrammerError("Invalid choices element to AskUser")
answer = choices[-1][1]
try:
chars = [entry[0] for entry in choices]
chars[-1] = "[%s]" % chars[-1]
- chars.append('?')
+ chars.append("?")
maps = dict([(entry[0], entry[1]) for entry in choices])
while True:
f.write(text)
- f.write('\n')
+ f.write("\n")
f.write("/".join(chars))
f.write(": ")
line = f.readline(2).strip().lower()
if line in maps:
answer = maps[line]
break
- elif line == '?':
+ elif line == "?":
for entry in choices:
f.write(" %s - %s\n" % (entry[0], entry[2]))
f.write("\n")
ToStderr("Job %s is waiting in queue", job_id)
self.notified_queued = True
- elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
+ elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
ToStderr("Job %s is trying to acquire all necessary locks", job_id)
self.notified_waitlock = True
return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
+def _InitReasonTrail(op, opts):
+  """Builds the first part of the reason trail.
+
+  Builds the initial part of the reason trail, adding the user provided reason
+  (if it exists) and the name of the command starting the operation.
+
+  @param op: the opcode the reason trail will be added to
+  @param opts: the command line options selected by the user
+
+  """
+  # sys.argv[0] is the binary name and sys.argv[1] the command; both are
+  # needed below to record the source of the operation
+  assert len(sys.argv) >= 2
+  trail = []
+
+  if opts.reason:
+    trail.append((constants.OPCODE_REASON_SRC_USER,
+                  opts.reason,
+                  utils.EpochNano()))
+
+  binary = os.path.basename(sys.argv[0])
+  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
+  command = sys.argv[1]
+  trail.append((source, command, utils.EpochNano()))
+  op.reason = trail
+
+
def SetGenericOpcodeOpts(opcode_list, options):
"""Processor for generic options.
if hasattr(options, "dry_run"):
op.dry_run = options.dry_run
if getattr(options, "priority", None) is not None:
- op.priority = _PRIONAME_TO_VALUE[options.priority]
+ op.priority = options.priority
+ _InitReasonTrail(op, options)
+
+
+def GetClient(query=False):
+  """Connects to a luxi socket and returns a client.
+ @type query: boolean
+ @param query: this signifies that the client will only be
+ used for queries; if the build-time parameter
+ enable-split-queries is enabled, then the client will be
+ connected to the query socket instead of the masterd socket
-def GetClient():
+ """
+ override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
+ if override_socket:
+ if override_socket == constants.LUXI_OVERRIDE_MASTER:
+ address = pathutils.MASTER_SOCKET
+ elif override_socket == constants.LUXI_OVERRIDE_QUERY:
+ address = pathutils.QUERY_SOCKET
+ else:
+ address = override_socket
+ elif query and constants.ENABLE_SPLIT_QUERY:
+ address = pathutils.QUERY_SOCKET
+ else:
+ address = None
# TODO: Cache object?
try:
- client = luxi.Client()
+ client = luxi.Client(address=address)
except luxi.NoMasterError:
ss = ssconf.SimpleStore()
ss.GetMasterNode()
except errors.ConfigurationError:
raise errors.OpPrereqError("Cluster not initialized or this machine is"
- " not part of a cluster")
+ " not part of a cluster",
+ errors.ECODE_INVAL)
master, myself = ssconf.GetMasterAndMyself(ss=ss)
if master != myself:
raise errors.OpPrereqError("This is not the master node, please connect"
" to node '%s' and rerun the command" %
- master)
+ master, errors.ECODE_INVAL)
raise
return client
elif isinstance(err, errors.OpPrereqError):
if len(err.args) == 2:
obuf.write("Failure: prerequisites not met for this"
- " operation:\nerror type: %s, error details:\n%s" %
+ " operation:\nerror type: %s, error details:\n%s" %
(err.args[1], err.args[0]))
else:
obuf.write("Failure: prerequisites not met for this"
elif isinstance(err, errors.ParameterError):
obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
elif isinstance(err, luxi.NoMasterError):
- obuf.write("Cannot communicate with the master daemon.\nIs it running"
- " and listening for connections?")
+ if err.args[0] == pathutils.MASTER_SOCKET:
+ daemon = "the master daemon"
+ elif err.args[0] == pathutils.QUERY_SOCKET:
+ daemon = "the config daemon"
+ else:
+ daemon = "socket '%s'" % str(err.args[0])
+ obuf.write("Cannot communicate with %s.\nIs the process running"
+ " and listening for connections?" % daemon)
elif isinstance(err, luxi.TimeoutError):
- obuf.write("Timeout while talking to the master daemon. Error:\n"
- "%s" % msg)
+ obuf.write("Timeout while talking to the master daemon. Jobs might have"
+ " been submitted and will continue to run even if the call"
+ " timed out. Useful commands in this situation are \"gnt-job"
+ " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
+ obuf.write(msg)
elif isinstance(err, luxi.PermissionError):
obuf.write("It seems you don't have permissions to connect to the"
" master daemon.\nPlease retry as a different user.")
"%s" % msg)
elif isinstance(err, errors.JobLost):
obuf.write("Error checking job status: %s" % msg)
+ elif isinstance(err, errors.QueryFilterParseError):
+ obuf.write("Error while parsing query filter: %s\n" % err.args[0])
+ obuf.write("\n".join(err.GetDetails()))
elif isinstance(err, errors.GenericError):
obuf.write("Unhandled Ganeti error: %s" % msg)
elif isinstance(err, JobSubmittedException):
retcode = 0
else:
obuf.write("Unhandled exception: %s" % msg)
- return retcode, obuf.getvalue().rstrip('\n')
+ return retcode, obuf.getvalue().rstrip("\n")
-def GenericMain(commands, override=None, aliases=None):
+def GenericMain(commands, override=None, aliases=None,
+ env_override=frozenset()):
"""Generic main function for all the gnt-* commands.
- Arguments:
- - commands: a dictionary with a special structure, see the design doc
- for command line handling.
- - override: if not None, we expect a dictionary with keys that will
- override command line options; this can be used to pass
- options from the scripts to generic functions
- - aliases: dictionary with command aliases {'alias': 'target, ...}
+ @param commands: a dictionary with a special structure, see the design doc
+ for command line handling.
+ @param override: if not None, we expect a dictionary with keys that will
+ override command line options; this can be used to pass
+ options from the scripts to generic functions
+  @param aliases: dictionary with command aliases {'alias': 'target', ...}
+ @param env_override: list of environment names which are allowed to submit
+ default args for commands
"""
# save the program name and the entire command line for later logging
if sys.argv:
- binary = os.path.basename(sys.argv[0]) or sys.argv[0]
+ binary = os.path.basename(sys.argv[0])
+ if not binary:
+ binary = sys.argv[0]
+
if len(sys.argv) >= 2:
- binary += " " + sys.argv[1]
- old_cmdline = " ".join(sys.argv[2:])
+ logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
else:
- old_cmdline = ""
+ logname = binary
+
+ cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
else:
binary = "<unknown program>"
- old_cmdline = ""
+ cmdline = "<unknown>"
if aliases is None:
aliases = {}
try:
- func, options, args = _ParseArgs(sys.argv, commands, aliases)
+ (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
+ env_override)
+ except _ShowVersion:
+ ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
+ constants.RELEASE_VERSION)
+ return constants.EXIT_SUCCESS
+ except _ShowUsage, err:
+ for line in _FormatUsage(binary, commands):
+ ToStdout(line)
+
+ if err.exit_error:
+ return constants.EXIT_FAILURE
+ else:
+ return constants.EXIT_SUCCESS
except errors.ParameterError, err:
result, err_msg = FormatError(err)
ToStderr(err_msg)
for key, val in override.iteritems():
setattr(options, key, val)
- utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
- stderr_logging=True, program=binary)
+ utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
+ stderr_logging=True)
- if old_cmdline:
- logging.info("run with arguments '%s'", old_cmdline)
- else:
- logging.info("run with no arguments")
+ logging.info("Command line: %s", cmdline)
try:
result = func(options, args)
result, err_msg = FormatError(err)
logging.exception("Error during command processing")
ToStderr(err_msg)
+ except KeyboardInterrupt:
+ result = constants.EXIT_FAILURE
+ ToStderr("Aborted. Note that if the operation created any jobs, they"
+ " might have been submitted and"
+ " will continue to run in the background.")
+ except IOError, err:
+ if err.errno == errno.EPIPE:
+ # our terminal went away, we'll exit
+ sys.exit(constants.EXIT_FAILURE)
+ else:
+ raise
return result
try:
nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
except (TypeError, ValueError), err:
- raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
+ raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
+ errors.ECODE_INVAL)
nics = [{}] * nic_max
for nidx, ndict in optvalue:
if not isinstance(ndict, dict):
raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
- " got %s" % (nidx, ndict))
+ " got %s" % (nidx, ndict), errors.ECODE_INVAL)
utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
if opts.disk_template == constants.DT_DISKLESS:
if opts.disks or opts.sd_size is not None:
raise errors.OpPrereqError("Diskless instance but disk"
- " information passed")
+ " information passed", errors.ECODE_INVAL)
disks = []
else:
if (not opts.disks and not opts.sd_size
and mode == constants.INSTANCE_CREATE):
- raise errors.OpPrereqError("No disk information specified")
+ raise errors.OpPrereqError("No disk information specified",
+ errors.ECODE_INVAL)
if opts.disks and opts.sd_size is not None:
raise errors.OpPrereqError("Please use either the '--disk' or"
- " '-s' option")
+ " '-s' option", errors.ECODE_INVAL)
if opts.sd_size is not None:
- opts.disks = [(0, {"size": opts.sd_size})]
+ opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
if opts.disks:
try:
disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
except ValueError, err:
- raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
+ raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
+ errors.ECODE_INVAL)
disks = [{}] * disk_max
else:
disks = []
didx = int(didx)
if not isinstance(ddict, dict):
msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
- raise errors.OpPrereqError(msg)
- elif "size" in ddict:
- if "adopt" in ddict:
+ raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+ elif constants.IDISK_SIZE in ddict:
+ if constants.IDISK_ADOPT in ddict:
raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
- " (disk %d)" % didx)
+ " (disk %d)" % didx, errors.ECODE_INVAL)
try:
- ddict["size"] = utils.ParseUnit(ddict["size"])
+ ddict[constants.IDISK_SIZE] = \
+ utils.ParseUnit(ddict[constants.IDISK_SIZE])
except ValueError, err:
raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
- (didx, err))
- elif "adopt" in ddict:
+ (didx, err), errors.ECODE_INVAL)
+ elif constants.IDISK_ADOPT in ddict:
if mode == constants.INSTANCE_IMPORT:
raise errors.OpPrereqError("Disk adoption not allowed for instance"
- " import")
- ddict["size"] = 0
+ " import", errors.ECODE_INVAL)
+ ddict[constants.IDISK_SIZE] = 0
else:
raise errors.OpPrereqError("Missing size or adoption source for"
- " disk %d" % didx)
+ " disk %d" % didx, errors.ECODE_INVAL)
disks[didx] = ddict
- utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
+ if opts.tags is not None:
+ tags = opts.tags.split(",")
+ else:
+ tags = []
+
+ utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
if mode == constants.INSTANCE_CREATE:
else:
raise errors.ProgrammerError("Invalid creation mode %s" % mode)
- op = opcodes.OpCreateInstance(instance_name=instance,
+ op = opcodes.OpInstanceCreate(instance_name=instance,
disks=disks,
disk_template=opts.disk_template,
nics=nics,
+ conflicts_check=opts.conflicts_check,
pnode=pnode, snode=snode,
ip_check=opts.ip_check,
name_check=opts.name_check,
force_variant=force_variant,
src_node=src_node,
src_path=src_path,
+ tags=tags,
no_install=no_install,
- identify_defaults=identify_defaults)
+ identify_defaults=identify_defaults,
+ ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts)
return 0
# No need to use SSH
result = utils.RunCmd(cmd)
else:
- result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
+ result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
+ utils.ShellQuoteArgs(cmd))
if result.failed:
errmsg = ["Failed to run command %s" % result.cmd]
"""
# Pause watcher by acquiring an exclusive lock on watcher state file
self.feedback_fn("Blocking watcher")
- watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
+ watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
try:
# TODO: Currently, this just blocks. There's no timeout.
# TODO: Should it be a shared lock?
# Stop master daemons, so that no new jobs can come in and all running
# ones are finished
self.feedback_fn("Stopping master daemons")
- self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
+ self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
try:
# Stop daemons on all nodes
for node_name in self.online_nodes:
self.feedback_fn("Stopping daemons on %s" % node_name)
- self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
+ self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
# All daemons are shut down now
try:
# Start cluster again, master node last
for node_name in self.nonmaster_nodes + [self.master_node]:
self.feedback_fn("Starting daemons on %s" % node_name)
- self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
+ self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
finally:
# Resume watcher
watcher_block.Close()
if unitfields is None:
unitfields = []
- numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
- unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
+ numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
+ unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
format_fields = []
for field in fields:
if separator is None:
mlens = [0 for name in fields]
- format_str = ' '.join(format_fields)
+ format_str = " ".join(format_fields)
else:
format_str = separator.replace("%", "%%").join(format_fields)
for line in data:
args = []
if line is None:
- line = ['-' for _ in fields]
+ line = ["-" for _ in fields]
for idx in range(len(fields)):
if separator is None:
args.append(mlens[idx])
return result
+def _FormatBool(value):
+  """Formats a boolean value as a string.
+
+  @param value: Value treated as boolean
+  @return: "Y" if C{value} is true, "N" otherwise
+
+  """
+  if value:
+    return "Y"
+  return "N"
+
+
+#: Default formatting for query results; (callback, align right)
+#: L{constants.QFT_UNIT} is deliberately absent: it is handled in
+#: L{_GetColumnFormatter} because its formatting depends on the unit requested
+_DEFAULT_FORMAT_QUERY = {
+  constants.QFT_TEXT: (str, False),
+  constants.QFT_BOOL: (_FormatBool, False),
+  constants.QFT_NUMBER: (str, True),
+  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
+  constants.QFT_OTHER: (str, False),
+  constants.QFT_UNKNOWN: (str, False),
+  }
+
+
+def _GetColumnFormatter(fdef, override, unit):
+  """Returns formatting function for a field.
+
+  @type fdef: L{objects.QueryFieldDefinition}
+  @type override: dict
+  @param override: Dictionary for overriding field formatting functions,
+    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
+  @type unit: string
+  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
+  @rtype: tuple; (callable, bool)
+  @return: Returns the function to format a value (takes one parameter) and a
+    boolean for aligning the value on the right-hand side
+  @raise NotImplementedError: if the field kind has no known formatter
+
+  """
+  # Caller-provided overrides take precedence over the defaults
+  fmt = override.get(fdef.name, None)
+  if fmt is not None:
+    return fmt
+
+  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
+
+  if fdef.kind == constants.QFT_UNIT:
+    # Can't keep this information in the static dictionary
+    return (lambda value: utils.FormatUnit(value, unit), True)
+
+  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
+  if fmt is not None:
+    return fmt
+
+  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
+
+
+class _QueryColumnFormatter:
+  """Callable class for formatting fields of a query.
+
+  Wraps a plain formatting function, additionally reporting each field's
+  result status and rendering abnormal statuses as text.
+
+  """
+  def __init__(self, fn, status_fn, verbose):
+    """Initializes this class.
+
+    @type fn: callable
+    @param fn: Formatting function
+    @type status_fn: callable
+    @param status_fn: Function to report fields' status
+    @type verbose: boolean
+    @param verbose: whether to use verbose field descriptions or not
+
+    """
+    self._fn = fn
+    self._status_fn = status_fn
+    self._verbose = verbose
+
+  def __call__(self, data):
+    """Returns a field's string representation.
+
+    @param data: tuple of (status, value) for one field of a query result
+
+    """
+    (status, value) = data
+
+    # Report status
+    self._status_fn(status)
+
+    if status == constants.RS_NORMAL:
+      return self._fn(value)
+
+    # Abnormal statuses carry no value by contract
+    assert value is None, \
+      "Found value %r for abnormal status %s" % (value, status)
+
+    return FormatResultError(status, self._verbose)
+
+
+def FormatResultError(status, verbose):
+  """Formats result status other than L{constants.RS_NORMAL}.
+
+  @param status: The result status
+  @type verbose: boolean
+  @param verbose: Whether to return the verbose text
+  @return: Text of result status
+  @raise NotImplementedError: if the status has no description
+
+  """
+  assert status != constants.RS_NORMAL, \
+    "FormatResultError called with status equal to constants.RS_NORMAL"
+  try:
+    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
+  except KeyError:
+    raise NotImplementedError("Unknown status %s" % status)
+  else:
+    if verbose:
+      return verbose_text
+    return normal_text
+
+
+def FormatQueryResult(result, unit=None, format_override=None, separator=None,
+                      header=False, verbose=False):
+  """Formats data in L{objects.QueryResponse}.
+
+  @type result: L{objects.QueryResponse}
+  @param result: result of query operation
+  @type unit: string
+  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
+    see L{utils.text.FormatUnit}
+  @type format_override: dict
+  @param format_override: Dictionary for overriding field formatting functions,
+    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
+  @type separator: string or None
+  @param separator: String used to separate fields
+  @type header: bool
+  @param header: Whether to output header row
+  @type verbose: boolean
+  @param verbose: whether to use verbose field descriptions or not
+  @return: tuple of (status, lines); status is one of L{QR_NORMAL},
+    L{QR_UNKNOWN} or L{QR_INCOMPLETE}, lines are the formatted table rows
+
+  """
+  if unit is None:
+    # Default to megabytes for machine-parseable (separated) output and
+    # human-readable units otherwise
+    if separator:
+      unit = "m"
+    else:
+      unit = "h"
+
+  if format_override is None:
+    format_override = {}
+
+  # Per-status counters, filled in by _RecordStatus while formatting
+  stats = dict.fromkeys(constants.RS_ALL, 0)
+
+  def _RecordStatus(status):
+    if status in stats:
+      stats[status] += 1
+
+  columns = []
+  for fdef in result.fields:
+    assert fdef.title and fdef.name
+    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
+    columns.append(TableColumn(fdef.title,
+                               _QueryColumnFormatter(fn, _RecordStatus,
+                                                     verbose),
+                               align_right))
+
+  table = FormatTable(result.data, columns, header, separator)
+
+  # Collect statistics
+  assert len(stats) == len(constants.RS_ALL)
+  assert compat.all(count >= 0 for count in stats.values())
+
+  # Determine overall status. If there was no data, unknown fields must be
+  # detected via the field definitions.
+  if (stats[constants.RS_UNKNOWN] or
+      (not result.data and _GetUnknownFields(result.fields))):
+    status = QR_UNKNOWN
+  elif compat.any(count > 0 for key, count in stats.items()
+                  if key != constants.RS_NORMAL):
+    status = QR_INCOMPLETE
+  else:
+    status = QR_NORMAL
+
+  return (status, table)
+
+
+def _GetUnknownFields(fdefs):
+  """Returns list of unknown fields included in C{fdefs}.
+
+  @type fdefs: list of L{objects.QueryFieldDefinition}
+  @return: field definitions whose kind is L{constants.QFT_UNKNOWN}
+
+  """
+  return [fdef for fdef in fdefs
+          if fdef.kind == constants.QFT_UNKNOWN]
+
+
+def _WarnUnknownFields(fdefs):
+  """Prints a warning to stderr if a query included unknown fields.
+
+  @type fdefs: list of L{objects.QueryFieldDefinition}
+  @rtype: bool
+  @return: whether any unknown fields were found
+
+  """
+  unknown = _GetUnknownFields(fdefs)
+  if unknown:
+    ToStderr("Warning: Queried for unknown fields %s",
+             utils.CommaJoin(fdef.name for fdef in unknown))
+    return True
+
+  return False
+
+
+def GenericList(resource, fields, names, unit, separator, header, cl=None,
+                format_override=None, verbose=False, force_filter=False,
+                namefield=None, qfilter=None, isnumeric=False):
+  """Generic implementation for listing all items of a resource.
+
+  @param resource: One of L{constants.QR_VIA_LUXI}
+  @type fields: list of strings
+  @param fields: List of fields to query for
+  @type names: list of strings
+  @param names: Names of items to query for
+  @type unit: string or None
+  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
+    None for automatic choice (human-readable for non-separator usage,
+    otherwise megabytes); this is a one-letter string
+  @type separator: string or None
+  @param separator: String used to separate fields
+  @type header: bool
+  @param header: Whether to show header row
+  @type force_filter: bool
+  @param force_filter: Whether to always treat names as filter
+  @type format_override: dict
+  @param format_override: Dictionary for overriding field formatting functions,
+    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
+  @type verbose: boolean
+  @param verbose: whether to use verbose field descriptions or not
+  @type namefield: string
+  @param namefield: Name of field to use for simple filters (see
+    L{qlang.MakeFilter} for details)
+  @type qfilter: list or None
+  @param qfilter: Query filter (in addition to names)
+  @type isnumeric: bool
+  @param isnumeric: Whether the namefield's type is numeric, and therefore
+    any simple filters built by namefield should use integer values to
+    reflect that
+  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}
+
+  """
+  # An empty name list means "no name filtering"
+  if not names:
+    names = None
+
+  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
+                                isnumeric=isnumeric)
+
+  if qfilter is None:
+    qfilter = namefilter
+  elif namefilter is not None:
+    # Combine name filter with the caller-supplied filter
+    qfilter = [qlang.OP_AND, namefilter, qfilter]
+
+  if cl is None:
+    cl = GetClient()
+
+  response = cl.Query(resource, fields, qfilter)
+
+  found_unknown = _WarnUnknownFields(response.fields)
+
+  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
+                                     header=header,
+                                     format_override=format_override,
+                                     verbose=verbose)
+
+  for line in data:
+    ToStdout(line)
+
+  assert ((found_unknown and status == QR_UNKNOWN) or
+          (not found_unknown and status != QR_UNKNOWN))
+
+  if status == QR_UNKNOWN:
+    return constants.EXIT_UNKNOWN_FIELD
+
+  # TODO: Should the list command fail if not all data could be collected?
+  return constants.EXIT_SUCCESS
+
+
+def _FieldDescValues(fdef):
+  """Helper function for L{GenericListFields} to get query field description.
+
+  @type fdef: L{objects.QueryFieldDefinition}
+  @rtype: list
+  @return: [name, human-readable type, title, documentation]
+
+  """
+  return [
+    fdef.name,
+    # Fall back to the raw kind when the type has no human-readable name
+    _QFT_NAMES.get(fdef.kind, fdef.kind),
+    fdef.title,
+    fdef.doc,
+    ]
+
+
+def GenericListFields(resource, fields, separator, header, cl=None):
+  """Generic implementation for listing fields for a resource.
+
+  @param resource: One of L{constants.QR_VIA_LUXI}
+  @type fields: list of strings
+  @param fields: List of fields to query for
+  @type separator: string or None
+  @param separator: String used to separate fields
+  @type header: bool
+  @param header: Whether to show header row
+  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}
+
+  """
+  if cl is None:
+    cl = GetClient()
+
+  # An empty field list means "all fields"
+  if not fields:
+    fields = None
+
+  response = cl.QueryFields(resource, fields)
+
+  found_unknown = _WarnUnknownFields(response.fields)
+
+  columns = [
+    TableColumn("Name", str, False),
+    TableColumn("Type", str, False),
+    TableColumn("Title", str, False),
+    TableColumn("Description", str, False),
+    ]
+
+  rows = map(_FieldDescValues, response.fields)
+
+  for line in FormatTable(rows, columns, header, separator):
+    ToStdout(line)
+
+  if found_unknown:
+    return constants.EXIT_UNKNOWN_FIELD
+
+  return constants.EXIT_SUCCESS
+
+
+class TableColumn:
+  """Describes a column for L{FormatTable}.
+
+  """
+  def __init__(self, title, fn, align_right):
+    """Initializes this class.
+
+    @type title: string
+    @param title: Column title
+    @type fn: callable
+    @param fn: Formatting function
+    @type align_right: bool
+    @param align_right: Whether to align values on the right-hand side
+
+    """
+    self.title = title
+    self.format = fn
+    self.align_right = align_right
+
+
+def _GetColFormatString(width, align_right):
+  """Returns the format string for a field.
+
+  @type width: int
+  @param width: Minimum field width
+  @type align_right: bool
+  @param align_right: Whether to align the value on the right-hand side
+
+  """
+  if align_right:
+    sign = ""
+  else:
+    # A negative width in a %-format left-aligns the value
+    sign = "-"
+
+  return "%%%s%ss" % (sign, width)
+
+
+def FormatTable(rows, columns, header, separator):
+  """Formats data as a table.
+
+  @type rows: list of lists
+  @param rows: Row data, one list per row
+  @type columns: list of L{TableColumn}
+  @param columns: Column descriptions
+  @type header: bool
+  @param header: Whether to show header row
+  @type separator: string or None
+  @param separator: String used to separate columns
+  @return: list of strings, one per output line
+
+  """
+  if header:
+    data = [[col.title for col in columns]]
+    colwidth = [len(col.title) for col in columns]
+  else:
+    data = []
+    colwidth = [0 for _ in columns]
+
+  # Format row data
+  for row in rows:
+    assert len(row) == len(columns)
+
+    formatted = [col.format(value) for value, col in zip(row, columns)]
+
+    if separator is None:
+      # Update column widths
+      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
+        # Modifying a list's items while iterating is fine
+        colwidth[idx] = max(oldwidth, len(value))
+
+    data.append(formatted)
+
+  if separator is not None:
+    # Return early if a separator is used
+    return [separator.join(row) for row in data]
+
+  if columns and not columns[-1].align_right:
+    # Avoid unnecessary spaces at end of line
+    colwidth[-1] = 0
+
+  # Build format string
+  fmt = " ".join([_GetColFormatString(width, col.align_right)
+                  for col, width in zip(columns, colwidth)])
+
+  return [fmt % tuple(row) for row in data]
+
+
def FormatTimestamp(ts):
  """Formats a given timestamp.
  @return: a string with the formatted timestamp
  """
-  if not isinstance (ts, (tuple, list)) or len(ts) != 2:
-    return '?'
-  sec, usec = ts
-  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
+  # timestamps are (seconds, microseconds) pairs; anything else is unknown
+  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
+    return "?"
+
+  (sec, usecs) = ts
+  return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
  """
  value = str(value)
  if not value:
-    raise errors.OpPrereqError("Empty time specification passed")
+    raise errors.OpPrereqError("Empty time specification passed",
+                               errors.ECODE_INVAL)
+  # multiplier (in seconds) for each supported suffix character
  suffix_map = {
-    's': 1,
-    'm': 60,
-    'h': 3600,
-    'd': 86400,
-    'w': 604800,
+    "s": 1,
+    "m": 60,
+    "h": 3600,
+    "d": 86400,
+    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
-      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
+      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
+                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
-                                 " suffix passed)")
+                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
-      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
+      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
+                                 errors.ECODE_INVAL)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
-                   filter_master=False):
+                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.
  This function will also log a warning on stderr with the names of
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
+  @type nodegroup: string
+  @param nodegroup: If set, only return nodes in this node group
  """
  if cl is None:
    cl = GetClient()
-  if secondary_ips:
-    name_idx = 2
-  else:
-    name_idx = 0
+  # build a query filter from the requested constraints
+  qfilter = []
+
+  if nodes:
+    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
+
+  if nodegroup is not None:
+    # the group may be given either by name or by UUID
+    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
+                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])
  if filter_master:
-    master_node = cl.QueryConfigValues(["master_node"])[0]
-    filter_fn = lambda x: x != master_node
+    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
+
+  if qfilter:
+    if len(qfilter) > 1:
+      final_filter = [qlang.OP_AND] + qfilter
+    else:
+      assert len(qfilter) == 1
+      final_filter = qfilter[0]
  else:
-    filter_fn = lambda _: True
+    final_filter = None
+
+  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
+
+  # each query cell is a (status, value) pair; the helpers below unpack
+  # the value part of the relevant field
+  def _IsOffline(row):
+    (_, (_, offline), _) = row
+    return offline
+
+  def _GetName(row):
+    ((_, name), _, _) = row
+    return name
+
+  def _GetSip(row):
+    (_, _, (_, sip)) = row
+    return sip
+
+  (offline, online) = compat.partition(result.data, _IsOffline)
-  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
-                         use_locking=False)
-  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
-    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
-  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
+    ToStderr("Note: skipping offline node(s): %s" %
+             utils.CommaJoin(map(_GetName, offline)))
+
+  if secondary_ips:
+    fn = _GetSip
+  else:
+    fn = _GetName
+
+  return map(fn, online)
def _ToStream(stream, txt, *args):
  @param txt: the message
  """
-  if args:
-    args = tuple(args)
-    stream.write(txt % args)
-  else:
-    stream.write(txt)
-  stream.write('\n')
-  stream.flush()
+  try:
+    if args:
+      # txt is used as a %-format string only when arguments are given
+      args = tuple(args)
+      stream.write(txt % args)
+    else:
+      stream.write(txt)
+    stream.write("\n")
+    stream.flush()
+  except IOError, err:
+    if err.errno == errno.EPIPE:
+      # our terminal went away, we'll exit
+      sys.exit(constants.EXIT_FAILURE)
+    else:
+      # any other I/O error is a real problem and must propagate
+      raise
def ToStdout(txt, *args):
self.jobs = []
self.opts = opts
self.feedback_fn = feedback_fn
+ self._counter = itertools.count()
+
+  @staticmethod
+  def _IfName(name, fmt):
+    """Helper function for formatting name.
+
+    @type fmt: string
+    @param fmt: %-style format string with one placeholder for C{name}
+    @return: C{fmt % name} if C{name} is non-empty, otherwise ""
+
+    """
+    if name:
+      return fmt % name
+
+    return ""
  def QueueJob(self, name, *ops):
    """Record a job for later submit.
    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
+
    """
    SetGenericOpcodeOpts(ops, self.opts)
-    self.queue.append((name, ops))
+    # each queued job gets a unique, increasing index from the counter
+    self.queue.append((self._counter.next(), name, ops))
+
+  def AddJobId(self, name, status, job_id):
+    """Adds a job ID to the internal queue.
+
+    @param name: a description of the job, may be None
+    @param status: the submission status of the job
+    @param job_id: the ID of the already-submitted job
+
+    """
+    self.jobs.append((self._counter.next(), status, job_id, name))
  def SubmitPending(self, each=False):
    """Submit all pending jobs.
    """
    if each:
      results = []
-      for row in self.queue:
+      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
-        results.append([True, self.cl.SubmitJob(row[1])])
+        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
-      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
-    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
-                                                            self.queue)):
+      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
+    # pair each submission result with its queued (index, name, ops) entry
+    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))
  def _ChooseJob(self):
    """
    assert self.jobs, "_ChooseJob called with empty job list"
-    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
+    # query at most _CHOOSE_BATCH jobs at a time to bound the request size
+    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
+                               ["status"])
    assert result
    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
-                        constants.JOB_STATUS_WAITLOCK,
+                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
# first, remove any non-submitted jobs
self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
for idx, _, jid, name in failures:
- ToStderr("Failed to submit job for %s: %s", name, jid)
+ ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
results.append((idx, False, jid))
while self.jobs:
(idx, _, jid, name) = self._ChooseJob()
- ToStdout("Waiting for job %s for %s...", jid, name)
+ ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
try:
job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
success = True
except errors.JobLost, err:
_, job_result = FormatError(err)
- ToStderr("Job %s for %s has been archived, cannot check its result",
- jid, name)
+ ToStderr("Job %s%s has been archived, cannot check its result",
+ jid, self._IfName(name, " for %s"))
success = False
except (errors.GenericError, luxi.ProtocolError), err:
_, job_result = FormatError(err)
success = False
# the error message will always be shown, verbose or not
- ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
+ ToStderr("Job %s%s has failed: %s",
+ jid, self._IfName(name, " for %s"), job_result)
results.append((idx, success, job_result))
else:
ToStderr("Failure for %s: %s", name, result)
return [row[1:3] for row in self.jobs]
+
+
+def FormatParamsDictInfo(param_dict, actual):
+  """Formats a parameter dictionary.
+
+  @type param_dict: dict
+  @param param_dict: the own parameters
+  @type actual: dict
+  @param actual: the current parameter set (including defaults)
+  @rtype: dict
+  @return: dictionary where the value of each parameter is either a fully
+      formatted string or a dictionary containing formatted strings
+
+  """
+  ret = {}
+  for (key, data) in actual.items():
+    if isinstance(data, dict) and data:
+      # non-empty dict values are formatted recursively
+      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
+    else:
+      # values missing from the own parameters are marked as defaults
+      ret[key] = str(param_dict.get(key, "default (%s)" % data))
+  return ret
+
+
+def _FormatListInfoDefault(data, def_data):
+  """Formats a list, falling back to the defaults when unset.
+
+  @param data: the list to format, or None to use the default
+  @param def_data: the default list, used when C{data} is None
+  @rtype: string
+
+  """
+  if data is not None:
+    ret = utils.CommaJoin(data)
+  else:
+    ret = "default (%s)" % utils.CommaJoin(def_data)
+  return ret
+
+
+def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
+  """Formats an instance policy.
+
+  @type custom_ipolicy: dict
+  @param custom_ipolicy: own policy
+  @type eff_ipolicy: dict
+  @param eff_ipolicy: effective policy (including defaults); ignored for
+      cluster
+  @type iscluster: bool
+  @param iscluster: the policy is at cluster level
+  @rtype: list of pairs
+  @return: formatted data, suitable for L{PrintGenericInfo}
+
+  """
+  if iscluster:
+    # for a cluster the custom policy is also the effective one
+    eff_ipolicy = custom_ipolicy
+
+  minmax_out = []
+  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
+  if custom_minmax:
+    for (k, minmax) in enumerate(custom_minmax):
+      minmax_out.append([
+        ("%s/%s" % (key, k),
+         FormatParamsDictInfo(minmax[key], minmax[key]))
+        for key in constants.ISPECS_MINMAX_KEYS
+        ])
+  else:
+    # no custom bounds: show the effective ones, all marked as defaults
+    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
+      minmax_out.append([
+        ("%s/%s" % (key, k),
+         FormatParamsDictInfo({}, minmax[key]))
+        for key in constants.ISPECS_MINMAX_KEYS
+        ])
+  ret = [("bounds specs", minmax_out)]
+
+  if iscluster:
+    stdspecs = custom_ipolicy[constants.ISPECS_STD]
+    ret.append(
+      (constants.ISPECS_STD,
+       FormatParamsDictInfo(stdspecs, stdspecs))
+      )
+
+  ret.append(
+    ("allowed disk templates",
+     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
+                            eff_ipolicy[constants.IPOLICY_DTS]))
+    )
+  ret.extend([
+    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
+    for key in constants.IPOLICY_PARAMETERS
+    ])
+  return ret
+
+
+def _PrintSpecsParameters(buf, specs):
+  """Writes the sorted "key=value" pairs of a spec, comma-separated.
+
+  """
+  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
+  buf.write(",".join(values))
+
+
+def PrintIPolicyCommand(buf, ipolicy, isgroup):
+  """Print the command option used to generate the given instance policy.
+
+  Currently only the parts dealing with specs are supported.
+
+  @type buf: StringIO
+  @param buf: stream to write into
+  @type ipolicy: dict
+  @param ipolicy: instance policy
+  @type isgroup: bool
+  @param isgroup: whether the policy is at group level
+
+  """
+  if not isgroup:
+    # "std" specs exist only at cluster level
+    stdspecs = ipolicy.get("std")
+    if stdspecs:
+      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
+      _PrintSpecsParameters(buf, stdspecs)
+  minmaxes = ipolicy.get("minmax", [])
+  first = True
+  for minmax in minmaxes:
+    minspecs = minmax.get("min")
+    maxspecs = minmax.get("max")
+    if minspecs and maxspecs:
+      if first:
+        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
+        first = False
+      else:
+        # additional min/max pairs are separated by "//" on the command line
+        buf.write("//")
+      buf.write("min:")
+      _PrintSpecsParameters(buf, minspecs)
+      buf.write("/max:")
+      _PrintSpecsParameters(buf, maxspecs)
+
+
+def ConfirmOperation(names, list_type, text, extra=""):
+  """Ask the user to confirm an operation on a list of list_type.
+
+  This function is used to request confirmation for doing an operation
+  on a given list of list_type.
+
+  @type names: list
+  @param names: the list of names that we display when
+      we ask for confirmation
+  @type list_type: str
+  @param list_type: Human readable name for elements in the list (e.g. nodes)
+  @type text: str
+  @param text: the operation that the user should confirm
+  @rtype: boolean
+  @return: True or False depending on user's confirmation.
+
+  """
+  count = len(names)
+  msg = ("The %s will operate on %d %s.\n%s"
+         "Do you want to continue?" % (text, count, list_type, extra))
+  affected = (("\nAffected %s:\n" % list_type) +
+              "\n".join(["  %s" % name for name in names]))
+
+  choices = [("y", True, "Yes, execute the %s" % text),
+             ("n", False, "No, abort the %s" % text)]
+
+  if count > 20:
+    # for long lists, let the user view the affected items on demand
+    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
+    question = msg
+  else:
+    question = msg + affected
+
+  choice = AskUser(question, choices)
+  if choice == "v":
+    # the list was shown; ask again without the "view" choice
+    choices.pop(1)
+    choice = AskUser(msg + affected, choices)
+  return choice
+
+
+def _MaybeParseUnit(elements):
+  """Parses and returns an array of potential values with units.
+
+  @type elements: dict
+  @param elements: string values, possibly carrying a unit suffix
+  @rtype: dict
+  @return: the input keys mapped to parsed sizes; the literal value
+      L{constants.VALUE_DEFAULT} is passed through unchanged
+
+  """
+  parsed = {}
+  for k, v in elements.items():
+    if v == constants.VALUE_DEFAULT:
+      parsed[k] = v
+    else:
+      parsed[k] = utils.ParseUnit(v)
+  return parsed
+
+
+def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
+                             ispecs_disk_count, ispecs_disk_size,
+                             ispecs_nic_count, group_ipolicy, fill_all):
+  """Fills an instance policy from the old-style "--specs-..." options.
+
+  Each C{ispecs_...} argument is a dict keyed by "min"/"max"/"std"; the
+  result is stored into C{ipolicy} in transposed form (min/max/std
+  mapping to per-parameter values).
+
+  @param ipolicy: output policy dict, modified in place
+  @type group_ipolicy: bool
+  @param group_ipolicy: whether the policy is at group level
+  @type fill_all: bool
+  @param fill_all: whether missing values should be filled from the defaults
+
+  """
+  try:
+    if ispecs_mem_size:
+      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
+    if ispecs_disk_size:
+      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
+  except (TypeError, ValueError, errors.UnitParseError), err:
+    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
+                               " in policy: %s" %
+                               (ispecs_disk_size, ispecs_mem_size, err),
+                               errors.ECODE_INVAL)
+
+  # prepare ipolicy dict
+  ispecs_transposed = {
+    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
+    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
+    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
+    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
+    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
+    }
+
+  # first, check that the values given are correct
+  if group_ipolicy:
+    forced_type = TISPECS_GROUP_TYPES
+  else:
+    forced_type = TISPECS_CLUSTER_TYPES
+  for specs in ispecs_transposed.values():
+    assert type(specs) is dict
+    utils.ForceDictType(specs, forced_type)
+
+  # then transpose
+  ispecs = {
+    constants.ISPECS_MIN: {},
+    constants.ISPECS_MAX: {},
+    constants.ISPECS_STD: {},
+    }
+  for (name, specs) in ispecs_transposed.iteritems():
+    assert name in constants.ISPECS_PARAMETERS
+    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
+      assert key in ispecs
+      ispecs[key][name] = val
+  minmax_out = {}
+  for key in constants.ISPECS_MINMAX_KEYS:
+    if fill_all:
+      minmax_out[key] = \
+        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
+    else:
+      minmax_out[key] = ispecs[key]
+  # the old-style options can only express a single min/max pair
+  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
+  if fill_all:
+    ipolicy[constants.ISPECS_STD] = \
+      objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
+                       ispecs[constants.ISPECS_STD])
+  else:
+    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
+
+
+def _ParseSpecUnit(spec, keyname):
+  """Parses size values carrying units in an instance spec.
+
+  @type spec: dict
+  @param spec: the spec to parse; not modified
+  @type keyname: string
+  @param keyname: name of the spec, used in error messages only
+  @rtype: dict
+  @return: a copy of C{spec} with disk and memory sizes parsed to integers
+
+  """
+  ret = spec.copy()
+  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
+    if k in ret:
+      try:
+        ret[k] = utils.ParseUnit(ret[k])
+      except (TypeError, ValueError, errors.UnitParseError), err:
+        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
+                                    " specs: %s" % (k, ret[k], keyname, err)),
+                                   errors.ECODE_INVAL)
+  return ret
+
+
+def _ParseISpec(spec, keyname, required):
+  """Parses and validates one instance specification.
+
+  @type keyname: string
+  @param keyname: name of the spec, used in error messages only
+  @type required: bool
+  @param required: whether all spec parameters must be present
+  @rtype: dict
+
+  """
+  ret = _ParseSpecUnit(spec, keyname)
+  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
+  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
+  if required and missing:
+    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
+                               (keyname, utils.CommaJoin(missing)),
+                               errors.ECODE_INVAL)
+  return ret
+
+
+def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
+  """Returns the allowed value used as bounds specs, if any.
+
+  @return: the key from C{allowed_values} when C{minmax_ispecs} consists
+      of exactly one empty spec keyed by it, else C{None}
+
+  """
+  ret = None
+  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
+      len(minmax_ispecs[0]) == 1):
+    for (key, spec) in minmax_ispecs[0].items():
+      # This loop is executed exactly once
+      if key in allowed_values and not spec:
+        ret = key
+  return ret
+
+
+def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
+                            group_ipolicy, allowed_values):
+  """Fills an instance policy from the "--ipolicy-...-specs" options.
+
+  @param ipolicy_out: output policy dict, modified in place
+
+  """
+  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
+  if found_allowed is not None:
+    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
+  elif minmax_ispecs is not None:
+    minmax_out = []
+    for mmpair in minmax_ispecs:
+      mmpair_out = {}
+      for (key, spec) in mmpair.items():
+        if key not in constants.ISPECS_MINMAX_KEYS:
+          msg = "Invalid key in bounds instance specifications: %s" % key
+          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+        # all parameters are mandatory in min/max specs
+        mmpair_out[key] = _ParseISpec(spec, key, True)
+      minmax_out.append(mmpair_out)
+    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
+  if std_ispecs is not None:
+    assert not group_ipolicy # This is not an option for gnt-group
+    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
+
+
+def CreateIPolicyFromOpts(ispecs_mem_size=None,
+                          ispecs_cpu_count=None,
+                          ispecs_disk_count=None,
+                          ispecs_disk_size=None,
+                          ispecs_nic_count=None,
+                          minmax_ispecs=None,
+                          std_ispecs=None,
+                          ipolicy_disk_templates=None,
+                          ipolicy_vcpu_ratio=None,
+                          ipolicy_spindle_ratio=None,
+                          group_ipolicy=False,
+                          allowed_values=None,
+                          fill_all=False):
+  """Creation of instance policy based on command line options.
+
+  @param fill_all: whether for cluster policies we should ensure that
+    all values are filled
+
+  """
+  assert not (fill_all and allowed_values)
+
+  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
+                 ispecs_disk_size or ispecs_nic_count)
+  # old-style and new-style spec options are mutually exclusive
+  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
+    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
+                               " together with any --ipolicy-xxx-specs option",
+                               errors.ECODE_INVAL)
+
+  ipolicy_out = objects.MakeEmptyIPolicy()
+  if split_specs:
+    assert fill_all
+    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
+                             ispecs_disk_count, ispecs_disk_size,
+                             ispecs_nic_count, group_ipolicy, fill_all)
+  elif (minmax_ispecs is not None or std_ispecs is not None):
+    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
+                            group_ipolicy, allowed_values)
+
+  if ipolicy_disk_templates is not None:
+    if allowed_values and ipolicy_disk_templates in allowed_values:
+      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
+    else:
+      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
+  if ipolicy_vcpu_ratio is not None:
+    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
+  if ipolicy_spindle_ratio is not None:
+    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
+
+  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
+
+  # only cluster policies are completed with the built-in defaults
+  if not group_ipolicy and fill_all:
+    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
+
+  return ipolicy_out
+
+
+def _SerializeGenericInfo(buf, data, level, afterkey=False):
+  """Formatting core of L{PrintGenericInfo}.
+
+  @param buf: (string) stream to accumulate the result into
+  @param data: data to format
+  @type level: int
+  @param level: depth in the data hierarchy, used for indenting
+  @type afterkey: bool
+  @param afterkey: True when we are in the middle of a line after a key (used
+      to properly add newlines or indentation)
+
+  """
+  baseind = " "
+  if isinstance(data, dict):
+    # dictionaries: one "key: value" line per entry, keys sorted
+    if not data:
+      buf.write("\n")
+    else:
+      if afterkey:
+        buf.write("\n")
+        doindent = True
+      else:
+        doindent = False
+      for key in sorted(data):
+        if doindent:
+          buf.write(baseind * level)
+        else:
+          # the first entry continues the current line; indent from then on
+          doindent = True
+        buf.write(key)
+        buf.write(": ")
+        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
+  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
+    # list of tuples (an ordered dictionary)
+    if afterkey:
+      buf.write("\n")
+      doindent = True
+    else:
+      doindent = False
+    for (key, val) in data:
+      if doindent:
+        buf.write(baseind * level)
+      else:
+        doindent = True
+      buf.write(key)
+      buf.write(": ")
+      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
+  elif isinstance(data, list):
+    # plain lists: one "- item" line per element
+    if not data:
+      buf.write("\n")
+    else:
+      if afterkey:
+        buf.write("\n")
+        doindent = True
+      else:
+        doindent = False
+      for item in data:
+        if doindent:
+          buf.write(baseind * level)
+        else:
+          doindent = True
+        buf.write("-")
+        buf.write(baseind[1:])
+        _SerializeGenericInfo(buf, item, level + 1)
+  else:
+    # This branch should be only taken for strings, but it's practically
+    # impossible to guarantee that no other types are produced somewhere
+    buf.write(str(data))
+    buf.write("\n")
+
+
+def PrintGenericInfo(data):
+  """Print information formatted according to the hierarchy.
+
+  The output is a valid YAML string.
+
+  @param data: the data to print. It's a hierarchical structure whose elements
+      can be:
+        - dictionaries, where keys are strings and values are of any of the
+          types listed here
+        - lists of pairs (key, value), where key is a string and value is of
+          any of the types listed here; it's a way to encode ordered
+          dictionaries
+        - lists of any of the types listed here
+        - strings
+
+  """
+  buf = StringIO()
+  _SerializeGenericInfo(buf, data, 0)
+  # drop the trailing newline written by the serializer; ToStdout adds its own
+  ToStdout(buf.getvalue().rstrip("\n"))