#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
+from ganeti import objects
+from ganeti import pathutils
from optparse import (OptionParser, TitledHelpFormatter,
Option, OptionValueError)
__all__ = [
# Command line options
+ "ABSOLUTE_OPT",
"ADD_UIDS_OPT",
+ "ADD_RESERVED_IPS_OPT",
"ALLOCATABLE_OPT",
"ALLOC_POLICY_OPT",
"ALL_OPT",
"DST_NODE_OPT",
"EARLY_RELEASE_OPT",
"ENABLED_HV_OPT",
+ "ENABLED_DISK_TEMPLATES_OPT",
"ERROR_CODES_OPT",
+ "FAILURE_ONLY_OPT",
"FIELDS_OPT",
"FILESTORE_DIR_OPT",
"FILESTORE_DRIVER_OPT",
"FORCE_FILTER_OPT",
"FORCE_OPT",
"FORCE_VARIANT_OPT",
+ "GATEWAY_OPT",
+ "GATEWAY6_OPT",
"GLOBAL_FILEDIR_OPT",
"HID_OS_OPT",
"GLOBAL_SHARED_FILEDIR_OPT",
"IGNORE_REMOVE_FAILURES_OPT",
"IGNORE_SECONDARIES_OPT",
"IGNORE_SIZE_OPT",
+ "INCLUDEDEFAULTS_OPT",
"INTERVAL_OPT",
"MAC_PREFIX_OPT",
"MAINTAIN_NODE_HEALTH_OPT",
"MC_OPT",
"MIGRATION_MODE_OPT",
"NET_OPT",
+ "NETWORK_OPT",
+ "NETWORK6_OPT",
"NEW_CLUSTER_CERT_OPT",
"NEW_CLUSTER_DOMAIN_SECRET_OPT",
"NEW_CONFD_HMAC_KEY_OPT",
"NEW_RAPI_CERT_OPT",
+ "NEW_PRIMARY_OPT",
"NEW_SECONDARY_OPT",
"NEW_SPICE_CERT_OPT",
"NIC_PARAMS_OPT",
+ "NOCONFLICTSCHECK_OPT",
"NODE_FORCE_JOIN_OPT",
"NODE_LIST_OPT",
"NODE_PLACEMENT_OPT",
"NONICS_OPT",
"NONLIVE_OPT",
"NONPLUS1_OPT",
+ "NORUNTIME_CHGS_OPT",
"NOSHUTDOWN_OPT",
"NOSTART_OPT",
"NOSSH_KEYCHECK_OPT",
"PREALLOC_WIPE_DISKS_OPT",
"PRIMARY_IP_VERSION_OPT",
"PRIMARY_ONLY_OPT",
+ "PRINT_JOBID_OPT",
"PRIORITY_OPT",
"RAPI_CERT_OPT",
"READD_OPT",
+ "REASON_OPT",
"REBOOT_TYPE_OPT",
"REMOVE_INSTANCE_OPT",
+ "REMOVE_RESERVED_IPS_OPT",
"REMOVE_UIDS_OPT",
"RESERVED_LVS_OPT",
+ "RUNTIME_MEM_OPT",
"ROMAN_OPT",
"SECONDARY_IP_OPT",
"SECONDARY_ONLY_OPT",
"SELECT_OS_OPT",
"SEP_OPT",
"SHOWCMD_OPT",
+ "SHOW_MACHINE_OPT",
"SHUTDOWN_TIMEOUT_OPT",
"SINGLE_NODE_OPT",
"SPECS_CPU_COUNT_OPT",
"SPECS_DISK_SIZE_OPT",
"SPECS_MEM_SIZE_OPT",
"SPECS_NIC_COUNT_OPT",
+ "SPLIT_ISPECS_OPTS",
+ "IPOLICY_STD_SPECS_OPT",
+ "IPOLICY_DISK_TEMPLATES",
+ "IPOLICY_VCPU_RATIO",
"SPICE_CACERT_OPT",
"SPICE_CERT_OPT",
"SRC_DIR_OPT",
"SRC_NODE_OPT",
"SUBMIT_OPT",
+ "SUBMIT_OPTS",
"STARTUP_PAUSED_OPT",
"STATIC_OPT",
"SYNC_OPT",
"USE_REPL_NET_OPT",
"VERBOSE_OPT",
"VG_NAME_OPT",
+ "WFSYNC_OPT",
"YES_DOIT_OPT",
"DISK_STATE_OPT",
"HV_STATE_OPT",
"IGNORE_IPOLICY_OPT",
+ "INSTANCE_POLICY_OPTS",
# Generic functions for CLI programs
"ConfirmOperation",
+ "CreateIPolicyFromOpts",
"GenericMain",
"GenericInstanceCreate",
"GenericList",
"ToStderr", "ToStdout",
"FormatError",
"FormatQueryResult",
- "FormatParameterDict",
+ "FormatParamsDictInfo",
+ "FormatPolicyInfo",
+ "PrintIPolicyCommand",
+ "PrintGenericInfo",
"GenerateTable",
"AskUser",
"FormatTimestamp",
"ARGS_MANY_INSTANCES",
"ARGS_MANY_NODES",
"ARGS_MANY_GROUPS",
+ "ARGS_MANY_NETWORKS",
"ARGS_NONE",
"ARGS_ONE_INSTANCE",
"ARGS_ONE_NODE",
"ARGS_ONE_GROUP",
"ARGS_ONE_OS",
+ "ARGS_ONE_NETWORK",
"ArgChoice",
"ArgCommand",
"ArgFile",
"ArgHost",
"ArgInstance",
"ArgJobId",
+ "ArgNetwork",
"ArgNode",
"ArgOs",
+ "ArgExtStorage",
"ArgSuggest",
"ArgUnknown",
"OPT_COMPL_INST_ADD_NODES",
"OPT_COMPL_ONE_INSTANCE",
"OPT_COMPL_ONE_NODE",
"OPT_COMPL_ONE_NODEGROUP",
+ "OPT_COMPL_ONE_NETWORK",
"OPT_COMPL_ONE_OS",
+ "OPT_COMPL_ONE_EXTSTORAGE",
"cli_option",
"SplitNodeOption",
"CalculateOSNames",
_CHOOSE_BATCH = 25
+# constants used to create InstancePolicy dictionary
+TISPECS_GROUP_TYPES = {
+ constants.ISPECS_MIN: constants.VTYPE_INT,
+ constants.ISPECS_MAX: constants.VTYPE_INT,
+ }
+
+TISPECS_CLUSTER_TYPES = {
+ constants.ISPECS_MIN: constants.VTYPE_INT,
+ constants.ISPECS_MAX: constants.VTYPE_INT,
+ constants.ISPECS_STD: constants.VTYPE_INT,
+ }
+
+#: User-friendly names for query2 field types
+_QFT_NAMES = {
+ constants.QFT_UNKNOWN: "Unknown",
+ constants.QFT_TEXT: "Text",
+ constants.QFT_BOOL: "Boolean",
+ constants.QFT_NUMBER: "Number",
+ constants.QFT_UNIT: "Storage size",
+ constants.QFT_TIMESTAMP: "Timestamp",
+ constants.QFT_OTHER: "Custom",
+ }
+
+
class _Argument:
def __init__(self, min=0, max=None): # pylint: disable=W0622
self.min = min
"""
+class ArgNetwork(_Argument):
+ """Network argument.
+
+ """
+
+
class ArgGroup(_Argument):
"""Node group argument.
"""
+class ArgExtStorage(_Argument):
+ """ExtStorage argument.
+
+ """
+
+
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
+ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
+ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
kind = opts.tag_type
if kind == constants.TAG_CLUSTER:
- retval = kind, kind
+ retval = kind, None
elif kind in (constants.TAG_NODEGROUP,
constants.TAG_NODE,
+ constants.TAG_NETWORK,
constants.TAG_INSTANCE):
if not args:
- raise errors.OpPrereqError("no arguments passed to the command")
+ raise errors.OpPrereqError("no arguments passed to the command",
+ errors.ECODE_INVAL)
name = args.pop(0)
retval = kind, name
else:
"""
kind, name = _ExtractTagsObject(opts, args)
- cl = GetClient()
+ cl = GetClient(query=True)
result = cl.QueryTags(kind, name)
result = list(result)
result.sort()
kind, name = _ExtractTagsObject(opts, args)
_ExtendTags(opts, args)
if not args:
- raise errors.OpPrereqError("No tags to be added")
+ raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
- SubmitOpCode(op, opts=opts)
+ SubmitOrSend(op, opts)
def RemoveTags(opts, args):
kind, name = _ExtractTagsObject(opts, args)
_ExtendTags(opts, args)
if not args:
- raise errors.OpPrereqError("No tags to be removed")
+ raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
- SubmitOpCode(op, opts=opts)
+ SubmitOrSend(op, opts)
def check_unit(option, opt, value): # pylint: disable=W0613
raise OptionValueError("option %s: %s" % (opt, err))
-def _SplitKeyVal(opt, data):
+def _SplitKeyVal(opt, data, parse_prefixes):
"""Convert a KeyVal string into a dict.
This function will convert a key=val[,...] string into a dict. Empty
values will be converted specially: keys which have the prefix 'no_'
- will have the value=False and the prefix stripped, the others will
+ will have the value=False and the prefix stripped, keys with the prefix
+ "-" will have value=None and the prefix stripped, and the others will
have value=True.
@type opt: string
data, used in building error messages
@type data: string
@param data: a string of the format key=val,key=val,...
+ @type parse_prefixes: bool
+ @param parse_prefixes: whether to handle prefixes specially
@rtype: dict
@return: {key=val, key=val}
@raises errors.ParameterError: if there are duplicate keys
for elem in utils.UnescapeAndSplit(data, sep=","):
if "=" in elem:
key, val = elem.split("=", 1)
- else:
+ elif parse_prefixes:
if elem.startswith(NO_PREFIX):
key, val = elem[len(NO_PREFIX):], False
elif elem.startswith(UN_PREFIX):
key, val = elem[len(UN_PREFIX):], None
else:
key, val = elem, True
+ else:
+ raise errors.ParameterError("Missing value for key '%s' in option %s" %
+ (elem, opt))
if key in kv_dict:
raise errors.ParameterError("Duplicate key '%s' in option %s" %
(key, opt))
return kv_dict
-def check_ident_key_val(option, opt, value): # pylint: disable=W0613
- """Custom parser for ident:key=val,key=val options.
+def _SplitIdentKeyVal(opt, value, parse_prefixes):
+ """Helper function to parse "ident:key=val,key=val" options.
- This will store the parsed values as a tuple (ident, {key: val}). As such,
- multiple uses of this option via action=append is possible.
+ @type opt: string
+ @param opt: option name, used in error messages
+ @type value: string
+ @param value: expected to be in the format "ident:key=val,key=val,..."
+ @type parse_prefixes: bool
+ @param parse_prefixes: whether to handle prefixes specially (see
+ L{_SplitKeyVal})
+ @rtype: tuple
+ @return: (ident, {key=val, key=val})
+ @raises errors.ParameterError: in case of duplicates or other parsing errors
"""
if ":" not in value:
else:
ident, rest = value.split(":", 1)
- if ident.startswith(NO_PREFIX):
+ if parse_prefixes and ident.startswith(NO_PREFIX):
if rest:
msg = "Cannot pass options when removing parameter groups: %s" % value
raise errors.ParameterError(msg)
retval = (ident[len(NO_PREFIX):], False)
- elif ident.startswith(UN_PREFIX):
+ elif (parse_prefixes and ident.startswith(UN_PREFIX) and
+ (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
if rest:
msg = "Cannot pass options when removing parameter groups: %s" % value
raise errors.ParameterError(msg)
retval = (ident[len(UN_PREFIX):], None)
else:
- kv_dict = _SplitKeyVal(opt, rest)
+ kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
retval = (ident, kv_dict)
return retval
+def check_ident_key_val(option, opt, value): # pylint: disable=W0613
+ """Custom parser for ident:key=val,key=val options.
+
+ This will store the parsed values as a tuple (ident, {key: val}). As such,
+ multiple uses of this option via action=append is possible.
+
+ """
+ return _SplitIdentKeyVal(opt, value, True)
+
+
def check_key_val(option, opt, value): # pylint: disable=W0613
"""Custom parser class for key=val,key=val options.
This will store the parsed values as a dict {key: val}.
"""
- return _SplitKeyVal(opt, value)
+ return _SplitKeyVal(opt, value, True)
+
+
+def _SplitListKeyVal(opt, value):
+  """Helper to parse "ident:key=val,key=val/ident:key=val" options.
+
+  @type opt: string
+  @param opt: option name, used in error messages
+  @type value: string
+  @param value: expected in the format
+      "ident:key=val,key=val/ident:key=val,..."
+  @rtype: dict
+  @return: {ident: {key: val, key: val}, ident: {key: val}}
+  @raises errors.ParameterError: in case of duplicate identifiers, empty
+      sections or other parsing errors
+
+  """
+  retval = {}
+  for elem in value.split("/"):
+    if not elem:
+      raise errors.ParameterError("Empty section in option '%s'" % opt)
+    # prefix parsing is disabled here: idents like "-1" must stay literal
+    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
+    if ident in retval:
+      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
+             (ident, opt, elem))
+      raise errors.ParameterError(msg)
+    retval[ident] = valdict
+  return retval
+
+
+def check_multilist_ident_key_val(_, opt, value):
+ """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
+
+ @rtype: list of dictionary
+ @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
+
+ """
+ retval = []
+ for line in value.split("//"):
+ retval.append(_SplitListKeyVal(opt, line))
+ return retval
def check_bool(option, opt, value): # pylint: disable=W0613
raise errors.ParameterError("Invalid boolean value '%s'" % value)
+def check_list(option, opt, value): # pylint: disable=W0613
+ """Custom parser for comma-separated lists.
+
+ """
+ # we have to make this explicit check since "".split(",") is [""],
+ # not an empty list :(
+ if not value:
+ return []
+ else:
+ return utils.UnescapeAndSplit(value)
+
+
+def check_maybefloat(option, opt, value): # pylint: disable=W0613
+ """Custom parser for float numbers which might be also defaults.
+
+ """
+ value = value.lower()
+
+ if value == constants.VALUE_DEFAULT:
+ return value
+ else:
+ return float(value)
+
+
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
OPT_COMPL_ONE_NODE,
OPT_COMPL_ONE_INSTANCE,
OPT_COMPL_ONE_OS,
+ OPT_COMPL_ONE_EXTSTORAGE,
OPT_COMPL_ONE_IALLOCATOR,
+ OPT_COMPL_ONE_NETWORK,
OPT_COMPL_INST_ADD_NODES,
- OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
+ OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
-OPT_COMPL_ALL = frozenset([
+OPT_COMPL_ALL = compat.UniqueFrozenset([
OPT_COMPL_MANY_NODES,
OPT_COMPL_ONE_NODE,
OPT_COMPL_ONE_INSTANCE,
OPT_COMPL_ONE_OS,
+ OPT_COMPL_ONE_EXTSTORAGE,
OPT_COMPL_ONE_IALLOCATOR,
+ OPT_COMPL_ONE_NETWORK,
OPT_COMPL_INST_ADD_NODES,
OPT_COMPL_ONE_NODEGROUP,
])
"completion_suggest",
]
TYPES = Option.TYPES + (
+ "multilistidentkeyval",
"identkeyval",
"keyval",
"unit",
"bool",
+ "list",
+ "maybefloat",
)
TYPE_CHECKER = Option.TYPE_CHECKER.copy()
+ TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
TYPE_CHECKER["identkeyval"] = check_ident_key_val
TYPE_CHECKER["keyval"] = check_key_val
TYPE_CHECKER["unit"] = check_unit
TYPE_CHECKER["bool"] = check_bool
+ TYPE_CHECKER["list"] = check_list
+ TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
help=("Submit the job and return the job ID, but"
" don't wait for the job to finish"))
+PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
+ default=False, action="store_true",
+ help=("Additionally print the job as first line"
+ " on stdout (for scripting)."))
+
SYNC_OPT = cli_option("--sync", dest="do_locking",
default=False, action="store_true",
help=("Grab locks while doing the queries"
DRY_RUN_OPT = cli_option("--dry-run", default=False,
action="store_true",
help=("Do not execute the operation, just run the"
- " check steps and verify it it could be"
+ " check steps and verify if it could be"
" executed"))
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
default=True, action="store_false",
help="Don't wait for sync (DANGEROUS!)")
+WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
+ default=False, action="store_true",
+ help="Wait for disks to sync")
+
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
action="store_true", default=False,
help="Enable offline instance")
completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
- metavar="<NAME>",
- help="Set the default instance allocator plugin",
- default=None, type="string",
- completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
+ metavar="<NAME>",
+ help="Set the default instance"
+ " allocator plugin",
+ default=None, type="string",
+ completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
metavar="<os>",
completion_suggest=OPT_COMPL_ONE_OS)
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
- type="keyval", default={},
- help="OS parameters")
+ type="keyval", default={},
+ help="OS parameters")
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
action="store_true", default=False,
help="Do not install the OS (will"
" enable no-start)")
+NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
+ dest="allow_runtime_chgs",
+ default=True, action="store_false",
+ help="Don't allow runtime changes")
+
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
type="keyval", default={},
help="Backend parameters")
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
type="keyval", default={},
- help="Memory count specs: min, max, std"
- " (in MB)")
+ help="Memory size specs: list of key=value,"
+ " where key is one of min, max, std"
+ " (in MB or using a unit)")
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
type="keyval", default={},
- help="CPU count specs: min, max, std")
+ help="CPU count specs: list of key=value,"
+ " where key is one of min, max, std")
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
dest="ispecs_disk_count",
type="keyval", default={},
- help="Disk count specs: min, max, std")
+ help="Disk count specs: list of key=value,"
+ " where key is one of min, max, std")
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
type="keyval", default={},
- help="Disk size specs: min, max, std (in MB)")
+ help="Disk size specs: list of key=value,"
+ " where key is one of min, max, std"
+ " (in MB or using a unit)")
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
type="keyval", default={},
- help="NIC count specs: min, max, std")
+ help="NIC count specs: list of key=value,"
+ " where key is one of min, max, std")
+
+IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
+IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
+ dest="ipolicy_bounds_specs",
+ type="multilistidentkeyval", default=None,
+ help="Complete instance specs limits")
+
+IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
+IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
+ dest="ipolicy_std_specs",
+ type="keyval", default=None,
+ help="Complte standard instance specs")
+
+IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
+ dest="ipolicy_disk_templates",
+ type="list", default=None,
+ help="Comma-separated list of"
+ " enabled disk templates")
+
+IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
+ dest="ipolicy_vcpu_ratio",
+ type="maybefloat", default=None,
+ help="The maximum allowed vcpu-to-cpu ratio")
+
+IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
+ dest="ipolicy_spindle_ratio",
+ type="maybefloat", default=None,
+ help=("The maximum allowed instances to"
+ " spindle ratio"))
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
help="Hypervisor and hypervisor options, in the"
metavar="NODE", default=None,
completion_suggest=OPT_COMPL_ONE_NODE)
+NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
+ help="Specifies the new primary node",
+ metavar="<node>", default=None,
+ completion_suggest=OPT_COMPL_ONE_NODE)
+
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
default=False, action="store_true",
help="Replace the disk(s) on the primary"
" (excluded from allocation operations)"))
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
- type="bool", default=None, metavar=_YORNO,
- help="Set the master_capable flag on the node")
+ type="bool", default=None, metavar=_YORNO,
+ help="Set the master_capable flag on the node")
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
- type="bool", default=None, metavar=_YORNO,
- help="Set the vm_capable flag on the node")
+ type="bool", default=None, metavar=_YORNO,
+ help="Set the vm_capable flag on the node")
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
type="bool", default=None, metavar=_YORNO,
help="Comma-separated list of hypervisors",
type="string", default=None)
+ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
+ dest="enabled_disk_templates",
+ help="Comma-separated list of "
+ "disk templates",
+ type="string", default=None)
+
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
type="keyval", default={},
help="NIC parameters")
default=None)
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
- dest="use_external_mip_script",
- help="Specify whether to run a user-provided"
- " script for the master IP address turnup and"
- " turndown operations",
- type="bool", metavar=_YORNO, default=None)
+ dest="use_external_mip_script",
+ help="Specify whether to run a"
+ " user-provided script for the master"
+ " IP address turnup and"
+ " turndown operations",
+ type="bool", metavar=_YORNO, default=None)
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
help="Specify the default directory (cluster-"
"wide) for storing the file-based disks [%s]" %
- constants.DEFAULT_FILE_STORAGE_DIR,
+ pathutils.DEFAULT_FILE_STORAGE_DIR,
metavar="DIR",
- default=constants.DEFAULT_FILE_STORAGE_DIR)
+ default=pathutils.DEFAULT_FILE_STORAGE_DIR)
-GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
- dest="shared_file_storage_dir",
- help="Specify the default directory (cluster-"
- "wide) for storing the shared file-based"
- " disks [%s]" %
- constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
- metavar="SHAREDDIR",
- default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
+GLOBAL_SHARED_FILEDIR_OPT = cli_option(
+ "--shared-file-storage-dir",
+ dest="shared_file_storage_dir",
+ help="Specify the default directory (cluster-wide) for storing the"
+ " shared file-based disks [%s]" %
+ pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
+ metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
- help="Don't modify /etc/hosts",
+ help="Don't modify %s" % pathutils.ETC_HOSTS,
action="store_false", default=True)
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
help="Maximum time to wait")
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
- dest="shutdown_timeout", type="int",
- default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
- help="Maximum time to wait for instance shutdown")
+ dest="shutdown_timeout", type="int",
+ default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
+ help="Maximum time to wait for instance"
+ " shutdown")
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
default=None,
" certificate"))
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
- default=None,
- help="File containing new SPICE certificate")
+ default=None,
+ help="File containing new SPICE certificate")
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
- default=None,
- help="File containing the certificate of the CA"
- " which signed the SPICE certificate")
+ default=None,
+ help="File containing the certificate of the CA"
+ " which signed the SPICE certificate")
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
- dest="new_spice_cert", default=None,
- action="store_true",
- help=("Generate a new self-signed SPICE"
- " certificate"))
+ dest="new_spice_cert", default=None,
+ action="store_true",
+ help=("Generate a new self-signed SPICE"
+ " certificate"))
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
dest="new_confd_hmac_key",
" removed from the user-id pool"))
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
- action="store", dest="reserved_lvs",
- help=("A comma-separated list of reserved"
- " logical volumes names, that will be"
- " ignored by cluster verify"))
+ action="store", dest="reserved_lvs",
+ help=("A comma-separated list of reserved"
+ " logical volumes names, that will be"
+ " ignored by cluster verify"))
ROMAN_OPT = cli_option("--roman",
dest="roman_integers", default=False,
constants.IP6_VERSION),
help="Cluster-wide IP version for primary IP")
+SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
+ action="store_true",
+ help="Show machine name for every line in output")
+
+FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
+ action="store_true",
+ help=("Hide successful results and show failures"
+ " only (determined by the exit code)"))
+
+REASON_OPT = cli_option("--reason", default=None,
+ help="The reason for executing the command")
+
+
+def _PriorityOptionCb(option, _, value, parser):
+  """Callback for processing C{--priority} option.
+
+  Converts the symbolic priority name given on the command line to its
+  numeric value (via L{_PRIONAME_TO_VALUE}) before storing it on the
+  parsed options, so later consumers see the numeric priority directly.
+
+  """
+  value = _PRIONAME_TO_VALUE[value]
+
+  setattr(parser.values, option.dest, value)
+
+
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
metavar="|".join(name for name, _ in _PRIORITY_NAMES),
choices=_PRIONAME_TO_VALUE.keys(),
+ action="callback", type="choice",
+ callback=_PriorityOptionCb,
help="Priority for opcode processing")
HID_OS_OPT = cli_option("--hidden", dest="hidden",
help="Specify if the SoR for node is powered")
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
- default=constants.OOB_TIMEOUT,
- help="Maximum time to wait for out-of-band helper")
+ default=constants.OOB_TIMEOUT,
+ help="Maximum time to wait for out-of-band helper")
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
default=constants.OOB_POWER_DELAY,
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
action="append",
- help=("Specify disk state information in the format"
- " storage_type/identifier:option=value,..."),
+ help=("Specify disk state information in the"
+ " format"
+ " storage_type/identifier:option=value,...;"
+ " note this is unused for now"),
type="identkeyval")
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
action="append",
help=("Specify hypervisor state information in the"
- " format hypervisor:option=value,..."),
+ " format hypervisor:option=value,...;"
+ " note this is unused for now"),
type="identkeyval")
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
action="store_true", default=False,
help="Ignore instance policy violations")
+RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
+ help="Sets the instance's runtime memory,"
+ " ballooning it up or down to the new value",
+ default=None, type="unit", metavar="<size>")
+
+ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
+ action="store_true", default=False,
+ help="Marks the grow as absolute instead of the"
+ " (default) relative mode")
+
+NETWORK_OPT = cli_option("--network",
+ action="store", default=None, dest="network",
+ help="IP network in CIDR notation")
+
+GATEWAY_OPT = cli_option("--gateway",
+ action="store", default=None, dest="gateway",
+ help="IP address of the router (gateway)")
+
+ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
+ action="store", default=None,
+ dest="add_reserved_ips",
+ help="Comma-separated list of"
+ " reserved IPs to add")
+
+REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
+ action="store", default=None,
+ dest="remove_reserved_ips",
+ help="Comma-delimited list of"
+ " reserved IPs to remove")
+
+NETWORK6_OPT = cli_option("--network6",
+ action="store", default=None, dest="network6",
+ help="IP network in CIDR notation")
+
+GATEWAY6_OPT = cli_option("--gateway6",
+ action="store", default=None, dest="gateway6",
+ help="IP6 address of the router (gateway)")
+
+NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
+ dest="conflicts_check",
+ default=True,
+ action="store_false",
+ help="Don't check for conflicting IPs")
+
+INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
+ default=False, action="store_true",
+ help="Include default values")
#: Options provided by all commands
-COMMON_OPTS = [DEBUG_OPT]
+COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
+
+# options related to asynchronous job handling
+
+SUBMIT_OPTS = [
+ SUBMIT_OPT,
+ PRINT_JOBID_OPT,
+ ]
# common options for creating instances. add and import then add their own
# specific ones.
NET_OPT,
NODE_PLACEMENT_OPT,
NOIPCHECK_OPT,
+ NOCONFLICTSCHECK_OPT,
NONAMECHECK_OPT,
NONICS_OPT,
NWSYNC_OPT,
OSPARAMS_OPT,
OS_SIZE_OPT,
SUBMIT_OPT,
+ PRINT_JOBID_OPT,
TAG_ADD_OPT,
DRY_RUN_OPT,
PRIORITY_OPT,
]
+# common instance policy options
+INSTANCE_POLICY_OPTS = [
+ IPOLICY_BOUNDS_SPECS_OPT,
+ IPOLICY_DISK_TEMPLATES,
+ IPOLICY_VCPU_RATIO,
+ IPOLICY_SPINDLE_RATIO,
+ ]
+
+# instance policy split specs options
+SPLIT_ISPECS_OPTS = [
+ SPECS_CPU_COUNT_OPT,
+ SPECS_DISK_COUNT_OPT,
+ SPECS_DISK_SIZE_OPT,
+ SPECS_MEM_SIZE_OPT,
+ SPECS_NIC_COUNT_OPT,
+ ]
+
+
+class _ShowUsage(Exception):
+ """Exception class for L{_ParseArgs}.
+
+ """
+ def __init__(self, exit_error):
+ """Initializes instances of this class.
+
+ @type exit_error: bool
+ @param exit_error: Whether to report failure on exit
+
+ """
+ Exception.__init__(self)
+ self.exit_error = exit_error
-def _ParseArgs(argv, commands, aliases, env_override):
+
+class _ShowVersion(Exception):
+ """Exception class for L{_ParseArgs}.
+
+ """
+
+
+def _ParseArgs(binary, argv, commands, aliases, env_override):
"""Parser for the command line arguments.
This function parses the arguments and returns the function which
must be executed together with its (modified) arguments.
- @param argv: the command line
- @param commands: dictionary with special contents, see the design
- doc for cmdline handling
- @param aliases: dictionary with command aliases {'alias': 'target, ...}
+ @param binary: Script name
+ @param argv: Command line arguments
+ @param commands: Dictionary containing command definitions
+ @param aliases: dictionary with command aliases {"alias": "target", ...}
@param env_override: list of env variables allowed for default args
+ @raise _ShowUsage: If usage description should be shown
+ @raise _ShowVersion: If version should be shown
"""
assert not (env_override - set(commands))
+ assert not (set(aliases.keys()) & set(commands.keys()))
- if len(argv) == 0:
- binary = "<command>"
+ if len(argv) > 1:
+ cmd = argv[1]
else:
- binary = argv[0].split("/")[-1]
+ # No option or command given
+ raise _ShowUsage(exit_error=True)
- if len(argv) > 1 and argv[1] == "--version":
- ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
- constants.RELEASE_VERSION)
- # Quit right away. That way we don't have to care about this special
- # argument. optparse.py does it the same.
- sys.exit(0)
-
- if len(argv) < 2 or not (argv[1] in commands or
- argv[1] in aliases):
- # let's do a nice thing
- sortedcmds = commands.keys()
- sortedcmds.sort()
-
- ToStdout("Usage: %s {command} [options...] [argument...]", binary)
- ToStdout("%s <command> --help to see details, or man %s", binary, binary)
- ToStdout("")
-
- # compute the max line length for cmd + usage
- mlen = max([len(" %s" % cmd) for cmd in commands])
- mlen = min(60, mlen) # should not get here...
-
- # and format a nice command list
- ToStdout("Commands:")
- for cmd in sortedcmds:
- cmdstr = " %s" % (cmd,)
- help_text = commands[cmd][4]
- help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
- ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
- for line in help_lines:
- ToStdout("%-*s %s", mlen, "", line)
-
- ToStdout("")
-
- return None, None, None
+ if cmd == "--version":
+ raise _ShowVersion()
+ elif cmd == "--help":
+ raise _ShowUsage(exit_error=False)
+ elif not (cmd in commands or cmd in aliases):
+ raise _ShowUsage(exit_error=True)
# get command, unalias it, and look it up in commands
- cmd = argv.pop(1)
if cmd in aliases:
- if cmd in commands:
- raise errors.ProgrammerError("Alias '%s' overrides an existing"
- " command" % cmd)
-
if aliases[cmd] not in commands:
raise errors.ProgrammerError("Alias '%s' maps to non-existing"
" command '%s'" % (cmd, aliases[cmd]))
args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
env_args = os.environ.get(args_env_name)
if env_args:
- argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))
+ argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
func, args_def, parser_opts, usage, description = commands[cmd]
parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
formatter=TitledHelpFormatter(),
usage="%%prog %s %s" % (cmd, usage))
parser.disable_interspersed_args()
- options, args = parser.parse_args(args=argv[1:])
+ options, args = parser.parse_args(args=argv[2:])
if not _CheckArguments(cmd, args_def, args):
return None, None, None
return func, options, args
+def _FormatUsage(binary, commands):
+  """Generates a nice description of all commands.
+
+  @param binary: Script name
+  @param commands: Dictionary containing command definitions
+  @rtype: generator of string
+  @return: the usage text, yielded one line at a time
+
+  """
+  # compute the max line length for cmd + usage
+  mlen = min(60, max(map(len, commands)))
+
+  yield "Usage: %s {command} [options...] [argument...]" % binary
+  yield "%s <command> --help to see details, or man %s" % (binary, binary)
+  yield ""
+  yield "Commands:"
+
+  # and format a nice command list
+  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
+    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
+    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
+    for line in help_lines:
+      yield " %-*s %s" % (mlen, "", line)
+
+  yield ""
+
+
def _CheckArguments(cmd, args_def, args):
"""Verifies the arguments using the argument definition.
SetGenericOpcodeOpts([op], opts)
job_id = SendJob([op], cl=cl)
+ if hasattr(opts, "print_jobid") and opts.print_jobid:
+ ToStdout("%d" % job_id)
op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
reporter=reporter)
job = [op]
SetGenericOpcodeOpts(job, opts)
job_id = SendJob(job, cl=cl)
+ if opts.print_jobid:
+ ToStdout("%d" % job_id)
raise JobSubmittedException(job_id)
else:
return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
+def _InitReasonTrail(op, opts):
+ """Builds the first part of the reason trail
+
+ Builds the initial part of the reason trail, adding the user provided reason
+ (if it exists) and the name of the command starting the operation.
+
+ @param op: the opcode the reason trail will be added to
+ @param opts: the command line options selected by the user
+
+ """
+ assert len(sys.argv) >= 2
+ trail = []
+
+ if opts.reason:
+ trail.append((constants.OPCODE_REASON_SRC_USER,
+ opts.reason,
+ utils.EpochNano()))
+
+ binary = os.path.basename(sys.argv[0])
+ source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
+ command = sys.argv[1]
+ trail.append((source, command, utils.EpochNano()))
+ op.reason = trail
+
+
def SetGenericOpcodeOpts(opcode_list, options):
"""Processor for generic options.
if hasattr(options, "dry_run"):
op.dry_run = options.dry_run
if getattr(options, "priority", None) is not None:
- op.priority = _PRIONAME_TO_VALUE[options.priority]
+ op.priority = options.priority
+ _InitReasonTrail(op, options)
+
+
+def GetClient(query=False):
+ """Connects to a luxi socket and returns a client.
+ @type query: boolean
+ @param query: this signifies that the client will only be
+ used for queries; if the build-time parameter
+ enable-split-queries is enabled, then the client will be
+ connected to the query socket instead of the masterd socket
-def GetClient():
+ """
+ override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
+ if override_socket:
+ if override_socket == constants.LUXI_OVERRIDE_MASTER:
+ address = pathutils.MASTER_SOCKET
+ elif override_socket == constants.LUXI_OVERRIDE_QUERY:
+ address = pathutils.QUERY_SOCKET
+ else:
+ address = override_socket
+ elif query and constants.ENABLE_SPLIT_QUERY:
+ address = pathutils.QUERY_SOCKET
+ else:
+ address = None
# TODO: Cache object?
try:
- client = luxi.Client()
+ client = luxi.Client(address=address)
except luxi.NoMasterError:
ss = ssconf.SimpleStore()
ss.GetMasterNode()
except errors.ConfigurationError:
raise errors.OpPrereqError("Cluster not initialized or this machine is"
- " not part of a cluster")
+ " not part of a cluster",
+ errors.ECODE_INVAL)
master, myself = ssconf.GetMasterAndMyself(ss=ss)
if master != myself:
raise errors.OpPrereqError("This is not the master node, please connect"
" to node '%s' and rerun the command" %
- master)
+ master, errors.ECODE_INVAL)
raise
return client
elif isinstance(err, errors.OpPrereqError):
if len(err.args) == 2:
obuf.write("Failure: prerequisites not met for this"
- " operation:\nerror type: %s, error details:\n%s" %
+ " operation:\nerror type: %s, error details:\n%s" %
(err.args[1], err.args[0]))
else:
obuf.write("Failure: prerequisites not met for this"
elif isinstance(err, errors.ParameterError):
obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
elif isinstance(err, luxi.NoMasterError):
- obuf.write("Cannot communicate with the master daemon.\nIs it running"
- " and listening for connections?")
+ if err.args[0] == pathutils.MASTER_SOCKET:
+ daemon = "the master daemon"
+ elif err.args[0] == pathutils.QUERY_SOCKET:
+ daemon = "the config daemon"
+ else:
+ daemon = "socket '%s'" % str(err.args[0])
+ obuf.write("Cannot communicate with %s.\nIs the process running"
+ " and listening for connections?" % daemon)
elif isinstance(err, luxi.TimeoutError):
obuf.write("Timeout while talking to the master daemon. Jobs might have"
" been submitted and will continue to run even if the call"
"""
# save the program name and the entire command line for later logging
if sys.argv:
- binary = os.path.basename(sys.argv[0]) or sys.argv[0]
+ binary = os.path.basename(sys.argv[0])
+ if not binary:
+ binary = sys.argv[0]
+
if len(sys.argv) >= 2:
- binary += " " + sys.argv[1]
- old_cmdline = " ".join(sys.argv[2:])
+ logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
else:
- old_cmdline = ""
+ logname = binary
+
+ cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
else:
binary = "<unknown program>"
- old_cmdline = ""
+ cmdline = "<unknown>"
if aliases is None:
aliases = {}
try:
- func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
+ (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
+ env_override)
+ except _ShowVersion:
+ ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
+ constants.RELEASE_VERSION)
+ return constants.EXIT_SUCCESS
+ except _ShowUsage, err:
+ for line in _FormatUsage(binary, commands):
+ ToStdout(line)
+
+ if err.exit_error:
+ return constants.EXIT_FAILURE
+ else:
+ return constants.EXIT_SUCCESS
except errors.ParameterError, err:
result, err_msg = FormatError(err)
ToStderr(err_msg)
for key, val in override.iteritems():
setattr(options, key, val)
- utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
+ utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
stderr_logging=True)
- if old_cmdline:
- logging.info("run with arguments '%s'", old_cmdline)
- else:
- logging.info("run with no arguments")
+ logging.info("Command line: %s", cmdline)
try:
result = func(options, args)
try:
nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
except (TypeError, ValueError), err:
- raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
+ raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
+ errors.ECODE_INVAL)
nics = [{}] * nic_max
for nidx, ndict in optvalue:
if not isinstance(ndict, dict):
raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
- " got %s" % (nidx, ndict))
+ " got %s" % (nidx, ndict), errors.ECODE_INVAL)
utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
if opts.disk_template == constants.DT_DISKLESS:
if opts.disks or opts.sd_size is not None:
raise errors.OpPrereqError("Diskless instance but disk"
- " information passed")
+ " information passed", errors.ECODE_INVAL)
disks = []
else:
if (not opts.disks and not opts.sd_size
and mode == constants.INSTANCE_CREATE):
- raise errors.OpPrereqError("No disk information specified")
+ raise errors.OpPrereqError("No disk information specified",
+ errors.ECODE_INVAL)
if opts.disks and opts.sd_size is not None:
raise errors.OpPrereqError("Please use either the '--disk' or"
- " '-s' option")
+ " '-s' option", errors.ECODE_INVAL)
if opts.sd_size is not None:
opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
try:
disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
except ValueError, err:
- raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
+ raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
+ errors.ECODE_INVAL)
disks = [{}] * disk_max
else:
disks = []
didx = int(didx)
if not isinstance(ddict, dict):
msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
- raise errors.OpPrereqError(msg)
+ raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
elif constants.IDISK_SIZE in ddict:
if constants.IDISK_ADOPT in ddict:
raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
- " (disk %d)" % didx)
+ " (disk %d)" % didx, errors.ECODE_INVAL)
try:
ddict[constants.IDISK_SIZE] = \
utils.ParseUnit(ddict[constants.IDISK_SIZE])
except ValueError, err:
raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
- (didx, err))
+ (didx, err), errors.ECODE_INVAL)
elif constants.IDISK_ADOPT in ddict:
+ if constants.IDISK_SPINDLES in ddict:
+ raise errors.OpPrereqError("spindles is not a valid option when"
+ " adopting a disk", errors.ECODE_INVAL)
if mode == constants.INSTANCE_IMPORT:
raise errors.OpPrereqError("Disk adoption not allowed for instance"
- " import")
+ " import", errors.ECODE_INVAL)
ddict[constants.IDISK_SIZE] = 0
else:
raise errors.OpPrereqError("Missing size or adoption source for"
- " disk %d" % didx)
+ " disk %d" % didx, errors.ECODE_INVAL)
disks[didx] = ddict
if opts.tags is not None:
disks=disks,
disk_template=opts.disk_template,
nics=nics,
+ conflicts_check=opts.conflicts_check,
pnode=pnode, snode=snode,
ip_check=opts.ip_check,
name_check=opts.name_check,
# No need to use SSH
result = utils.RunCmd(cmd)
else:
- result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
+ result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
+ utils.ShellQuoteArgs(cmd))
if result.failed:
errmsg = ["Failed to run command %s" % result.cmd]
"""
# Pause watcher by acquiring an exclusive lock on watcher state file
self.feedback_fn("Blocking watcher")
- watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
+ watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
try:
# TODO: Currently, this just blocks. There's no timeout.
# TODO: Should it be a shared lock?
# Stop master daemons, so that no new jobs can come in and all running
# ones are finished
self.feedback_fn("Stopping master daemons")
- self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
+ self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
try:
# Stop daemons on all nodes
for node_name in self.online_nodes:
self.feedback_fn("Stopping daemons on %s" % node_name)
- self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
+ self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
# All daemons are shut down now
try:
# Start cluster again, master node last
for node_name in self.nonmaster_nodes + [self.master_node]:
self.feedback_fn("Starting daemons on %s" % node_name)
- self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
+ self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
finally:
# Resume watcher
watcher_block.Close()
def GenericList(resource, fields, names, unit, separator, header, cl=None,
- format_override=None, verbose=False, force_filter=False):
+ format_override=None, verbose=False, force_filter=False,
+ namefield=None, qfilter=None, isnumeric=False):
"""Generic implementation for listing all items of a resource.
@param resource: One of L{constants.QR_VIA_LUXI}
indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
@type verbose: boolean
@param verbose: whether to use verbose field descriptions or not
+ @type namefield: string
+ @param namefield: Name of field to use for simple filters (see
+ L{qlang.MakeFilter} for details)
+ @type qfilter: list or None
+ @param qfilter: Query filter (in addition to names)
+ @type isnumeric: bool
+ @param isnumeric: Whether the namefield's type is numeric, and therefore
+ any simple filters built by namefield should use integer values to
+ reflect that
"""
if not names:
names = None
- qfilter = qlang.MakeFilter(names, force_filter)
+ namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
+ isnumeric=isnumeric)
+
+ if qfilter is None:
+ qfilter = namefilter
+ elif namefilter is not None:
+ qfilter = [qlang.OP_AND, namefilter, qfilter]
if cl is None:
cl = GetClient()
return constants.EXIT_SUCCESS
+def _FieldDescValues(fdef):
+ """Helper function for L{GenericListFields} to get query field description.
+
+ @type fdef: L{objects.QueryFieldDefinition}
+ @rtype: list
+
+ """
+ return [
+ fdef.name,
+ _QFT_NAMES.get(fdef.kind, fdef.kind),
+ fdef.title,
+ fdef.doc,
+ ]
+
+
def GenericListFields(resource, fields, separator, header, cl=None):
"""Generic implementation for listing fields for a resource.
columns = [
TableColumn("Name", str, False),
+ TableColumn("Type", str, False),
TableColumn("Title", str, False),
TableColumn("Description", str, False),
]
- rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
+ rows = map(_FieldDescValues, response.fields)
for line in FormatTable(rows, columns, header, separator):
ToStdout(line)
"""
if not isinstance(ts, (tuple, list)) or len(ts) != 2:
return "?"
- sec, usec = ts
- return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
+
+ (sec, usecs) = ts
+ return utils.FormatTime(sec, usecs=usecs)
def ParseTimespec(value):
"""
value = str(value)
if not value:
- raise errors.OpPrereqError("Empty time specification passed")
+ raise errors.OpPrereqError("Empty time specification passed",
+ errors.ECODE_INVAL)
suffix_map = {
"s": 1,
"m": 60,
try:
value = int(value)
except (TypeError, ValueError):
- raise errors.OpPrereqError("Invalid time specification '%s'" % value)
+ raise errors.OpPrereqError("Invalid time specification '%s'" % value,
+ errors.ECODE_INVAL)
else:
multiplier = suffix_map[value[-1]]
value = value[:-1]
if not value: # no data left after stripping the suffix
raise errors.OpPrereqError("Invalid time specification (only"
- " suffix passed)")
+ " suffix passed)", errors.ECODE_INVAL)
try:
value = int(value) * multiplier
except (TypeError, ValueError):
- raise errors.OpPrereqError("Invalid time specification '%s'" % value)
+ raise errors.OpPrereqError("Invalid time specification '%s'" % value,
+ errors.ECODE_INVAL)
return value
return [row[1:3] for row in self.jobs]
-def FormatParameterDict(buf, param_dict, actual, level=1):
+def FormatParamsDictInfo(param_dict, actual):
"""Formats a parameter dictionary.
- @type buf: L{StringIO}
- @param buf: the buffer into which to write
@type param_dict: dict
@param param_dict: the own parameters
@type actual: dict
@param actual: the current parameter set (including defaults)
- @param level: Level of indent
+ @rtype: dict
+ @return: dictionary where the value of each parameter is either a fully
+ formatted string or a dictionary containing formatted strings
+
+ """
+ ret = {}
+ for (key, data) in actual.items():
+ if isinstance(data, dict) and data:
+ ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
+ else:
+ ret[key] = str(param_dict.get(key, "default (%s)" % data))
+ return ret
+
+
+def _FormatListInfoDefault(data, def_data):
+ if data is not None:
+ ret = utils.CommaJoin(data)
+ else:
+ ret = "default (%s)" % utils.CommaJoin(def_data)
+ return ret
+
+
+def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
+ """Formats an instance policy.
+
+ @type custom_ipolicy: dict
+ @param custom_ipolicy: own policy
+ @type eff_ipolicy: dict
+ @param eff_ipolicy: effective policy (including defaults); ignored for
+ cluster
+ @type iscluster: bool
+ @param iscluster: the policy is at cluster level
+ @rtype: list of pairs
+ @return: formatted data, suitable for L{PrintGenericInfo}
+
+ """
+ if iscluster:
+ eff_ipolicy = custom_ipolicy
+
+ minmax_out = []
+ custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
+ if custom_minmax:
+ for (k, minmax) in enumerate(custom_minmax):
+ minmax_out.append([
+ ("%s/%s" % (key, k),
+ FormatParamsDictInfo(minmax[key], minmax[key]))
+ for key in constants.ISPECS_MINMAX_KEYS
+ ])
+ else:
+ for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
+ minmax_out.append([
+ ("%s/%s" % (key, k),
+ FormatParamsDictInfo({}, minmax[key]))
+ for key in constants.ISPECS_MINMAX_KEYS
+ ])
+ ret = [("bounds specs", minmax_out)]
+
+ if iscluster:
+ stdspecs = custom_ipolicy[constants.ISPECS_STD]
+ ret.append(
+ (constants.ISPECS_STD,
+ FormatParamsDictInfo(stdspecs, stdspecs))
+ )
+
+ ret.append(
+ ("allowed disk templates",
+ _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
+ eff_ipolicy[constants.IPOLICY_DTS]))
+ )
+ ret.extend([
+ (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
+ for key in constants.IPOLICY_PARAMETERS
+ ])
+ return ret
+
+
+def _PrintSpecsParameters(buf, specs):
+ values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
+ buf.write(",".join(values))
+
+
+def PrintIPolicyCommand(buf, ipolicy, isgroup):
+ """Print the command option used to generate the given instance policy.
+
+ Currently only the parts dealing with specs are supported.
+
+ @type buf: StringIO
+ @param buf: stream to write into
+ @type ipolicy: dict
+ @param ipolicy: instance policy
+ @type isgroup: bool
+ @param isgroup: whether the policy is at group level
"""
- indent = " " * level
- for key in sorted(actual):
- val = param_dict.get(key, "default (%s)" % actual[key])
- buf.write("%s- %s: %s\n" % (indent, key, val))
+ if not isgroup:
+ stdspecs = ipolicy.get("std")
+ if stdspecs:
+ buf.write(" %s " % IPOLICY_STD_SPECS_STR)
+ _PrintSpecsParameters(buf, stdspecs)
+ minmaxes = ipolicy.get("minmax", [])
+ first = True
+ for minmax in minmaxes:
+ minspecs = minmax.get("min")
+ maxspecs = minmax.get("max")
+ if minspecs and maxspecs:
+ if first:
+ buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
+ first = False
+ else:
+ buf.write("//")
+ buf.write("min:")
+ _PrintSpecsParameters(buf, minspecs)
+ buf.write("/max:")
+ _PrintSpecsParameters(buf, maxspecs)
def ConfirmOperation(names, list_type, text, extra=""):
choices.pop(1)
choice = AskUser(msg + affected, choices)
return choice
+
+
+def _MaybeParseUnit(elements):
+ """Parses and returns an array of potential values with units.
+
+ """
+ parsed = {}
+ for k, v in elements.items():
+ if v == constants.VALUE_DEFAULT:
+ parsed[k] = v
+ else:
+ parsed[k] = utils.ParseUnit(v)
+ return parsed
+
+
+def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
+ ispecs_disk_count, ispecs_disk_size,
+ ispecs_nic_count, group_ipolicy, fill_all):
+ try:
+ if ispecs_mem_size:
+ ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
+ if ispecs_disk_size:
+ ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
+ except (TypeError, ValueError, errors.UnitParseError), err:
+ raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
+ " in policy: %s" %
+ (ispecs_disk_size, ispecs_mem_size, err),
+ errors.ECODE_INVAL)
+
+ # prepare ipolicy dict
+ ispecs_transposed = {
+ constants.ISPEC_MEM_SIZE: ispecs_mem_size,
+ constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
+ constants.ISPEC_DISK_COUNT: ispecs_disk_count,
+ constants.ISPEC_DISK_SIZE: ispecs_disk_size,
+ constants.ISPEC_NIC_COUNT: ispecs_nic_count,
+ }
+
+ # first, check that the values given are correct
+ if group_ipolicy:
+ forced_type = TISPECS_GROUP_TYPES
+ else:
+ forced_type = TISPECS_CLUSTER_TYPES
+ for specs in ispecs_transposed.values():
+ assert type(specs) is dict
+ utils.ForceDictType(specs, forced_type)
+
+ # then transpose
+ ispecs = {
+ constants.ISPECS_MIN: {},
+ constants.ISPECS_MAX: {},
+ constants.ISPECS_STD: {},
+ }
+ for (name, specs) in ispecs_transposed.iteritems():
+ assert name in constants.ISPECS_PARAMETERS
+ for key, val in specs.items(): # {min: .. ,max: .., std: ..}
+ assert key in ispecs
+ ispecs[key][name] = val
+ minmax_out = {}
+ for key in constants.ISPECS_MINMAX_KEYS:
+ if fill_all:
+ minmax_out[key] = \
+ objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
+ else:
+ minmax_out[key] = ispecs[key]
+ ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
+ if fill_all:
+ ipolicy[constants.ISPECS_STD] = \
+ objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
+ ispecs[constants.ISPECS_STD])
+ else:
+ ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
+
+
+def _ParseSpecUnit(spec, keyname):
+ ret = spec.copy()
+ for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
+ if k in ret:
+ try:
+ ret[k] = utils.ParseUnit(ret[k])
+ except (TypeError, ValueError, errors.UnitParseError), err:
+ raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
+ " specs: %s" % (k, ret[k], keyname, err)),
+ errors.ECODE_INVAL)
+ return ret
+
+
+def _ParseISpec(spec, keyname, required):
+ ret = _ParseSpecUnit(spec, keyname)
+ utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
+ missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
+ if required and missing:
+ raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
+ (keyname, utils.CommaJoin(missing)),
+ errors.ECODE_INVAL)
+ return ret
+
+
+def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
+ ret = None
+ if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
+ len(minmax_ispecs[0]) == 1):
+ for (key, spec) in minmax_ispecs[0].items():
+ # This loop is executed exactly once
+ if key in allowed_values and not spec:
+ ret = key
+ return ret
+
+
+def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
+ group_ipolicy, allowed_values):
+ found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
+ if found_allowed is not None:
+ ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
+ elif minmax_ispecs is not None:
+ minmax_out = []
+ for mmpair in minmax_ispecs:
+ mmpair_out = {}
+ for (key, spec) in mmpair.items():
+ if key not in constants.ISPECS_MINMAX_KEYS:
+ msg = "Invalid key in bounds instance specifications: %s" % key
+ raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+ mmpair_out[key] = _ParseISpec(spec, key, True)
+ minmax_out.append(mmpair_out)
+ ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
+ if std_ispecs is not None:
+ assert not group_ipolicy # This is not an option for gnt-group
+ ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
+
+
+def CreateIPolicyFromOpts(ispecs_mem_size=None,
+ ispecs_cpu_count=None,
+ ispecs_disk_count=None,
+ ispecs_disk_size=None,
+ ispecs_nic_count=None,
+ minmax_ispecs=None,
+ std_ispecs=None,
+ ipolicy_disk_templates=None,
+ ipolicy_vcpu_ratio=None,
+ ipolicy_spindle_ratio=None,
+ group_ipolicy=False,
+ allowed_values=None,
+ fill_all=False):
+ """Creation of instance policy based on command line options.
+
+ @param fill_all: whether for cluster policies we should ensure that
+ all values are filled
+
+ """
+ assert not (fill_all and allowed_values)
+
+ split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
+ ispecs_disk_size or ispecs_nic_count)
+ if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
+ raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
+ " together with any --ipolicy-xxx-specs option",
+ errors.ECODE_INVAL)
+
+ ipolicy_out = objects.MakeEmptyIPolicy()
+ if split_specs:
+ assert fill_all
+ _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
+ ispecs_disk_count, ispecs_disk_size,
+ ispecs_nic_count, group_ipolicy, fill_all)
+ elif (minmax_ispecs is not None or std_ispecs is not None):
+ _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
+ group_ipolicy, allowed_values)
+
+ if ipolicy_disk_templates is not None:
+ if allowed_values and ipolicy_disk_templates in allowed_values:
+ ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
+ else:
+ ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
+ if ipolicy_vcpu_ratio is not None:
+ ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
+ if ipolicy_spindle_ratio is not None:
+ ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
+
+ assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
+
+ if not group_ipolicy and fill_all:
+ ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
+
+ return ipolicy_out
+
+
+def _SerializeGenericInfo(buf, data, level, afterkey=False):
+ """Formatting core of L{PrintGenericInfo}.
+
+ @param buf: (string) stream to accumulate the result into
+ @param data: data to format
+ @type level: int
+ @param level: depth in the data hierarchy, used for indenting
+ @type afterkey: bool
+ @param afterkey: True when we are in the middle of a line after a key (used
+ to properly add newlines or indentation)
+
+ """
+ baseind = " "
+ if isinstance(data, dict):
+ if not data:
+ buf.write("\n")
+ else:
+ if afterkey:
+ buf.write("\n")
+ doindent = True
+ else:
+ doindent = False
+ for key in sorted(data):
+ if doindent:
+ buf.write(baseind * level)
+ else:
+ doindent = True
+ buf.write(key)
+ buf.write(": ")
+ _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
+ elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
+ # list of tuples (an ordered dictionary)
+ if afterkey:
+ buf.write("\n")
+ doindent = True
+ else:
+ doindent = False
+ for (key, val) in data:
+ if doindent:
+ buf.write(baseind * level)
+ else:
+ doindent = True
+ buf.write(key)
+ buf.write(": ")
+ _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
+ elif isinstance(data, list):
+ if not data:
+ buf.write("\n")
+ else:
+ if afterkey:
+ buf.write("\n")
+ doindent = True
+ else:
+ doindent = False
+ for item in data:
+ if doindent:
+ buf.write(baseind * level)
+ else:
+ doindent = True
+ buf.write("-")
+ buf.write(baseind[1:])
+ _SerializeGenericInfo(buf, item, level + 1)
+ else:
+ # This branch should be only taken for strings, but it's practically
+ # impossible to guarantee that no other types are produced somewhere
+ buf.write(str(data))
+ buf.write("\n")
+
+
+def PrintGenericInfo(data):
+ """Print information formatted according to the hierarchy.
+
+ The output is a valid YAML string.
+
+ @param data: the data to print. It's a hierarchical structure whose elements
+ can be:
+ - dictionaries, where keys are strings and values are of any of the
+ types listed here
+ - lists of pairs (key, value), where key is a string and value is of
+ any of the types listed here; it's a way to encode ordered
+ dictionaries
+ - lists of any of the types listed here
+ - strings
+
+ """
+ buf = StringIO()
+ _SerializeGenericInfo(buf, data, 0)
+ ToStdout(buf.getvalue().rstrip("\n"))