#
#
-# Copyright (C) 2010, 2011 Google Inc.
+# Copyright (C) 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
from ganeti import compat
from ganeti import objects
from ganeti import ht
+from ganeti import runtime
from ganeti import qlang
+from ganeti import jstore
from ganeti.constants import (QFT_UNKNOWN, QFT_TEXT, QFT_BOOL, QFT_NUMBER,
QFT_UNIT, QFT_TIMESTAMP, QFT_OTHER,
RS_NORMAL, RS_UNKNOWN, RS_NODATA,
RS_UNAVAIL, RS_OFFLINE)
+# Datakind constants for network queries: configuration data, group
+# connections, usage statistics and instance links are collected separately.
+# NOTE(review): this range overlaps CQ_* (range(300, 303)) below; datakinds
+# appear to be used per query type, but confirm the overlap is intentional.
+(NETQ_CONFIG,
+ NETQ_GROUP,
+ NETQ_STATS,
+ NETQ_INST) = range(300, 304)
# Constants for requesting data from the caller/data provider. Each property
# collected/computed separately by the data provider should have its own to
(IQ_CONFIG,
IQ_LIVE,
IQ_DISKUSAGE,
- IQ_CONSOLE) = range(100, 104)
+ IQ_CONSOLE,
+ IQ_NODES,
+ IQ_NETWORKS) = range(100, 106)
(LQ_MODE,
LQ_OWNER,
(GQ_CONFIG,
GQ_NODE,
- GQ_INST) = range(200, 203)
+ GQ_INST,
+ GQ_DISKPARAMS) = range(200, 204)
+
+# Datakind constants for cluster queries: configuration, queue-drain flag and
+# watcher-pause state are each collected separately by the data provider.
+(CQ_CONFIG,
+ CQ_QUEUE_DRAINED,
+ CQ_WATCHER_PAUSE) = range(300, 303)
+
+# Datakind constant for job queries (archived-job data)
+(JQ_ARCHIVED, ) = range(400, 401)
# Query field flags
QFF_HOSTNAME = 0x01
QFF_IP_ADDRESS = 0x02
-# Next values: 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x100, 0x200
-QFF_ALL = (QFF_HOSTNAME | QFF_IP_ADDRESS)
+QFF_JOB_ID = 0x04
+QFF_SPLIT_TIMESTAMP = 0x08
+# Next values: 0x10, 0x20, 0x40, 0x80, 0x100, 0x200
+QFF_ALL = (QFF_HOSTNAME | QFF_IP_ADDRESS | QFF_JOB_ID | QFF_SPLIT_TIMESTAMP)
FIELD_NAME_RE = re.compile(r"^[a-z0-9/._]+$")
TITLE_RE = re.compile(r"^[^\s]+$")
QFT_BOOL: ht.TBool,
QFT_NUMBER: ht.TInt,
QFT_UNIT: ht.TInt,
- QFT_TIMESTAMP: ht.TOr(ht.TInt, ht.TFloat),
+ QFT_TIMESTAMP: ht.TNumber,
QFT_OTHER: lambda _: True,
}
_FS_OFFLINE = object()
#: List of all special status
-_FS_ALL = frozenset([_FS_UNKNOWN, _FS_NODATA, _FS_UNAVAIL, _FS_OFFLINE])
+_FS_ALL = compat.UniqueFrozenset([
+ _FS_UNKNOWN,
+ _FS_NODATA,
+ _FS_UNAVAIL,
+ _FS_OFFLINE,
+ ])
#: VType to QFT mapping
_VTToQFT = {
_SERIAL_NO_DOC = "%s object serial number, incremented on each modification"
-def _GetUnknownField(ctx, item): # pylint: disable-msg=W0613
+def _GetUnknownField(ctx, item): # pylint: disable=W0613
"""Gets the contents of an unknown field.
"""
if op != qlang.OP_OR:
self._NeedAllNames()
- def NoteUnaryOp(self, op): # pylint: disable-msg=W0613
+ def NoteUnaryOp(self, op, datakind): # pylint: disable=W0613
"""Called when handling an unary operation.
@type op: string
@param op: Operator
"""
+ if datakind is not None:
+ self._datakinds.add(datakind)
+
self._NeedAllNames()
def NoteBinaryOp(self, op, datakind, name, value):
return not fn(lhs, rhs)
+def _PrepareRegex(pattern):
+  """Compiles a regular expression.
+
+  Value-preparation function for the C{OP_REGEXP} filter operator; the
+  compiled pattern's C{search} method is later applied to field values.
+
+  @type pattern: string
+  @param pattern: Regular expression pattern from the query filter
+  @return: Compiled regular expression object
+  @raise errors.ParameterError: If the pattern is not a valid regular
+    expression
+
+  """
+  try:
+    return re.compile(pattern)
+  except re.error, err:
+    raise errors.ParameterError("Invalid regex pattern (%s)" % err)
+
+
+def _PrepareSplitTimestamp(value):
+  """Prepares a value for comparison by L{_MakeSplitTimestampComparison}.
+
+  Plain numbers are passed through unchanged; any other value is assumed to
+  be a split timestamp and is merged into a single number via
+  L{utils.MergeTime}.
+
+  @return: Timestamp as a single number
+
+  """
+  if ht.TNumber(value):
+    return value
+  else:
+    return utils.MergeTime(value)
+
+
+def _MakeSplitTimestampComparison(fn):
+  """Compares split timestamp values after converting to float.
+
+  @type fn: callable
+  @param fn: Comparison function taking two numbers
+  @return: Callable taking a split timestamp (left-hand side) and an
+    already-prepared number (right-hand side)
+
+  """
+  # Only the left-hand side (field value) needs merging here; the right-hand
+  # side is prepared once via _PrepareSplitTimestamp when the filter is built.
+  return lambda lhs, rhs: fn(utils.MergeTime(lhs), rhs)
+
+
+def _MakeComparisonChecks(fn):
+  """Prepares flag-specific comparisons using a comparison function.
+
+  @type fn: callable
+  @param fn: Comparison function receiving left- and right-hand side
+  @return: List of C{(flags, comparison function, value preparation
+    function)} tuples; the filter compiler picks the first entry whose
+    flags match the field (order matters)
+
+  """
+  return [
+    # Split timestamps are merged to a single number before comparing
+    (QFF_SPLIT_TIMESTAMP, _MakeSplitTimestampComparison(fn),
+     _PrepareSplitTimestamp),
+    # Job IDs are parsed to numbers on both sides before comparing
+    (QFF_JOB_ID, lambda lhs, rhs: fn(jstore.ParseJobId(lhs), rhs),
+     jstore.ParseJobId),
+    # Fallback: direct comparison without value preparation
+    (None, fn, None),
+    ]
+
+
class _FilterCompilerHelper:
"""Converts a query filter to a callable usable for filtering.
"""
- # String statement has no effect, pylint: disable-msg=W0105
+ # String statement has no effect, pylint: disable=W0105
#: How deep filters can be nested
_LEVELS_MAX = 10
List of tuples containing flags and a callable receiving the left- and
right-hand side of the operator. The flags are an OR-ed value of C{QFF_*}
- (e.g. L{QFF_HOSTNAME}).
+ (e.g. L{QFF_HOSTNAME} or L{QFF_SPLIT_TIMESTAMP}).
Order matters. The first item with flags will be used. Flags are checked
using binary AND.
_EQUALITY_CHECKS = [
(QFF_HOSTNAME,
lambda lhs, rhs: utils.MatchNameComponent(rhs, [lhs],
- case_sensitive=False)),
- (None, operator.eq),
+ case_sensitive=False),
+ None),
+ (QFF_SPLIT_TIMESTAMP, _MakeSplitTimestampComparison(operator.eq),
+ _PrepareSplitTimestamp),
+ (None, operator.eq, None),
]
"""Known operators
# Binary operators
qlang.OP_EQUAL: (_OPTYPE_BINARY, _EQUALITY_CHECKS),
qlang.OP_NOT_EQUAL:
- (_OPTYPE_BINARY, [(flags, compat.partial(_WrapNot, fn))
- for (flags, fn) in _EQUALITY_CHECKS]),
- qlang.OP_GLOB: (_OPTYPE_BINARY, NotImplemented),
- qlang.OP_REGEXP: (_OPTYPE_BINARY, NotImplemented),
+ (_OPTYPE_BINARY, [(flags, compat.partial(_WrapNot, fn), valprepfn)
+ for (flags, fn, valprepfn) in _EQUALITY_CHECKS]),
+ qlang.OP_LT: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.lt)),
+ qlang.OP_LE: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.le)),
+ qlang.OP_GT: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.gt)),
+ qlang.OP_GE: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.ge)),
+ qlang.OP_REGEXP: (_OPTYPE_BINARY, [
+ (None, lambda lhs, rhs: rhs.search(lhs), _PrepareRegex),
+ ]),
qlang.OP_CONTAINS: (_OPTYPE_BINARY, [
- (None, operator.contains),
+ (None, operator.contains, None),
]),
}
self._hints = None
self._op_handler = None
- def __call__(self, hints, filter_):
+ def __call__(self, hints, qfilter):
"""Converts a query filter into a callable function.
@type hints: L{_FilterHints} or None
@param hints: Callbacks doing analysis on filter
- @type filter_: list
- @param filter_: Filter structure
+ @type qfilter: list
+ @param qfilter: Filter structure
@rtype: callable
@return: Function receiving context and item as parameters, returning
boolean as to whether item matches filter
}
try:
- filter_fn = self._Compile(filter_, 0)
+ filter_fn = self._Compile(qfilter, 0)
finally:
self._op_handler = None
return filter_fn
- def _Compile(self, filter_, level):
+ def _Compile(self, qfilter, level):
"""Inner function for converting filters.
Calls the correct handler functions for the top-level operator. This
function is called recursively (e.g. for logic operators).
"""
- if not (isinstance(filter_, (list, tuple)) and filter_):
+ if not (isinstance(qfilter, (list, tuple)) and qfilter):
raise errors.ParameterError("Invalid filter on level %s" % level)
# Limit recursion
" nested too deep)" % self._LEVELS_MAX)
# Create copy to be modified
- operands = filter_[:]
+ operands = qfilter[:]
op = operands.pop(0)
try:
"""
assert op_fn is None
- if hints_fn:
- hints_fn(op)
-
if len(operands) != 1:
raise errors.ParameterError("Unary operator '%s' expects exactly one"
" operand" % op)
if op == qlang.OP_TRUE:
- (_, _, _, retrieval_fn) = self._LookupField(operands[0])
+ (_, datakind, _, retrieval_fn) = self._LookupField(operands[0])
+
+ if hints_fn:
+ hints_fn(op, datakind)
op_fn = operator.truth
arg = retrieval_fn
elif op == qlang.OP_NOT:
+ if hints_fn:
+ hints_fn(op, None)
+
op_fn = operator.not_
arg = self._Compile(operands[0], level + 1)
else:
@param operands: List of operands
"""
- # Unused arguments, pylint: disable-msg=W0613
+ # Unused arguments, pylint: disable=W0613
try:
(name, value) = operands
except (ValueError, TypeError):
if hints_fn:
hints_fn(op, datakind, name, value)
- for (fn_flags, fn) in op_data:
+ for (fn_flags, fn, valprepfn) in op_data:
if fn_flags is None or fn_flags & field_flags:
+ # Prepare value if necessary (e.g. compile regular expression)
+ if valprepfn:
+ value = valprepfn(value)
+
return compat.partial(_WrapBinaryOp, fn, retrieval_fn, value)
raise errors.ProgrammerError("Unable to find operator implementation"
" (op '%s', flags %s)" % (op, field_flags))
-def _CompileFilter(fields, hints, filter_):
+def _CompileFilter(fields, hints, qfilter):
"""Converts a query filter into a callable function.
See L{_FilterCompilerHelper} for details.
@rtype: callable
"""
- return _FilterCompilerHelper(fields)(hints, filter_)
+ return _FilterCompilerHelper(fields)(hints, qfilter)
class Query:
- def __init__(self, fieldlist, selected, filter_=None, namefield=None):
+ def __init__(self, fieldlist, selected, qfilter=None, namefield=None):
"""Initializes this class.
The field definition is a dictionary with the field's name as a key and a
self._requested_names = None
self._filter_datakinds = frozenset()
- if filter_ is not None:
+ if qfilter is not None:
# Collect requested names if wanted
if namefield:
hints = _FilterHints(namefield)
hints = None
# Build filter function
- self._filter_fn = _CompileFilter(fieldlist, hints, filter_)
+ self._filter_fn = _CompileFilter(fieldlist, hints, qfilter)
if hints:
self._requested_names = hints.RequestedNames()
self._filter_datakinds = hints.ReferencedData()
(status, name) = _ProcessResult(self._name_fn(ctx, item))
assert status == constants.RS_NORMAL
# TODO: Are there cases where we wouldn't want to use NiceSort?
+ # Answer: if the name field is non-string...
result.append((utils.NiceSortKey(name), idx, row))
else:
result.append(row)
elif value is not None:
errs.append("abnormal field %s has a non-None value" % fdef.name)
assert not errs, ("Failed validation: %s in row %s" %
- (utils.CommaJoin(errors), row))
+ (utils.CommaJoin(errs), row))
+
+
+def _FieldDictKey((fdef, _, flags, fn)):
+  """Generates key for field dictionary.
+
+  Validates a field definition entry (name/title/doc format, callable
+  retrieval function, known flags) and returns the field name, for use as
+  the key function with L{utils.SequenceToDict}. Note the Python 2
+  tuple-unpacking parameter: the argument is a
+  C{(field definition, datakind, flags, retrieval function)} tuple.
+
+  @return: Field name (C{fdef.name})
+
+  """
+  assert fdef.name and fdef.title, "Name and title are required"
+  assert FIELD_NAME_RE.match(fdef.name)
+  assert TITLE_RE.match(fdef.title)
+  assert (DOC_RE.match(fdef.doc) and len(fdef.doc.splitlines()) == 1 and
+          fdef.doc.strip() == fdef.doc), \
+         "Invalid description for field '%s'" % fdef.name
+  assert callable(fn)
+  assert (flags & ~QFF_ALL) == 0, "Unknown flags for field '%s'" % fdef.name
+
+  return fdef.name
def _PrepareFieldList(fields, aliases):
for (fdef, _, _, _) in fields)
assert not duplicates, "Duplicate title(s) found: %r" % duplicates
- result = {}
-
- for field in fields:
- (fdef, _, flags, fn) = field
-
- assert fdef.name and fdef.title, "Name and title are required"
- assert FIELD_NAME_RE.match(fdef.name)
- assert TITLE_RE.match(fdef.title)
- assert (DOC_RE.match(fdef.doc) and len(fdef.doc.splitlines()) == 1 and
- fdef.doc.strip() == fdef.doc), \
- "Invalid description for field '%s'" % fdef.name
- assert callable(fn)
- assert fdef.name not in result, \
- "Duplicate field name '%s' found" % fdef.name
- assert (flags & ~QFF_ALL) == 0, "Unknown flags for field '%s'" % fdef.name
-
- result[fdef.name] = field
+ result = utils.SequenceToDict(fields, key=_FieldDictKey)
for alias, target in aliases:
assert alias not in result, "Alias %s overrides an existing field" % alias
doc=doc)
+def _StaticValueInner(value, ctx, _): # pylint: disable=W0613
+  """Returns a static value.
+
+  Partially applied by L{_StaticValue}; the query context and item
+  arguments are accepted for signature compatibility and ignored.
+
+  @return: C{value}, unchanged
+
+  """
+  return value
+
+
+def _StaticValue(value):
+  """Prepares a function to return a static value.
+
+  @return: Retrieval function with the standard C{(ctx, item)} signature
+    that always returns C{value}
+
+  """
+  return compat.partial(_StaticValueInner, value)
+
+
def _GetNodeRole(node, master_name):
"""Determine node role.
return lambda _, item: getter(item)
+def _GetItemMaybeAttr(attr):
+  """Returns a field function to return a not-None attribute of the item.
+
+  If the value is None, then C{_FS_UNAVAIL} will be returned instead.
+
+  @type attr: string
+  @param attr: Attribute name
+  @return: Retrieval function with the standard C{(ctx, item)} signature;
+    the context argument is ignored
+
+  """
+  def _helper(_, obj):
+    # Map a missing (None) attribute to the "unavailable" field status
+    val = getattr(obj, attr)
+    if val is None:
+      return _FS_UNAVAIL
+    else:
+      return val
+  return _helper
+
+
+def _GetNDParam(name):
+  """Return a field function to return an ND parameter out of the context.
+
+  @type name: string
+  @param name: Node parameter name
+  @return: Retrieval function reading C{ctx.ndparams}; yields
+    C{_FS_UNAVAIL} when the context carries no filled parameters, or
+    C{None} when the named parameter is absent from the filled dict
+
+  """
+  def _helper(ctx, _):
+    if ctx.ndparams is None:
+      return _FS_UNAVAIL
+    else:
+      return ctx.ndparams.get(name, None)
+  return _helper
+
+
+def _BuildNDFields(is_group):
+  """Builds all the ndparam fields.
+
+  One C{ndp/<name>} field is generated per entry in
+  C{constants.NDS_PARAMETER_TYPES}, each reading the filled parameter from
+  the context via L{_GetNDParam}.
+
+  @type is_group: bool
+  @param is_group: whether this is called at group or node level
+  @return: List of field definition tuples for L{_PrepareFieldList}
+
+  """
+  # Group-level fields read directly from configuration; node-level fields
+  # need the node's group to fill in inherited parameter values.
+  if is_group:
+    field_kind = GQ_CONFIG
+  else:
+    field_kind = NQ_GROUP
+  return [(_MakeField("ndp/%s" % name,
+                      constants.NDS_PARAMETER_TITLES.get(name,
+                                                         "ndp/%s" % name),
+                      _VTToQFT[kind], "The \"%s\" node parameter" % name),
+           field_kind, 0, _GetNDParam(name))
+          for name, kind in constants.NDS_PARAMETER_TYPES.items()]
+
+
def _ConvWrapInner(convert, fn, ctx, item):
"""Wrapper for converting values.
"""Data container for node data queries.
"""
- def __init__(self, nodes, live_data, master_name, node_to_primary,
- node_to_secondary, groups, oob_support, cluster):
+ def __init__(self, nodes, live_data, master_uuid, node_to_primary,
+ node_to_secondary, inst_uuid_to_inst_name, groups, oob_support,
+ cluster):
"""Initializes this class.
"""
self.nodes = nodes
self.live_data = live_data
- self.master_name = master_name
+ self.master_uuid = master_uuid
self.node_to_primary = node_to_primary
self.node_to_secondary = node_to_secondary
+ self.inst_uuid_to_inst_name = inst_uuid_to_inst_name
self.groups = groups
self.oob_support = oob_support
self.cluster = cluster
# Used for individual rows
self.curlive_data = None
+ self.ndparams = None
def __iter__(self):
"""Iterate over all nodes.
"""
for node in self.nodes:
+ group = self.groups.get(node.group, None)
+ if group is None:
+ self.ndparams = None
+ else:
+ self.ndparams = self.cluster.FillND(node, group)
if self.live_data:
- self.curlive_data = self.live_data.get(node.name, None)
+ self.curlive_data = self.live_data.get(node.uuid, None)
else:
self.curlive_data = None
yield node
" for detecting reboots by tracking changes"),
"cnodes": ("CNodes", QFT_NUMBER, "cpu_nodes",
"Number of NUMA domains on node (if exported by hypervisor)"),
+ "cnos": ("CNOs", QFT_NUMBER, "cpu_dom0",
+ "Number of logical processors used by the node OS (dom0 for Xen)"),
"csockets": ("CSockets", QFT_NUMBER, "cpu_sockets",
"Number of physical CPU sockets (if exported by hypervisor)"),
"ctotal": ("CTotal", QFT_NUMBER, "cpu_total", "Number of logical processors"),
- "dfree": ("DFree", QFT_UNIT, "vg_free",
- "Available disk space in volume group"),
- "dtotal": ("DTotal", QFT_UNIT, "vg_size",
- "Total disk space in volume group used for instance disk"
+ "dfree": ("DFree", QFT_UNIT, "storage_free",
+ "Available storage space in storage unit"),
+ "dtotal": ("DTotal", QFT_UNIT, "storage_size",
+ "Total storage space in storage unit used for instance disk"
" allocation"),
+ "spfree": ("SpFree", QFT_NUMBER, "spindles_free",
+ "Available spindles in volume group (exclusive storage only)"),
+ "sptotal": ("SpTotal", QFT_NUMBER, "spindles_total",
+ "Total spindles in volume group (exclusive storage only)"),
"mfree": ("MFree", QFT_UNIT, "memory_free",
"Memory available for instance allocations"),
"mnode": ("MNode", QFT_UNIT, "memory_dom0",
return fn
-def _GetNodeGroup(ctx, node, ng): # pylint: disable-msg=W0613
+def _GetNodeGroup(ctx, node, ng): # pylint: disable=W0613
"""Returns the name of a node's group.
@type ctx: L{NodeQueryData}
@param node: Node object
"""
- if ctx.oob_support[node.name]:
+ if ctx.oob_support[node.uuid]:
return node.powered
return _FS_UNAVAIL
if not ctx.curlive_data:
return _FS_NODATA
+ return _GetStatsField(field, kind, ctx.curlive_data)
+
+
+def _GetStatsField(field, kind, data):
+ """Gets a value from live statistics.
+
+ If the value is not found, L{_FS_UNAVAIL} is returned. If the field kind is
+ numeric a conversion to integer is attempted. If that fails, L{_FS_UNAVAIL}
+ is returned.
+
+ @param field: Live field name
+ @param kind: Data kind, one of L{constants.QFT_ALL}
+ @type data: dict
+ @param data: Statistics
+
+ """
try:
- value = ctx.curlive_data[field]
+ value = data[field]
except KeyError:
return _FS_UNAVAIL
return int(value)
except (ValueError, TypeError):
logging.exception("Failed to convert node field '%s' (value %r) to int",
- value, field)
+ field, value)
+ return _FS_UNAVAIL
+
+
+def _GetNodeHvState(_, node):
+  """Converts node's hypervisor state for query result.
+
+  @param node: Node object
+  @return: Dictionary of the state values serialized via their C{ToDict}
+    method, keyed as in C{node.hv_state}, or C{_FS_UNAVAIL} when no
+    hypervisor state is recorded
+
+  """
+  hv_state = node.hv_state
+
+  if hv_state is None:
    return _FS_UNAVAIL
+  return dict((name, value.ToDict()) for (name, value) in hv_state.items())
+
+
+def _GetNodeDiskState(_, node):
+  """Converts node's disk state for query result.
+
+  @param node: Node object
+  @return: Two-level dictionary (disk kind, then name) with the state
+    values serialized via their C{ToDict} method, or C{_FS_UNAVAIL} when
+    no disk state is recorded
+
+  """
+  disk_state = node.disk_state
+
+  if disk_state is None:
+    return _FS_UNAVAIL
+
+  # Serialize each nested state object while preserving the two-level
+  # structure of node.disk_state
+  return dict((disk_kind, dict((name, value.ToDict())
+                               for (name, value) in kind_state.items()))
+              for (disk_kind, kind_state) in disk_state.items())
+
def _BuildNodeFields():
"""Builds list of fields for node queries.
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), NQ_CONFIG, 0,
lambda ctx, node: list(node.GetTags())),
(_MakeField("master", "IsMaster", QFT_BOOL, "Whether node is master"),
- NQ_CONFIG, 0, lambda ctx, node: node.name == ctx.master_name),
+ NQ_CONFIG, 0, lambda ctx, node: node.uuid == ctx.master_uuid),
(_MakeField("group", "Group", QFT_TEXT, "Node group"), NQ_GROUP, 0,
_GetGroup(_GetNodeGroup)),
(_MakeField("group.uuid", "GroupUUID", QFT_TEXT, "UUID of node group"),
(_MakeField("custom_ndparams", "CustomNodeParameters", QFT_OTHER,
"Custom node parameters"),
NQ_GROUP, 0, _GetItemAttr("ndparams")),
+ (_MakeField("hv_state", "HypervisorState", QFT_OTHER, "Hypervisor state"),
+ NQ_CONFIG, 0, _GetNodeHvState),
+ (_MakeField("disk_state", "DiskState", QFT_OTHER, "Disk state"),
+ NQ_CONFIG, 0, _GetNodeDiskState),
]
+ fields.extend(_BuildNDFields(False))
+
# Node role
role_values = (constants.NR_MASTER, constants.NR_MCANDIDATE,
constants.NR_REGULAR, constants.NR_DRAINED,
constants.NR_OFFLINE)
role_doc = ("Node role; \"%s\" for master, \"%s\" for master candidate,"
- " \"%s\" for regular, \"%s\" for a drained, \"%s\" for offline" %
+ " \"%s\" for regular, \"%s\" for drained, \"%s\" for offline" %
role_values)
fields.append((_MakeField("role", "Role", QFT_TEXT, role_doc), NQ_CONFIG, 0,
- lambda ctx, node: _GetNodeRole(node, ctx.master_name)))
+ lambda ctx, node: _GetNodeRole(node, ctx.master_uuid)))
assert set(role_values) == constants.NR_ALL
def _GetLength(getter):
- return lambda ctx, node: len(getter(ctx)[node.name])
+ return lambda ctx, node: len(getter(ctx)[node.uuid])
def _GetList(getter):
- return lambda ctx, node: list(getter(ctx)[node.name])
+ return lambda ctx, node: utils.NiceSort(
+ [ctx.inst_uuid_to_inst_name[uuid]
+ for uuid in getter(ctx)[node.uuid]])
# Add fields operating on instance lists
for prefix, titleprefix, docword, getter in \
# Add simple fields
fields.extend([
(_MakeField(name, title, kind, doc), NQ_CONFIG, flags, _GetItemAttr(name))
- for (name, (title, kind, flags, doc)) in _NODE_SIMPLE_FIELDS.items()
- ])
+ for (name, (title, kind, flags, doc)) in _NODE_SIMPLE_FIELDS.items()])
# Add fields requiring live data
fields.extend([
(_MakeField(name, title, kind, doc), NQ_LIVE, 0,
compat.partial(_GetLiveNodeField, nfield, kind))
- for (name, (title, kind, nfield, doc)) in _NODE_LIVE_FIELDS.items()
- ])
+ for (name, (title, kind, nfield, doc)) in _NODE_LIVE_FIELDS.items()])
# Add timestamps
fields.extend(_GetItemTimestampFields(NQ_CONFIG))
"""Data container for instance data queries.
"""
- def __init__(self, instances, cluster, disk_usage, offline_nodes, bad_nodes,
- live_data, wrongnode_inst, console):
+ def __init__(self, instances, cluster, disk_usage, offline_node_uuids,
+ bad_node_uuids, live_data, wrongnode_inst, console, nodes,
+ groups, networks):
"""Initializes this class.
@param instances: List of instance objects
@param cluster: Cluster object
- @type disk_usage: dict; instance name as key
+ @type disk_usage: dict; instance UUID as key
@param disk_usage: Per-instance disk usage
- @type offline_nodes: list of strings
- @param offline_nodes: List of offline nodes
- @type bad_nodes: list of strings
- @param bad_nodes: List of faulty nodes
- @type live_data: dict; instance name as key
+ @type offline_node_uuids: list of strings
+ @param offline_node_uuids: List of offline nodes
+ @type bad_node_uuids: list of strings
+ @param bad_node_uuids: List of faulty nodes
+ @type live_data: dict; instance UUID as key
@param live_data: Per-instance live data
@type wrongnode_inst: set
@param wrongnode_inst: Set of instances running on wrong node(s)
- @type console: dict; instance name as key
+ @type console: dict; instance UUID as key
@param console: Per-instance console information
+ @type nodes: dict; node UUID as key
+ @param nodes: Node objects
+ @type networks: dict; net_uuid as key
+ @param networks: Network objects
"""
- assert len(set(bad_nodes) & set(offline_nodes)) == len(offline_nodes), \
+ assert len(set(bad_node_uuids) & set(offline_node_uuids)) == \
+ len(offline_node_uuids), \
"Offline nodes not included in bad nodes"
- assert not (set(live_data.keys()) & set(bad_nodes)), \
+ assert not (set(live_data.keys()) & set(bad_node_uuids)), \
"Found live data for bad or offline nodes"
self.instances = instances
self.cluster = cluster
self.disk_usage = disk_usage
- self.offline_nodes = offline_nodes
- self.bad_nodes = bad_nodes
+ self.offline_nodes = offline_node_uuids
+ self.bad_nodes = bad_node_uuids
self.live_data = live_data
self.wrongnode_inst = wrongnode_inst
self.console = console
+ self.nodes = nodes
+ self.groups = groups
+ self.networks = networks
# Used for individual rows
self.inst_hvparams = None
self.inst_beparams = None
+ self.inst_osparams = None
self.inst_nicparams = None
def __iter__(self):
for inst in self.instances:
self.inst_hvparams = self.cluster.FillHV(inst, skip_globals=True)
self.inst_beparams = self.cluster.FillBE(inst)
+ self.inst_osparams = self.cluster.SimpleFillOS(inst.os, inst.osparams)
self.inst_nicparams = [self.cluster.SimpleFillNIC(nic.nicparams)
for nic in inst.nics]
if inst.primary_node in ctx.bad_nodes:
return _FS_NODATA
else:
- return bool(ctx.live_data.get(inst.name))
+ return bool(ctx.live_data.get(inst.uuid))
def _GetInstLiveData(name):
# offline when we actually don't know due to missing data
return _FS_NODATA
- if inst.name in ctx.live_data:
- data = ctx.live_data[inst.name]
+ if inst.uuid in ctx.live_data:
+ data = ctx.live_data[inst.uuid]
if name in data:
return data[name]
if inst.primary_node in ctx.bad_nodes:
return constants.INSTST_NODEDOWN
- if bool(ctx.live_data.get(inst.name)):
- if inst.name in ctx.wrongnode_inst:
+ if bool(ctx.live_data.get(inst.uuid)):
+ if inst.uuid in ctx.wrongnode_inst:
return constants.INSTST_WRONGNODE
- elif inst.admin_up:
+ elif inst.admin_state == constants.ADMINST_UP:
return constants.INSTST_RUNNING
else:
return constants.INSTST_ERRORUP
- if inst.admin_up:
+ if inst.admin_state == constants.ADMINST_UP:
return constants.INSTST_ERRORDOWN
+ elif inst.admin_state == constants.ADMINST_DOWN:
+ return constants.INSTST_ADMINDOWN
- return constants.INSTST_ADMINDOWN
+ return constants.INSTST_ADMINOFFLINE
-def _GetInstDiskSize(index):
- """Build function for retrieving disk size.
+def _GetInstDisk(index, cb):
+ """Build function for calling another function with an instance Disk.
@type index: int
@param index: Disk index
+ @type cb: callable
+ @param cb: Callback
"""
- def fn(_, inst):
- """Get size of a disk.
+ def fn(ctx, inst):
+ """Call helper function with instance Disk.
+ @type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
try:
- return inst.disks[index].size
+ nic = inst.disks[index]
except IndexError:
return _FS_UNAVAIL
+ return cb(ctx, index, nic)
+
return fn
+def _GetInstDiskSize(ctx, _, disk): # pylint: disable=W0613
+  """Get a Disk's size.
+
+  @type ctx: L{InstanceQueryData}
+  @type disk: L{objects.Disk}
+  @param disk: The Disk object
+  @return: Disk size, or C{_FS_UNAVAIL} if the size is not set
+
+  """
+  if disk.size is None:
+    return _FS_UNAVAIL
+  else:
+    return disk.size
+
+
+def _GetInstDiskSpindles(ctx, _, disk): # pylint: disable=W0613
+  """Get a Disk's spindles.
+
+  @type ctx: L{InstanceQueryData}
+  @type disk: L{objects.Disk}
+  @param disk: The Disk object
+  @return: Spindle count, or C{_FS_UNAVAIL} if spindles are not set
+
+  """
+  if disk.spindles is None:
+    return _FS_UNAVAIL
+  else:
+    return disk.spindles
+
+
+def _GetInstDeviceName(ctx, _, device): # pylint: disable=W0613
+  """Get a Device's Name.
+
+  @type ctx: L{InstanceQueryData}
+  @type device: L{objects.NIC} or L{objects.Disk}
+  @param device: The NIC or Disk object
+  @return: Device name, or C{_FS_UNAVAIL} if the device has no name set
+
+  """
+  if device.name is None:
+    return _FS_UNAVAIL
+  else:
+    return device.name
+
+
+def _GetInstDeviceUUID(ctx, _, device): # pylint: disable=W0613
+  """Get a Device's UUID.
+
+  @type ctx: L{InstanceQueryData}
+  @type device: L{objects.NIC} or L{objects.Disk}
+  @param device: The NIC or Disk object
+  @return: Device UUID, or C{_FS_UNAVAIL} if the device has no UUID set
+
+  """
+  if device.uuid is None:
+    return _FS_UNAVAIL
+  else:
+    return device.uuid
+
+
def _GetInstNic(index, cb):
"""Build function for calling another function with an instance NIC.
return fn
-def _GetInstNicIp(ctx, _, nic): # pylint: disable-msg=W0613
+def _GetInstNicNetworkName(ctx, _, nic): # pylint: disable=W0613
+  """Get the name of a NIC's network.
+
+  @type ctx: L{InstanceQueryData}
+  @type nic: L{objects.NIC}
+  @param nic: NIC object
+  @return: Network name looked up through C{ctx.networks}, or
+    C{_FS_UNAVAIL} if the NIC is not connected to a network
+
+  """
+  if nic.network is None:
+    return _FS_UNAVAIL
+  else:
+    return ctx.networks[nic.network].name
+
+
+def _GetInstNicNetwork(ctx, _, nic): # pylint: disable=W0613
+  """Get a NIC's network reference.
+
+  Unlike L{_GetInstNicNetworkName}, this returns the raw network
+  identifier stored on the NIC (the key used in C{ctx.networks}), not the
+  network's name.
+
+  @type ctx: L{InstanceQueryData}
+  @type nic: L{objects.NIC}
+  @param nic: NIC object
+  @return: Network reference, or C{_FS_UNAVAIL} if not connected
+
+  """
+  if nic.network is None:
+    return _FS_UNAVAIL
+  else:
+    return nic.network
+
+
+def _GetInstNicIp(ctx, _, nic): # pylint: disable=W0613
"""Get a NIC's IP address.
@type ctx: L{InstanceQueryData}
return _FS_UNAVAIL
+def _GetInstAllNicNetworkNames(ctx, inst):
+  """Get all network names for an instance.
+
+  @type ctx: L{InstanceQueryData}
+  @type inst: L{objects.Instance}
+  @param inst: Instance object
+  @return: List with one entry per NIC, in NIC order; entries are the
+    network name or C{None} for NICs without a network
+
+  """
+  result = []
+
+  for nic in inst.nics:
+    # None acts as a placeholder so the list stays aligned with inst.nics
+    name = None
+    if nic.network:
+      name = ctx.networks[nic.network].name
+    result.append(name)
+
+  assert len(result) == len(inst.nics)
+
+  return result
+
+
def _GetInstAllNicBridges(ctx, inst):
"""Get all network bridges for an instance.
(_MakeField("nic.ips", "NIC_IPs", QFT_OTHER,
"List containing each network interface's IP address"),
IQ_CONFIG, 0, lambda ctx, inst: [nic.ip for nic in inst.nics]),
+ (_MakeField("nic.names", "NIC_Names", QFT_OTHER,
+ "List containing each network interface's name"),
+ IQ_CONFIG, 0, lambda ctx, inst: [nic.name for nic in inst.nics]),
+ (_MakeField("nic.uuids", "NIC_UUIDs", QFT_OTHER,
+ "List containing each network interface's UUID"),
+ IQ_CONFIG, 0, lambda ctx, inst: [nic.uuid for nic in inst.nics]),
(_MakeField("nic.modes", "NIC_modes", QFT_OTHER,
"List containing each network interface's mode"), IQ_CONFIG, 0,
lambda ctx, inst: [nicp[constants.NIC_MODE]
(_MakeField("nic.bridges", "NIC_bridges", QFT_OTHER,
"List containing each network interface's bridge"),
IQ_CONFIG, 0, _GetInstAllNicBridges),
+ (_MakeField("nic.networks", "NIC_networks", QFT_OTHER,
+ "List containing each interface's network"), IQ_CONFIG, 0,
+ lambda ctx, inst: [nic.network for nic in inst.nics]),
+ (_MakeField("nic.networks.names", "NIC_networks_names", QFT_OTHER,
+ "List containing each interface's network"),
+ IQ_NETWORKS, 0, _GetInstAllNicNetworkNames)
]
# NICs by number
(_MakeField("nic.mac/%s" % i, "NicMAC/%s" % i, QFT_TEXT,
"MAC address of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, nic_mac_fn)),
+ (_MakeField("nic.name/%s" % i, "NicName/%s" % i, QFT_TEXT,
+ "Name address of %s network interface" % numtext),
+ IQ_CONFIG, 0, _GetInstNic(i, _GetInstDeviceName)),
+ (_MakeField("nic.uuid/%s" % i, "NicUUID/%s" % i, QFT_TEXT,
+ "UUID address of %s network interface" % numtext),
+ IQ_CONFIG, 0, _GetInstNic(i, _GetInstDeviceUUID)),
(_MakeField("nic.mode/%s" % i, "NicMode/%s" % i, QFT_TEXT,
"Mode of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, nic_mode_fn)),
(_MakeField("nic.bridge/%s" % i, "NicBridge/%s" % i, QFT_TEXT,
"Bridge of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicBridge)),
+ (_MakeField("nic.network/%s" % i, "NicNetwork/%s" % i, QFT_TEXT,
+ "Network of %s network interface" % numtext),
+ IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicNetwork)),
+ (_MakeField("nic.network.name/%s" % i, "NicNetworkName/%s" % i, QFT_TEXT,
+ "Network name of %s network interface" % numtext),
+ IQ_NETWORKS, 0, _GetInstNic(i, _GetInstNicNetworkName)),
])
aliases = [
("bridge", "nic.bridge/0"),
("nic_mode", "nic.mode/0"),
("nic_link", "nic.link/0"),
+ ("nic_network", "nic.network/0"),
]
return (fields, aliases)
@param inst: Instance object
"""
- usage = ctx.disk_usage[inst.name]
+ usage = ctx.disk_usage[inst.uuid]
if usage is None:
usage = 0
@param inst: Instance object
"""
- consinfo = ctx.console[inst.name]
+ consinfo = ctx.console[inst.uuid]
if consinfo is None:
return _FS_UNAVAIL
IQ_CONFIG, 0, lambda ctx, inst: len(inst.disks)),
(_MakeField("disk.sizes", "Disk_sizes", QFT_OTHER, "List of disk sizes"),
IQ_CONFIG, 0, lambda ctx, inst: [disk.size for disk in inst.disks]),
+ (_MakeField("disk.spindles", "Disk_spindles", QFT_OTHER,
+ "List of disk spindles"),
+ IQ_CONFIG, 0, lambda ctx, inst: [disk.spindles for disk in inst.disks]),
+ (_MakeField("disk.names", "Disk_names", QFT_OTHER, "List of disk names"),
+ IQ_CONFIG, 0, lambda ctx, inst: [disk.name for disk in inst.disks]),
+ (_MakeField("disk.uuids", "Disk_UUIDs", QFT_OTHER, "List of disk UUIDs"),
+ IQ_CONFIG, 0, lambda ctx, inst: [disk.uuid for disk in inst.disks]),
]
# Disks by number
- fields.extend([
- (_MakeField("disk.size/%s" % i, "Disk/%s" % i, QFT_UNIT,
- "Disk size of %s disk" % utils.FormatOrdinal(i + 1)),
- IQ_CONFIG, 0, _GetInstDiskSize(i))
- for i in range(constants.MAX_DISKS)
- ])
+ for i in range(constants.MAX_DISKS):
+ numtext = utils.FormatOrdinal(i + 1)
+ fields.extend([
+ (_MakeField("disk.size/%s" % i, "Disk/%s" % i, QFT_UNIT,
+ "Disk size of %s disk" % numtext),
+ IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDiskSize)),
+ (_MakeField("disk.spindles/%s" % i, "DiskSpindles/%s" % i, QFT_NUMBER,
+ "Spindles of %s disk" % numtext),
+ IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDiskSpindles)),
+ (_MakeField("disk.name/%s" % i, "DiskName/%s" % i, QFT_TEXT,
+ "Name of %s disk" % numtext),
+ IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDeviceName)),
+ (_MakeField("disk.uuid/%s" % i, "DiskUUID/%s" % i, QFT_TEXT,
+ "UUID of %s disk" % numtext),
+ IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDeviceUUID))])
return fields
@return: List of field definitions used as input for L{_PrepareFieldList}
"""
- # TODO: Consider moving titles closer to constants
- be_title = {
- constants.BE_AUTO_BALANCE: "Auto_balance",
- constants.BE_MEMORY: "ConfigMemory",
- constants.BE_VCPUS: "ConfigVCPUs",
- }
-
- hv_title = {
- constants.HV_ACPI: "ACPI",
- constants.HV_BOOT_ORDER: "Boot_order",
- constants.HV_CDROM_IMAGE_PATH: "CDROM_image_path",
- constants.HV_DISK_TYPE: "Disk_type",
- constants.HV_INITRD_PATH: "Initrd_path",
- constants.HV_KERNEL_PATH: "Kernel_path",
- constants.HV_NIC_TYPE: "NIC_type",
- constants.HV_PAE: "PAE",
- constants.HV_VNC_BIND_ADDRESS: "VNC_bind_address",
- }
-
fields = [
# Filled parameters
(_MakeField("hvparams", "HypervisorParameters", QFT_OTHER,
- "Hypervisor parameters"),
+ "Hypervisor parameters (merged)"),
IQ_CONFIG, 0, lambda ctx, _: ctx.inst_hvparams),
(_MakeField("beparams", "BackendParameters", QFT_OTHER,
- "Backend parameters"),
+ "Backend parameters (merged)"),
IQ_CONFIG, 0, lambda ctx, _: ctx.inst_beparams),
+ (_MakeField("osparams", "OpSysParameters", QFT_OTHER,
+ "Operating system parameters (merged)"),
+ IQ_CONFIG, 0, lambda ctx, _: ctx.inst_osparams),
# Unfilled parameters
(_MakeField("custom_hvparams", "CustomHypervisorParameters", QFT_OTHER,
(_MakeField("custom_beparams", "CustomBackendParameters", QFT_OTHER,
"Custom backend parameters",),
IQ_CONFIG, 0, _GetItemAttr("beparams")),
+ (_MakeField("custom_osparams", "CustomOpSysParameters", QFT_OTHER,
+ "Custom operating system parameters",),
+ IQ_CONFIG, 0, _GetItemAttr("osparams")),
(_MakeField("custom_nicparams", "CustomNicParameters", QFT_OTHER,
"Custom network interface parameters"),
IQ_CONFIG, 0, lambda ctx, inst: [nic.nicparams for nic in inst.nics]),
return lambda ctx, _: ctx.inst_hvparams.get(name, _FS_UNAVAIL)
fields.extend([
- (_MakeField("hv/%s" % name, hv_title.get(name, "hv/%s" % name),
+ (_MakeField("hv/%s" % name,
+ constants.HVS_PARAMETER_TITLES.get(name, "hv/%s" % name),
_VTToQFT[kind], "The \"%s\" hypervisor parameter" % name),
IQ_CONFIG, 0, _GetInstHvParam(name))
for name, kind in constants.HVS_PARAMETER_TYPES.items()
- if name not in constants.HVC_GLOBALS
- ])
+ if name not in constants.HVC_GLOBALS])
# BE params
def _GetInstBeParam(name):
return lambda ctx, _: ctx.inst_beparams.get(name, None)
fields.extend([
- (_MakeField("be/%s" % name, be_title.get(name, "be/%s" % name),
+ (_MakeField("be/%s" % name,
+ constants.BES_PARAMETER_TITLES.get(name, "be/%s" % name),
_VTToQFT[kind], "The \"%s\" backend parameter" % name),
IQ_CONFIG, 0, _GetInstBeParam(name))
- for name, kind in constants.BES_PARAMETER_TYPES.items()
- ])
+ for name, kind in constants.BES_PARAMETER_TYPES.items()])
return fields
}
+def _GetNodeName(ctx, default, node_uuid):
+ """Gets node name of a node.
+
+ @type ctx: L{InstanceQueryData}
+ @param default: Default value
+ @type node_uuid: string
+ @param node_uuid: Node UUID
+
+ """
+ try:
+ node = ctx.nodes[node_uuid]
+ except KeyError:
+ return default
+ else:
+ return node.name
+
+
+def _GetInstNodeGroup(ctx, default, node_uuid):
+ """Gets group UUID of an instance node.
+
+ @type ctx: L{InstanceQueryData}
+ @param default: Default value
+ @type node_uuid: string
+ @param node_uuid: Node UUID
+
+ """
+ try:
+ node = ctx.nodes[node_uuid]
+ except KeyError:
+ return default
+ else:
+ return node.group
+
+
+def _GetInstNodeGroupName(ctx, default, node_uuid):
+ """Gets group name of an instance node.
+
+ @type ctx: L{InstanceQueryData}
+ @param default: Default value
+ @type node_uuid: string
+ @param node_uuid: Node UUID
+
+ """
+ try:
+ node = ctx.nodes[node_uuid]
+ except KeyError:
+ return default
+
+ try:
+ group = ctx.groups[node.group]
+ except KeyError:
+ return default
+
+ return group.name
+
+
def _BuildInstanceFields():
"""Builds list of fields for instance queries.
"""
fields = [
(_MakeField("pnode", "Primary_node", QFT_TEXT, "Primary node"),
- IQ_CONFIG, QFF_HOSTNAME, _GetItemAttr("primary_node")),
+ IQ_NODES, QFF_HOSTNAME,
+ lambda ctx, inst: _GetNodeName(ctx, None, inst.primary_node)),
+ (_MakeField("pnode.group", "PrimaryNodeGroup", QFT_TEXT,
+ "Primary node's group"),
+ IQ_NODES, 0,
+ lambda ctx, inst: _GetInstNodeGroupName(ctx, _FS_UNAVAIL,
+ inst.primary_node)),
+ (_MakeField("pnode.group.uuid", "PrimaryNodeGroupUUID", QFT_TEXT,
+ "Primary node's group UUID"),
+ IQ_NODES, 0,
+ lambda ctx, inst: _GetInstNodeGroup(ctx, _FS_UNAVAIL, inst.primary_node)),
# TODO: Allow filtering by secondary node as hostname
(_MakeField("snodes", "Secondary_Nodes", QFT_OTHER,
"Secondary nodes; usually this will just be one node"),
- IQ_CONFIG, 0, lambda ctx, inst: list(inst.secondary_nodes)),
- (_MakeField("admin_state", "Autostart", QFT_BOOL,
- "Desired state of instance (if set, the instance should be"
- " up)"),
- IQ_CONFIG, 0, _GetItemAttr("admin_up")),
+ IQ_NODES, 0,
+ lambda ctx, inst: map(compat.partial(_GetNodeName, ctx, None),
+ inst.secondary_nodes)),
+ (_MakeField("snodes.group", "SecondaryNodesGroups", QFT_OTHER,
+ "Node groups of secondary nodes"),
+ IQ_NODES, 0,
+ lambda ctx, inst: map(compat.partial(_GetInstNodeGroupName, ctx, None),
+ inst.secondary_nodes)),
+ (_MakeField("snodes.group.uuid", "SecondaryNodesGroupsUUID", QFT_OTHER,
+ "Node group UUIDs of secondary nodes"),
+ IQ_NODES, 0,
+ lambda ctx, inst: map(compat.partial(_GetInstNodeGroup, ctx, None),
+ inst.secondary_nodes)),
+ (_MakeField("admin_state", "InstanceState", QFT_TEXT,
+ "Desired state of instance"),
+ IQ_CONFIG, 0, _GetItemAttr("admin_state")),
+ (_MakeField("admin_up", "Autostart", QFT_BOOL,
+ "Desired state of instance"),
+ IQ_CONFIG, 0, lambda ctx, inst: inst.admin_state == constants.ADMINST_UP),
+ (_MakeField("disks_active", "DisksActive", QFT_BOOL,
+ "Desired state of instance disks"),
+ IQ_CONFIG, 0, _GetItemAttr("disks_active")),
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), IQ_CONFIG, 0,
lambda ctx, inst: list(inst.GetTags())),
(_MakeField("console", "Console", QFT_OTHER,
# Add simple fields
fields.extend([
(_MakeField(name, title, kind, doc), IQ_CONFIG, flags, _GetItemAttr(name))
- for (name, (title, kind, flags, doc)) in _INST_SIMPLE_FIELDS.items()
- ])
+ for (name, (title, kind, flags, doc)) in _INST_SIMPLE_FIELDS.items()])
# Fields requiring talking to the node
fields.extend([
status_values = (constants.INSTST_RUNNING, constants.INSTST_ADMINDOWN,
constants.INSTST_WRONGNODE, constants.INSTST_ERRORUP,
constants.INSTST_ERRORDOWN, constants.INSTST_NODEDOWN,
- constants.INSTST_NODEOFFLINE)
+ constants.INSTST_NODEOFFLINE, constants.INSTST_ADMINOFFLINE)
status_doc = ("Instance status; \"%s\" if instance is set to be running"
" and actually is, \"%s\" if instance is stopped and"
" is not running, \"%s\" if instance running, but not on its"
" designated primary node, \"%s\" if instance should be"
" stopped, but is actually running, \"%s\" if instance should"
" run, but doesn't, \"%s\" if instance's primary node is down,"
- " \"%s\" if instance's primary node is marked offline" %
- status_values)
+ " \"%s\" if instance's primary node is marked offline,"
+ " \"%s\" if instance is offline and does not use dynamic"
+ " resources" % status_values)
fields.append((_MakeField("status", "Status", QFT_TEXT, status_doc),
IQ_LIVE, 0, _GetInstStatus))
assert set(status_values) == constants.INSTST_ALL, \
aliases = [
("vcpus", "be/vcpus"),
+ ("be/memory", "be/maxmem"),
("sda_size", "disk.size/0"),
("sdb_size", "disk.size/1"),
] + network_aliases
"""Data container for node group data queries.
"""
  def __init__(self, cluster, groups, group_to_nodes, group_to_instances,
               want_diskparams):
    """Initializes this class.

    @type cluster: L{objects.Cluster}
    @param cluster: Cluster object
    @param groups: List of node group objects
    @type group_to_nodes: dict; group UUID as key
    @param group_to_nodes: Per-group list of nodes
    @type group_to_instances: dict; group UUID as key
    @param group_to_instances: Per-group list of (primary) instances
    @type want_diskparams: bool
    @param want_diskparams: Whether disk parameters should be calculated

    """
    self.groups = groups
    self.group_to_nodes = group_to_nodes
    self.group_to_instances = group_to_instances
    self.cluster = cluster
    self.want_diskparams = want_diskparams

    # Used for individual rows; filled in by __iter__ while iterating
    self.group_ipolicy = None
    self.ndparams = None
    self.group_dp = None
  def __iter__(self):
    """Iterate over all node groups.

    This function has side-effects and only one instance of the resulting
    generator should be used at a time.

    """
    for group in self.groups:
      # Pre-compute per-row values; field getters read these from the
      # context while this row is being processed
      self.group_ipolicy = self.cluster.SimpleFillIPolicy(group.ipolicy)
      self.ndparams = self.cluster.SimpleFillND(group.ndparams)
      if self.want_diskparams:
        self.group_dp = self.cluster.SimpleFillDP(group.diskparams)
      else:
        self.group_dp = None
      yield group
_GROUP_SIMPLE_FIELDS = {
"name": ("Group", QFT_TEXT, "Group name"),
"serial_no": ("SerialNo", QFT_NUMBER, _SERIAL_NO_DOC % "Group"),
"uuid": ("UUID", QFT_TEXT, "Group UUID"),
- "ndparams": ("NDParams", QFT_OTHER, "Node parameters"),
}
GQ_INST, 0, _GetSortedList(group_to_instances)),
])
+ # Other fields
+ fields.extend([
+ (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), GQ_CONFIG, 0,
+ lambda ctx, group: list(group.GetTags())),
+ (_MakeField("ipolicy", "InstancePolicy", QFT_OTHER,
+ "Instance policy limitations (merged)"),
+ GQ_CONFIG, 0, lambda ctx, _: ctx.group_ipolicy),
+ (_MakeField("custom_ipolicy", "CustomInstancePolicy", QFT_OTHER,
+ "Custom instance policy limitations"),
+ GQ_CONFIG, 0, _GetItemAttr("ipolicy")),
+ (_MakeField("custom_ndparams", "CustomNDParams", QFT_OTHER,
+ "Custom node parameters"),
+ GQ_CONFIG, 0, _GetItemAttr("ndparams")),
+ (_MakeField("ndparams", "NDParams", QFT_OTHER,
+ "Node parameters"),
+ GQ_CONFIG, 0, lambda ctx, _: ctx.ndparams),
+ (_MakeField("diskparams", "DiskParameters", QFT_OTHER,
+ "Disk parameters (merged)"),
+ GQ_DISKPARAMS, 0, lambda ctx, _: ctx.group_dp),
+ (_MakeField("custom_diskparams", "CustomDiskParameters", QFT_OTHER,
+ "Custom disk parameters"),
+ GQ_CONFIG, 0, _GetItemAttr("diskparams")),
+ ])
+
+ # ND parameters
+ fields.extend(_BuildNDFields(True))
+
fields.extend(_GetItemTimestampFields(GQ_CONFIG))
return _PrepareFieldList(fields, [])
None, 0, _ConvWrap(sorted, _GetItemAttr("api_versions"))),
(_MakeField("parameters", "Parameters", QFT_OTHER,
"Operating system parameters"),
- None, 0, _ConvWrap(utils.NiceSort, _GetItemAttr("parameters"))),
+ None, 0, _ConvWrap(compat.partial(utils.NiceSort, key=compat.fst),
+ _GetItemAttr("parameters"))),
+ (_MakeField("node_status", "NodeStatus", QFT_OTHER,
+ "Status from node"),
+ None, 0, _GetItemAttr("node_status")),
+ ]
+
+ return _PrepareFieldList(fields, [])
+
+
class ExtStorageInfo(objects.ConfigObject):
  """Data container describing an ExtStorage provider.

  Instances of this class are the row objects for extstorage queries
  (see L{_BuildExtStorageFields}).

  """
  __slots__ = [
    "name",
    "node_status",
    "nodegroup_status",
    "parameters",
    ]
+
+
def _BuildExtStorageFields():
  """Builds list of fields for extstorage provider queries.

  All fields map directly to L{ExtStorageInfo} attributes.

  """
  fields = [
    (_MakeField("name", "Name", QFT_TEXT, "ExtStorage provider name"),
     None, 0, _GetItemAttr("name")),
    (_MakeField("node_status", "NodeStatus", QFT_OTHER,
                "Status from node"),
     None, 0, _GetItemAttr("node_status")),
    (_MakeField("nodegroup_status", "NodegroupStatus", QFT_OTHER,
                "Overall Nodegroup status"),
     None, 0, _GetItemAttr("nodegroup_status")),
    (_MakeField("parameters", "Parameters", QFT_OTHER,
                "ExtStorage provider parameters"),
     None, 0, _GetItemAttr("parameters")),
    ]

  return _PrepareFieldList(fields, [])
+
+
# Note: uses Python 2 tuple parameter unpacking (removed in Python 3)
def _JobUnavailInner(fn, ctx, (job_id, job)): # pylint: disable=W0613
  """Return L{_FS_UNAVAIL} if job is None.

  When listing specific jobs (e.g. "gnt-job list 1 2 3"), a job may not be
  found, in which case this function converts it to L{_FS_UNAVAIL}.

  @param fn: Function applied to the job object when it is available
  @param ctx: Context (unused)

  """
  if job is None:
    return _FS_UNAVAIL
  else:
    return fn(job)
+
+
def _JobUnavail(inner):
  """Wrapper for L{_JobUnavailInner}.

  @param inner: Function taking a job object
  @return: Partial function usable as a job field getter

  """
  return compat.partial(_JobUnavailInner, inner)
+
+
def _PerJobOpInner(fn, job):
  """Executes a function per opcode in a job.

  @param fn: Function taking an opcode object
  @param job: Job object; its C{ops} attribute is iterated
  @return: List of C{fn}'s results, one per opcode (C{map} returns a list
    under Python 2, which this code targets)

  """
  return map(fn, job.ops)
+
+
def _PerJobOp(fn):
  """Wrapper for L{_PerJobOpInner}.

  @param fn: Function taking an opcode object
  @return: Getter applying C{fn} to every opcode of a job, or returning
    L{_FS_UNAVAIL} for unknown jobs (via L{_JobUnavail})

  """
  return _JobUnavail(compat.partial(_PerJobOpInner, fn))
+
+
def _JobTimestampInner(fn, job):
  """Converts unavailable timestamp to L{_FS_UNAVAIL}.

  @param fn: Function extracting a timestamp from a job object
  @param job: Job object
  @return: The timestamp, or L{_FS_UNAVAIL} when C{fn} returned C{None}

  """
  ts = fn(job)
  if ts is None:
    return _FS_UNAVAIL
  return ts
+
+
def _JobTimestamp(fn):
  """Wrapper for L{_JobTimestampInner}.

  @param fn: Function extracting a timestamp from a job object
  @return: Getter returning the timestamp, or L{_FS_UNAVAIL} when it is
    missing or the job is unknown

  """
  return _JobUnavail(compat.partial(_JobTimestampInner, fn))
+
+
def _BuildJobFields():
  """Builds list of fields for job queries.

  Rows are C{(job_id, job)} tuples; the job object is C{None} when a
  requested job could not be found (handled via L{_JobUnavail}).

  """
  fields = [
    (_MakeField("id", "ID", QFT_NUMBER, "Job ID"),
     None, QFF_JOB_ID, lambda _, (job_id, job): job_id),
    (_MakeField("status", "Status", QFT_TEXT, "Job status"),
     None, 0, _JobUnavail(lambda job: job.CalcStatus())),
    (_MakeField("priority", "Priority", QFT_NUMBER,
                ("Current job priority (%s to %s)" %
                 (constants.OP_PRIO_LOWEST, constants.OP_PRIO_HIGHEST))),
     None, 0, _JobUnavail(lambda job: job.CalcPriority())),
    # NOTE(review): unlike the other getters this one is not wrapped in
    # _JobUnavail, so it assumes the job object is never None when
    # JQ_ARCHIVED data was requested -- confirm against the data provider
    (_MakeField("archived", "Archived", QFT_BOOL, "Whether job is archived"),
     JQ_ARCHIVED, 0, lambda _, (job_id, job): job.archived),
    (_MakeField("ops", "OpCodes", QFT_OTHER, "List of all opcodes"),
     None, 0, _PerJobOp(lambda op: op.input.__getstate__())),
    (_MakeField("opresult", "OpCode_result", QFT_OTHER,
                "List of opcodes results"),
     None, 0, _PerJobOp(operator.attrgetter("result"))),
    (_MakeField("opstatus", "OpCode_status", QFT_OTHER,
                "List of opcodes status"),
     None, 0, _PerJobOp(operator.attrgetter("status"))),
    (_MakeField("oplog", "OpCode_log", QFT_OTHER,
                "List of opcode output logs"),
     None, 0, _PerJobOp(operator.attrgetter("log"))),
    (_MakeField("opstart", "OpCode_start", QFT_OTHER,
                "List of opcode start timestamps (before acquiring locks)"),
     None, 0, _PerJobOp(operator.attrgetter("start_timestamp"))),
    (_MakeField("opexec", "OpCode_exec", QFT_OTHER,
                "List of opcode execution start timestamps (after acquiring"
                " locks)"),
     None, 0, _PerJobOp(operator.attrgetter("exec_timestamp"))),
    (_MakeField("opend", "OpCode_end", QFT_OTHER,
                "List of opcode execution end timestamps"),
     None, 0, _PerJobOp(operator.attrgetter("end_timestamp"))),
    (_MakeField("oppriority", "OpCode_prio", QFT_OTHER,
                "List of opcode priorities"),
     None, 0, _PerJobOp(operator.attrgetter("priority"))),
    (_MakeField("summary", "Summary", QFT_OTHER,
                "List of per-opcode summaries"),
     None, 0, _PerJobOp(lambda op: op.input.Summary())),
    ]

  # Timestamp fields; each is exposed as a (seconds, microseconds) tuple and
  # may be unavailable (converted to _FS_UNAVAIL by _JobTimestamp)
  for (name, attr, title, desc) in [
    ("received_ts", "received_timestamp", "Received",
     "Timestamp of when job was received"),
    ("start_ts", "start_timestamp", "Start", "Timestamp of job start"),
    ("end_ts", "end_timestamp", "End", "Timestamp of job end"),
    ]:
    getter = operator.attrgetter(attr)
    fields.extend([
      (_MakeField(name, title, QFT_OTHER,
                  "%s (tuple containing seconds and microseconds)" % desc),
       None, QFF_SPLIT_TIMESTAMP, _JobTimestamp(getter)),
      ])

  return _PrepareFieldList(fields, [])
+
+
def _GetExportName(_, (node_name, expname)): # pylint: disable=W0613
  """Returns an export name if available.

  Rows for export queries are (node name, export name) tuples; the export
  name is C{None} when no data is available for the node.

  """
  if expname is None:
    return _FS_NODATA
  else:
    return expname
+
+
def _BuildExportFields():
  """Builds list of fields for exports.

  Rows are (node name, export name) tuples.

  """
  fields = [
    (_MakeField("node", "Node", QFT_TEXT, "Node name"),
     None, QFF_HOSTNAME, lambda _, (node_name, expname): node_name),
    (_MakeField("export", "Export", QFT_TEXT, "Export name"),
     None, 0, _GetExportName),
    ]

  return _PrepareFieldList(fields, [])
#: Static version information exposed as cluster query fields: attribute
#: name to (title, kind, value, doc); values are fixed at import time
_CLUSTER_VERSION_FIELDS = {
  "software_version": ("SoftwareVersion", QFT_TEXT, constants.RELEASE_VERSION,
                       "Software version"),
  "protocol_version": ("ProtocolVersion", QFT_NUMBER,
                       constants.PROTOCOL_VERSION,
                       "RPC protocol version"),
  "config_version": ("ConfigVersion", QFT_NUMBER, constants.CONFIG_VERSION,
                     "Configuration format version"),
  "os_api_version": ("OsApiVersion", QFT_NUMBER, max(constants.OS_API_VERSIONS),
                     "API version for OS template scripts"),
  "export_version": ("ExportVersion", QFT_NUMBER, constants.EXPORT_VERSION,
                     "Import/export file format version"),
  "vcs_version": ("VCSVersion", QFT_TEXT, constants.VCS_VERSION,
                  "VCS version"),
  }


#: Simple cluster object attributes exposed as query fields: attribute name
#: to (title, kind, flags, doc)
_CLUSTER_SIMPLE_FIELDS = {
  "cluster_name": ("Name", QFT_TEXT, QFF_HOSTNAME, "Cluster name"),
  "volume_group_name": ("VgName", QFT_TEXT, 0, "LVM volume group name"),
  }
+
+
class ClusterQueryData:
  """Data container for cluster information queries.

  Iterating yields exactly one row: the cluster object.

  """
  def __init__(self, cluster, nodes, drain_flag, watcher_pause):
    """Initializes this class.

    @type cluster: L{objects.Cluster}
    @param cluster: Instance of cluster object
    @type nodes: dict; node UUID as key
    @param nodes: Node objects
    @type drain_flag: bool
    @param drain_flag: Whether job queue is drained
    @type watcher_pause: number
    @param watcher_pause: Until when watcher is paused (Unix timestamp)

    """
    self._cluster = cluster
    self.nodes = nodes
    self.drain_flag = drain_flag
    self.watcher_pause = watcher_pause

  def __iter__(self):
    # A cluster query always has exactly one row
    return iter([self._cluster])
+
+
def _ClusterWatcherPause(ctx, _):
  """Returns until when watcher is paused (if available).

  @type ctx: L{ClusterQueryData}
  @param _: Unused row object (the cluster)
  @return: Unix timestamp, or L{_FS_UNAVAIL} when no pause is set

  """
  if ctx.watcher_pause is None:
    return _FS_UNAVAIL
  else:
    return ctx.watcher_pause
+
+
def _BuildClusterFields():
  """Builds list of fields for cluster information.

  Rows come from L{ClusterQueryData}, which yields exactly one cluster
  object.

  """
  fields = [
    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), CQ_CONFIG, 0,
     lambda ctx, cluster: list(cluster.GetTags())),
    (_MakeField("architecture", "ArchInfo", QFT_OTHER,
                "Architecture information"), None, 0,
     lambda ctx, _: runtime.GetArchInfo()),
    (_MakeField("drain_flag", "QueueDrained", QFT_BOOL,
                "Flag whether job queue is drained"), CQ_QUEUE_DRAINED, 0,
     lambda ctx, _: ctx.drain_flag),
    (_MakeField("watcher_pause", "WatcherPause", QFT_TIMESTAMP,
                "Until when watcher is paused"), CQ_WATCHER_PAUSE, 0,
     _ClusterWatcherPause),
    # _GetNodeName only reads ctx.nodes, which ClusterQueryData provides
    (_MakeField("master_node", "Master", QFT_TEXT, "Master node name"),
     CQ_CONFIG, QFF_HOSTNAME,
     lambda ctx, cluster: _GetNodeName(ctx, None, cluster.master_node)),
    ]

  # Simple fields
  fields.extend([
    (_MakeField(name, title, kind, doc), CQ_CONFIG, flags, _GetItemAttr(name))
    for (name, (title, kind, flags, doc)) in _CLUSTER_SIMPLE_FIELDS.items()])

  # Version fields (static values, no data collection needed)
  fields.extend([
    (_MakeField(name, title, kind, doc), None, 0, _StaticValue(value))
    for (name, (title, kind, value, doc)) in _CLUSTER_VERSION_FIELDS.items()])

  # Add timestamps
  fields.extend(_GetItemTimestampFields(CQ_CONFIG))

  # "name" is provided as an alias for "cluster_name"
  return _PrepareFieldList(fields, [
    ("name", "cluster_name")])
+
+
class NetworkQueryData:
  """Data container for network data queries.

  """
  def __init__(self, networks, network_to_groups,
               network_to_instances, stats):
    """Initializes this class.

    @param networks: List of network objects
    @type network_to_groups: dict; network UUID as key
    @param network_to_groups: Per-network list of groups
    @type network_to_instances: dict; network UUID as key
    @param network_to_instances: Per-network list of instances
    @type stats: dict; network UUID as key
    @param stats: Per-network usage statistics

    """
    self.networks = networks
    self.network_to_groups = network_to_groups
    self.network_to_instances = network_to_instances
    self.stats = stats

    # Used for individual rows; updated by __iter__. Initialized here so the
    # attribute always exists (previously it was only created during
    # iteration, so reading it beforehand raised AttributeError), matching
    # how GroupQueryData initializes its per-row attributes.
    self.curstats = None

  def __iter__(self):
    """Iterate over all networks.

    This function has side-effects and only one instance of the resulting
    generator should be used at a time.

    """
    for net in self.networks:
      if self.stats:
        self.curstats = self.stats.get(net.uuid, None)
      else:
        self.curstats = None
      yield net
+
+
#: Simple network object attributes exposed as query fields: attribute name
#: to (title, kind, flags, doc)
_NETWORK_SIMPLE_FIELDS = {
  "name": ("Network", QFT_TEXT, 0, "Name"),
  "network": ("Subnet", QFT_TEXT, 0, "IPv4 subnet"),
  "gateway": ("Gateway", QFT_OTHER, 0, "IPv4 gateway"),
  "network6": ("IPv6Subnet", QFT_OTHER, 0, "IPv6 subnet"),
  "gateway6": ("IPv6Gateway", QFT_OTHER, 0, "IPv6 gateway"),
  "mac_prefix": ("MacPrefix", QFT_OTHER, 0, "MAC address prefix"),
  "serial_no": ("SerialNo", QFT_NUMBER, 0, _SERIAL_NO_DOC % "Network"),
  "uuid": ("UUID", QFT_TEXT, 0, "Network UUID"),
  }


#: Network usage statistics exposed as query fields; values come from the
#: per-network stats in L{NetworkQueryData}
_NETWORK_STATS_FIELDS = {
  "free_count": ("FreeCount", QFT_NUMBER, 0, "Number of available addresses"),
  "reserved_count":
    ("ReservedCount", QFT_NUMBER, 0, "Number of reserved addresses"),
  "map": ("Map", QFT_TEXT, 0, "Actual mapping"),
  "external_reservations":
    ("ExternalReservations", QFT_TEXT, 0, "External reservations"),
  }
+
+
def _GetNetworkStatsField(field, kind, ctx, _):
  """Gets the value of a "stats" field from L{NetworkQueryData}.

  @param field: Field name
  @param kind: Data kind, one of L{constants.QFT_ALL}
  @type ctx: L{NetworkQueryData}
  @param _: Unused row object (the network itself)

  """
  # Statistics for the current row are cached in ctx.curstats by
  # NetworkQueryData.__iter__
  return _GetStatsField(field, kind, ctx.curstats)
+
+
def _BuildNetworkFields():
  """Builds list of fields for network queries.

  """
  fields = [
    # NOTE(review): this uses IQ_CONFIG (an instance-query constant) and
    # names the row "inst" although rows are network objects; NETQ_CONFIG
    # looks intended -- confirm against the network query data collector
    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), IQ_CONFIG, 0,
     lambda ctx, inst: list(inst.GetTags())),
    ]

  # Add simple fields
  fields.extend([
    (_MakeField(name, title, kind, doc),
     NETQ_CONFIG, 0, _GetItemMaybeAttr(name))
    for (name, (title, kind, _, doc)) in _NETWORK_SIMPLE_FIELDS.items()])

  def _GetLength(getter):
    # Number of entries the per-network mapping holds for this network
    return lambda ctx, network: len(getter(ctx)[network.uuid])

  def _GetSortedList(getter):
    # Naturally sorted list of entries for this network
    return lambda ctx, network: utils.NiceSort(getter(ctx)[network.uuid])

  network_to_groups = operator.attrgetter("network_to_groups")
  network_to_instances = operator.attrgetter("network_to_instances")

  # Add fields for node groups
  fields.extend([
    (_MakeField("group_cnt", "NodeGroups", QFT_NUMBER, "Number of nodegroups"),
     NETQ_GROUP, 0, _GetLength(network_to_groups)),
    (_MakeField("group_list", "GroupList", QFT_OTHER,
                "List of nodegroups (group name, NIC mode, NIC link)"),
     NETQ_GROUP, 0, lambda ctx, network: network_to_groups(ctx)[network.uuid]),
    ])

  # Add fields for instances
  fields.extend([
    (_MakeField("inst_cnt", "Instances", QFT_NUMBER, "Number of instances"),
     NETQ_INST, 0, _GetLength(network_to_instances)),
    (_MakeField("inst_list", "InstanceList", QFT_OTHER, "List of instances"),
     NETQ_INST, 0, _GetSortedList(network_to_instances)),
    ])

  # Add fields for usage statistics
  fields.extend([
    (_MakeField(name, title, kind, doc), NETQ_STATS, 0,
     compat.partial(_GetNetworkStatsField, name, kind))
    for (name, (title, kind, _, doc)) in _NETWORK_STATS_FIELDS.items()])

  # Add timestamps
  # NOTE(review): IQ_NETWORKS is an instance-query constant; NETQ_CONFIG
  # looks intended here -- confirm
  fields.extend(_GetItemTimestampFields(IQ_NETWORKS))

  return _PrepareFieldList(fields, [])
+
+#: Fields for cluster information
+CLUSTER_FIELDS = _BuildClusterFields()
+
#: Fields available for node queries
NODE_FIELDS = _BuildNodeFields()
#: Fields available for operating system queries
OS_FIELDS = _BuildOsFields()
+#: Fields available for extstorage provider queries
+EXTSTORAGE_FIELDS = _BuildExtStorageFields()
+
+#: Fields available for job queries
+JOB_FIELDS = _BuildJobFields()
+
+#: Fields available for exports
+EXPORT_FIELDS = _BuildExportFields()
+
+#: Fields available for network queries
+NETWORK_FIELDS = _BuildNetworkFields()
+
#: All available resources
ALL_FIELDS = {
+ constants.QR_CLUSTER: CLUSTER_FIELDS,
constants.QR_INSTANCE: INSTANCE_FIELDS,
constants.QR_NODE: NODE_FIELDS,
constants.QR_LOCK: LOCK_FIELDS,
constants.QR_GROUP: GROUP_FIELDS,
constants.QR_OS: OS_FIELDS,
+ constants.QR_EXTSTORAGE: EXTSTORAGE_FIELDS,
+ constants.QR_JOB: JOB_FIELDS,
+ constants.QR_EXPORT: EXPORT_FIELDS,
+ constants.QR_NETWORK: NETWORK_FIELDS,
}
#: All available field lists