root / lib / query.py @ b5f0b5cc
History | View | Annotate | Download (72.1 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2010, 2011, 2012 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Module for query operations
|
23 |
|
24 |
How it works:
|
25 |
|
26 |
- Add field definitions
|
27 |
- See how L{NODE_FIELDS} is built
|
28 |
- Each field gets:
|
29 |
- Query field definition (L{objects.QueryFieldDefinition}, use
|
30 |
L{_MakeField} for creating), containing:
|
31 |
- Name, must be lowercase and match L{FIELD_NAME_RE}
|
32 |
- Title for tables, must not contain whitespace and match
|
33 |
L{TITLE_RE}
|
34 |
- Value data type, e.g. L{constants.QFT_NUMBER}
|
35 |
- Human-readable description, must not end with punctuation or
|
36 |
contain newlines
|
37 |
- Data request type, see e.g. C{NQ_*}
|
38 |
- OR-ed flags, see C{QFF_*}
|
39 |
- A retrieval function, see L{Query.__init__} for description
|
40 |
- Pass list of fields through L{_PrepareFieldList} for preparation and
|
41 |
checks
|
42 |
- Instantiate L{Query} with prepared field list definition and selected fields
|
43 |
- Call L{Query.RequestedData} to determine what data to collect/compute
|
44 |
- Call L{Query.Query} or L{Query.OldStyleQuery} with collected data and use
|
45 |
result
|
46 |
- Data container must support iteration using C{__iter__}
|
47 |
- Items are passed to retrieval functions and can have any format
|
48 |
- Call L{Query.GetFields} to get list of definitions for selected fields
|
49 |
|
50 |
@attention: Retrieval functions must be idempotent. They can be called multiple
|
51 |
times, in any order and any number of times.
|
52 |
|
53 |
"""
|
54 |
|
55 |
import logging |
56 |
import operator |
57 |
import re |
58 |
|
59 |
from ganeti import constants |
60 |
from ganeti import errors |
61 |
from ganeti import utils |
62 |
from ganeti import compat |
63 |
from ganeti import objects |
64 |
from ganeti import ht |
65 |
from ganeti import runtime |
66 |
from ganeti import qlang |
67 |
from ganeti import jstore |
68 |
|
69 |
from ganeti.constants import (QFT_UNKNOWN, QFT_TEXT, QFT_BOOL, QFT_NUMBER, |
70 |
QFT_UNIT, QFT_TIMESTAMP, QFT_OTHER, |
71 |
RS_NORMAL, RS_UNKNOWN, RS_NODATA, |
72 |
RS_UNAVAIL, RS_OFFLINE) |
73 |
|
74 |
|
75 |
# Constants for requesting data from the caller/data provider. Each property
# collected/computed separately by the data provider should have its own to
# only collect the requested data and not more.

#: Data kinds for node queries (NQ_*)
(NQ_CONFIG,
 NQ_INST,
 NQ_LIVE,
 NQ_GROUP,
 NQ_OOB) = range(1, 6)

#: Data kinds for instance queries (IQ_*)
(IQ_CONFIG,
 IQ_LIVE,
 IQ_DISKUSAGE,
 IQ_CONSOLE,
 IQ_NODES) = range(100, 105)

#: Data kinds for lock queries (LQ_*) — presumably mode/owner/pending;
#: confirm against the lock query field definitions
(LQ_MODE,
 LQ_OWNER,
 LQ_PENDING) = range(10, 13)

#: Data kinds for node group queries (GQ_*)
(GQ_CONFIG,
 GQ_NODE,
 GQ_INST,
 GQ_DISKPARAMS) = range(200, 204)

#: Data kinds for cluster queries (CQ_*)
(CQ_CONFIG,
 CQ_QUEUE_DRAINED,
 CQ_WATCHER_PAUSE) = range(300, 303)

# Query field flags (OR-able bit values attached to field definitions)
QFF_HOSTNAME = 0x01
QFF_IP_ADDRESS = 0x02
QFF_JOB_ID = 0x04
QFF_SPLIT_TIMESTAMP = 0x08
# Next values: 0x10, 0x20, 0x40, 0x80, 0x100, 0x200
#: Mask of all currently defined field flags
QFF_ALL = (QFF_HOSTNAME | QFF_IP_ADDRESS | QFF_JOB_ID | QFF_SPLIT_TIMESTAMP)

#: Field names: lowercase alphanumerics plus "/", ".", "_"
FIELD_NAME_RE = re.compile(r"^[a-z0-9/._]+$")
#: Field titles: any non-empty string without whitespace
TITLE_RE = re.compile(r"^[^\s]+$")
#: Field descriptions: start with uppercase, no trailing punctuation
DOC_RE = re.compile(r"^[A-Z].*[^.,?!]$")

#: Verification function for each field type
_VERIFY_FN = {
  QFT_UNKNOWN: ht.TNone,
  QFT_TEXT: ht.TString,
  QFT_BOOL: ht.TBool,
  QFT_NUMBER: ht.TInt,
  QFT_UNIT: ht.TInt,
  QFT_TIMESTAMP: ht.TNumber,
  QFT_OTHER: lambda _: True,
  }

# Unique objects for special field statuses; compared by identity ("is"),
# see _ProcessResult
_FS_UNKNOWN = object()
_FS_NODATA = object()
_FS_UNAVAIL = object()
_FS_OFFLINE = object()

#: List of all special status
_FS_ALL = frozenset([_FS_UNKNOWN, _FS_NODATA, _FS_UNAVAIL, _FS_OFFLINE])

#: VType to QFT mapping
_VTToQFT = {
  # TODO: fix validation of empty strings
  constants.VTYPE_STRING: QFT_OTHER, # since VTYPE_STRINGs can be empty
  constants.VTYPE_MAYBE_STRING: QFT_OTHER,
  constants.VTYPE_BOOL: QFT_BOOL,
  constants.VTYPE_SIZE: QFT_UNIT,
  constants.VTYPE_INT: QFT_NUMBER,
  }

#: Shared description template for serial number fields
_SERIAL_NO_DOC = "%s object serial number, incremented on each modification"
|
147 |
|
148 |
|
149 |
def _GetUnknownField(ctx, item): # pylint: disable=W0613
  """Gets the contents of an unknown field.

  Used as the retrieval function for selected fields that have no
  definition; unconditionally returns the L{_FS_UNKNOWN} marker.

  """
  return _FS_UNKNOWN
|
154 |
|
155 |
|
156 |
def _GetQueryFields(fielddefs, selected): |
157 |
"""Calculates the internal list of selected fields.
|
158 |
|
159 |
Unknown fields are returned as L{constants.QFT_UNKNOWN}.
|
160 |
|
161 |
@type fielddefs: dict
|
162 |
@param fielddefs: Field definitions
|
163 |
@type selected: list of strings
|
164 |
@param selected: List of selected fields
|
165 |
|
166 |
"""
|
167 |
result = [] |
168 |
|
169 |
for name in selected: |
170 |
try:
|
171 |
fdef = fielddefs[name] |
172 |
except KeyError: |
173 |
fdef = (_MakeField(name, name, QFT_UNKNOWN, "Unknown field '%s'" % name),
|
174 |
None, 0, _GetUnknownField) |
175 |
|
176 |
assert len(fdef) == 4 |
177 |
|
178 |
result.append(fdef) |
179 |
|
180 |
return result
|
181 |
|
182 |
|
183 |
def GetAllFields(fielddefs):
  """Extract L{objects.QueryFieldDefinition} from field definitions.

  @rtype: list of L{objects.QueryFieldDefinition}

  """
  # Each definition is a 4-tuple with the field definition object first
  return list(map(operator.itemgetter(0), fielddefs))
190 |
|
191 |
|
192 |
class _FilterHints: |
193 |
"""Class for filter analytics.
|
194 |
|
195 |
When filters are used, the user of the L{Query} class usually doesn't know
|
196 |
exactly which items will be necessary for building the result. It therefore
|
197 |
has to prepare and compute the input data for potentially returning
|
198 |
everything.
|
199 |
|
200 |
There are two ways to optimize this. The first, and simpler, is to assign
|
201 |
each field a group of data, so that the caller can determine which
|
202 |
computations are necessary depending on the data groups requested. The list
|
203 |
of referenced groups must also be computed for fields referenced in the
|
204 |
filter.
|
205 |
|
206 |
The second is restricting the items based on a primary key. The primary key
|
207 |
is usually a unique name (e.g. a node name). This class extracts all
|
208 |
referenced names from a filter. If it encounters any filter condition which
|
209 |
disallows such a list to be determined (e.g. a non-equality filter), all
|
210 |
names will be requested.
|
211 |
|
212 |
The end-effect is that any operation other than L{qlang.OP_OR} and
|
213 |
L{qlang.OP_EQUAL} will make the query more expensive.
|
214 |
|
215 |
"""
|
216 |
def __init__(self, namefield): |
217 |
"""Initializes this class.
|
218 |
|
219 |
@type namefield: string
|
220 |
@param namefield: Field caller is interested in
|
221 |
|
222 |
"""
|
223 |
self._namefield = namefield
|
224 |
|
225 |
#: Whether all names need to be requested (e.g. if a non-equality operator
|
226 |
#: has been used)
|
227 |
self._allnames = False |
228 |
|
229 |
#: Which names to request
|
230 |
self._names = None |
231 |
|
232 |
#: Data kinds referenced by the filter (used by L{Query.RequestedData})
|
233 |
self._datakinds = set() |
234 |
|
235 |
def RequestedNames(self): |
236 |
"""Returns all requested values.
|
237 |
|
238 |
Returns C{None} if list of values can't be determined (e.g. encountered
|
239 |
non-equality operators).
|
240 |
|
241 |
@rtype: list
|
242 |
|
243 |
"""
|
244 |
if self._allnames or self._names is None: |
245 |
return None |
246 |
|
247 |
return utils.UniqueSequence(self._names) |
248 |
|
249 |
def ReferencedData(self): |
250 |
"""Returns all kinds of data referenced by the filter.
|
251 |
|
252 |
"""
|
253 |
return frozenset(self._datakinds) |
254 |
|
255 |
def _NeedAllNames(self): |
256 |
"""Changes internal state to request all names.
|
257 |
|
258 |
"""
|
259 |
self._allnames = True |
260 |
self._names = None |
261 |
|
262 |
def NoteLogicOp(self, op): |
263 |
"""Called when handling a logic operation.
|
264 |
|
265 |
@type op: string
|
266 |
@param op: Operator
|
267 |
|
268 |
"""
|
269 |
if op != qlang.OP_OR:
|
270 |
self._NeedAllNames()
|
271 |
|
272 |
def NoteUnaryOp(self, op): # pylint: disable=W0613 |
273 |
"""Called when handling an unary operation.
|
274 |
|
275 |
@type op: string
|
276 |
@param op: Operator
|
277 |
|
278 |
"""
|
279 |
self._NeedAllNames()
|
280 |
|
281 |
def NoteBinaryOp(self, op, datakind, name, value): |
282 |
"""Called when handling a binary operation.
|
283 |
|
284 |
@type op: string
|
285 |
@param op: Operator
|
286 |
@type name: string
|
287 |
@param name: Left-hand side of operator (field name)
|
288 |
@param value: Right-hand side of operator
|
289 |
|
290 |
"""
|
291 |
if datakind is not None: |
292 |
self._datakinds.add(datakind)
|
293 |
|
294 |
if self._allnames: |
295 |
return
|
296 |
|
297 |
# If any operator other than equality was used, all names need to be
|
298 |
# retrieved
|
299 |
if op == qlang.OP_EQUAL and name == self._namefield: |
300 |
if self._names is None: |
301 |
self._names = []
|
302 |
self._names.append(value)
|
303 |
else:
|
304 |
self._NeedAllNames()
|
305 |
|
306 |
|
307 |
def _WrapLogicOp(op_fn, sentences, ctx, item): |
308 |
"""Wrapper for logic operator functions.
|
309 |
|
310 |
"""
|
311 |
return op_fn(fn(ctx, item) for fn in sentences) |
312 |
|
313 |
|
314 |
def _WrapUnaryOp(op_fn, inner, ctx, item): |
315 |
"""Wrapper for unary operator functions.
|
316 |
|
317 |
"""
|
318 |
return op_fn(inner(ctx, item))
|
319 |
|
320 |
|
321 |
def _WrapBinaryOp(op_fn, retrieval_fn, value, ctx, item): |
322 |
"""Wrapper for binary operator functions.
|
323 |
|
324 |
"""
|
325 |
return op_fn(retrieval_fn(ctx, item), value)
|
326 |
|
327 |
|
328 |
def _WrapNot(fn, lhs, rhs): |
329 |
"""Negates the result of a wrapped function.
|
330 |
|
331 |
"""
|
332 |
return not fn(lhs, rhs) |
333 |
|
334 |
|
335 |
def _PrepareRegex(pattern): |
336 |
"""Compiles a regular expression.
|
337 |
|
338 |
"""
|
339 |
try:
|
340 |
return re.compile(pattern)
|
341 |
except re.error, err:
|
342 |
raise errors.ParameterError("Invalid regex pattern (%s)" % err) |
343 |
|
344 |
|
345 |
def _PrepareSplitTimestamp(value):
  """Prepares a value for comparison by L{_MakeSplitTimestampComparison}.

  Plain numbers pass through unchanged; split timestamps are merged via
  L{utils.MergeTime}.

  """
  if ht.TNumber(value):
    return value

  return utils.MergeTime(value)
|
353 |
|
354 |
|
355 |
def _MakeSplitTimestampComparison(fn):
  """Compares split timestamp values after converting to float.

  @param fn: Two-argument comparison function

  """
  def _Compare(lhs, rhs):
    # Left-hand side is a split timestamp, right-hand side is pre-merged
    return fn(utils.MergeTime(lhs), rhs)

  return _Compare
360 |
|
361 |
|
362 |
def _MakeComparisonChecks(fn):
  """Prepares flag-specific comparisons using a comparison function.

  @param fn: Two-argument comparison function
  @return: List of C{(flags, comparison_fn, value_preparation_fn)} tuples

  """
  split_timestamp = (QFF_SPLIT_TIMESTAMP, _MakeSplitTimestampComparison(fn),
                     _PrepareSplitTimestamp)
  job_id = (QFF_JOB_ID, lambda lhs, rhs: fn(jstore.ParseJobId(lhs), rhs),
            jstore.ParseJobId)
  # Fallback entry, used when no flag matches
  generic = (None, fn, None)

  return [split_timestamp, job_id, generic]
373 |
|
374 |
|
375 |
class _FilterCompilerHelper:
  """Converts a query filter to a callable usable for filtering.

  """
  # String statement has no effect, pylint: disable=W0105

  #: How deep filters can be nested
  _LEVELS_MAX = 10

  # Unique identifiers for operator groups
  (_OPTYPE_LOGIC,
   _OPTYPE_UNARY,
   _OPTYPE_BINARY) = range(1, 4)

  """Functions for equality checks depending on field flags.

  List of tuples containing flags and a callable receiving the left- and
  right-hand side of the operator. The flags are an OR-ed value of C{QFF_*}
  (e.g. L{QFF_HOSTNAME} or L{QFF_SPLIT_TIMESTAMP}).

  Order matters. The first item with flags will be used. Flags are checked
  using binary AND.

  """
  _EQUALITY_CHECKS = [
    (QFF_HOSTNAME,
     lambda lhs, rhs: utils.MatchNameComponent(rhs, [lhs],
                                               case_sensitive=False),
     None),
    (QFF_SPLIT_TIMESTAMP, _MakeSplitTimestampComparison(operator.eq),
     _PrepareSplitTimestamp),
    (None, operator.eq, None),
    ]

  """Known operators

  Operator as key (C{qlang.OP_*}), value a tuple of operator group
  (C{_OPTYPE_*}) and a group-specific value:

    - C{_OPTYPE_LOGIC}: Callable taking any number of arguments; used by
      L{_HandleLogicOp}
    - C{_OPTYPE_UNARY}: Always C{None}; details handled by L{_HandleUnaryOp}
    - C{_OPTYPE_BINARY}: Callable taking exactly two parameters, the left- and
      right-hand side of the operator, used by L{_HandleBinaryOp}

  """
  _OPS = {
    # Logic operators
    qlang.OP_OR: (_OPTYPE_LOGIC, compat.any),
    qlang.OP_AND: (_OPTYPE_LOGIC, compat.all),

    # Unary operators
    qlang.OP_NOT: (_OPTYPE_UNARY, None),
    qlang.OP_TRUE: (_OPTYPE_UNARY, None),

    # Binary operators
    qlang.OP_EQUAL: (_OPTYPE_BINARY, _EQUALITY_CHECKS),
    # Inequality is equality with each comparison function negated
    qlang.OP_NOT_EQUAL:
      (_OPTYPE_BINARY, [(flags, compat.partial(_WrapNot, fn), valprepfn)
                        for (flags, fn, valprepfn) in _EQUALITY_CHECKS]),
    qlang.OP_LT: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.lt)),
    qlang.OP_LE: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.le)),
    qlang.OP_GT: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.gt)),
    qlang.OP_GE: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.ge)),
    qlang.OP_REGEXP: (_OPTYPE_BINARY, [
      # Right-hand side is prepared (compiled) by _PrepareRegex
      (None, lambda lhs, rhs: rhs.search(lhs), _PrepareRegex),
      ]),
    qlang.OP_CONTAINS: (_OPTYPE_BINARY, [
      (None, operator.contains, None),
      ]),
    }

  def __init__(self, fields):
    """Initializes this class.

    @param fields: Field definitions (return value of L{_PrepareFieldList})

    """
    self._fields = fields
    self._hints = None
    # Per-call dispatch table; only valid while __call__ is running
    self._op_handler = None

  def __call__(self, hints, qfilter):
    """Converts a query filter into a callable function.

    @type hints: L{_FilterHints} or None
    @param hints: Callbacks doing analysis on filter
    @type qfilter: list
    @param qfilter: Filter structure
    @rtype: callable
    @return: Function receiving context and item as parameters, returning
             boolean as to whether item matches filter

    """
    # Map operator group to (handler, optional hint callback); the hint
    # callbacks are looked up on "hints" and may be None
    self._op_handler = {
      self._OPTYPE_LOGIC:
        (self._HandleLogicOp, getattr(hints, "NoteLogicOp", None)),
      self._OPTYPE_UNARY:
        (self._HandleUnaryOp, getattr(hints, "NoteUnaryOp", None)),
      self._OPTYPE_BINARY:
        (self._HandleBinaryOp, getattr(hints, "NoteBinaryOp", None)),
      }

    try:
      filter_fn = self._Compile(qfilter, 0)
    finally:
      # Reset even if compilation raised, the helper may be reused
      self._op_handler = None

    return filter_fn

  def _Compile(self, qfilter, level):
    """Inner function for converting filters.

    Calls the correct handler functions for the top-level operator. This
    function is called recursively (e.g. for logic operators).

    """
    if not (isinstance(qfilter, (list, tuple)) and qfilter):
      raise errors.ParameterError("Invalid filter on level %s" % level)

    # Limit recursion
    if level >= self._LEVELS_MAX:
      raise errors.ParameterError("Only up to %s levels are allowed (filter"
                                  " nested too deep)" % self._LEVELS_MAX)

    # Create copy to be modified
    operands = qfilter[:]
    # First element is the operator, the rest are its operands
    op = operands.pop(0)

    try:
      (kind, op_data) = self._OPS[op]
    except KeyError:
      raise errors.ParameterError("Unknown operator '%s'" % op)

    (handler, hints_cb) = self._op_handler[kind]

    return handler(hints_cb, level, op, op_data, operands)

  def _LookupField(self, name):
    """Returns a field definition by name.

    """
    try:
      return self._fields[name]
    except KeyError:
      raise errors.ParameterError("Unknown field '%s'" % name)

  def _HandleLogicOp(self, hints_fn, level, op, op_fn, operands):
    """Handles logic operators.

    @type hints_fn: callable
    @param hints_fn: Callback doing some analysis on the filter
    @type level: integer
    @param level: Current depth
    @type op: string
    @param op: Operator
    @type op_fn: callable
    @param op_fn: Function implementing operator
    @type operands: list
    @param operands: List of operands

    """
    if hints_fn:
      hints_fn(op)

    # Each operand is itself a filter; compile all of them recursively
    return compat.partial(_WrapLogicOp, op_fn,
                          [self._Compile(op, level + 1) for op in operands])

  def _HandleUnaryOp(self, hints_fn, level, op, op_fn, operands):
    """Handles unary operators.

    @type hints_fn: callable
    @param hints_fn: Callback doing some analysis on the filter
    @type level: integer
    @param level: Current depth
    @type op: string
    @param op: Operator
    @type op_fn: callable
    @param op_fn: Function implementing operator
    @type operands: list
    @param operands: List of operands

    """
    assert op_fn is None

    if hints_fn:
      hints_fn(op)

    if len(operands) != 1:
      raise errors.ParameterError("Unary operator '%s' expects exactly one"
                                  " operand" % op)

    if op == qlang.OP_TRUE:
      # Operand is a field name; truth-test its retrieved value
      (_, _, _, retrieval_fn) = self._LookupField(operands[0])

      op_fn = operator.truth
      arg = retrieval_fn
    elif op == qlang.OP_NOT:
      # Operand is a nested filter
      op_fn = operator.not_
      arg = self._Compile(operands[0], level + 1)
    else:
      raise errors.ProgrammerError("Can't handle operator '%s'" % op)

    return compat.partial(_WrapUnaryOp, op_fn, arg)

  def _HandleBinaryOp(self, hints_fn, level, op, op_data, operands):
    """Handles binary operators.

    @type hints_fn: callable
    @param hints_fn: Callback doing some analysis on the filter
    @type level: integer
    @param level: Current depth
    @type op: string
    @param op: Operator
    @param op_data: Functions implementing operators
    @type operands: list
    @param operands: List of operands

    """
    # Unused arguments, pylint: disable=W0613
    try:
      (name, value) = operands
    except (ValueError, TypeError):
      raise errors.ParameterError("Invalid binary operator, expected exactly"
                                  " two operands")

    (fdef, datakind, field_flags, retrieval_fn) = self._LookupField(name)

    assert fdef.kind != QFT_UNKNOWN

    # TODO: Type conversions?

    verify_fn = _VERIFY_FN[fdef.kind]
    if not verify_fn(value):
      raise errors.ParameterError("Unable to compare field '%s' (type '%s')"
                                  " with '%s', expected %s" %
                                  (name, fdef.kind, value.__class__.__name__,
                                   verify_fn))

    if hints_fn:
      hints_fn(op, datakind, name, value)

    # Select the first implementation matching the field's flags
    for (fn_flags, fn, valprepfn) in op_data:
      if fn_flags is None or fn_flags & field_flags:
        # Prepare value if necessary (e.g. compile regular expression)
        if valprepfn:
          value = valprepfn(value)

        return compat.partial(_WrapBinaryOp, fn, retrieval_fn, value)

    raise errors.ProgrammerError("Unable to find operator implementation"
                                 " (op '%s', flags %s)" % (op, field_flags))
|
627 |
|
628 |
|
629 |
def _CompileFilter(fields, hints, qfilter):
  """Converts a query filter into a callable function.

  See L{_FilterCompilerHelper} for details.

  @rtype: callable

  """
  compiler = _FilterCompilerHelper(fields)
  return compiler(hints, qfilter)
|
638 |
|
639 |
|
640 |
class Query:
  """Applies a field selection and an optional filter to data containers.

  See L{__init__} for the field definition format and the module docstring
  for the overall workflow.

  """
  def __init__(self, fieldlist, selected, qfilter=None, namefield=None):
    """Initializes this class.

    The field definition is a dictionary with the field's name as a key and
    a tuple containing, in order, the field definition object
    (L{objects.QueryFieldDefinition}), the data kind to help calling code
    collect data and a retrieval function. The retrieval function is called
    with two parameters, in order, the data container and the item in
    container (see L{Query.Query}).

    Users of this class can call L{RequestedData} before preparing the data
    container to determine what data is needed.

    @type fieldlist: dictionary
    @param fieldlist: Field definitions
    @type selected: list of strings
    @param selected: List of selected fields
    @type qfilter: list or None
    @param qfilter: Optional query filter structure
    @type namefield: string or None
    @param namefield: Name of field to use for sorting and for extracting
      requested names from the filter

    """
    assert namefield is None or namefield in fieldlist

    self._fields = _GetQueryFields(fieldlist, selected)

    self._filter_fn = None
    self._requested_names = None
    self._filter_datakinds = frozenset()

    if qfilter is not None:
      # Collect requested names if wanted
      if namefield:
        hints = _FilterHints(namefield)
      else:
        hints = None

      # Build filter function
      self._filter_fn = _CompileFilter(fieldlist, hints, qfilter)
      if hints:
        self._requested_names = hints.RequestedNames()
        self._filter_datakinds = hints.ReferencedData()

    if namefield is None:
      self._name_fn = None
    else:
      # Retrieval function for the sort key
      (_, _, _, self._name_fn) = fieldlist[namefield]

  def RequestedNames(self):
    """Returns all names referenced in the filter.

    If there is no filter or operators are preventing determining the exact
    names, C{None} is returned.

    """
    return self._requested_names

  def RequestedData(self):
    """Gets requested kinds of data.

    Combines the data kinds referenced by the filter with those of the
    selected fields.

    @rtype: frozenset

    """
    return (self._filter_datakinds |
            frozenset(datakind for (_, datakind, _, _) in self._fields
                      if datakind is not None))

  def GetFields(self):
    """Returns the list of fields for this query.

    Includes unknown fields.

    @rtype: List of L{objects.QueryFieldDefinition}

    """
    return GetAllFields(self._fields)

  def Query(self, ctx, sort_by_name=True):
    """Execute a query.

    @param ctx: Data container passed to field retrieval functions, must
      support iteration using C{__iter__}
    @type sort_by_name: boolean
    @param sort_by_name: Whether to sort by name or keep the input data's
      ordering
    @return: List of result rows; each row is a list of
      C{(status, value)} tuples, one per selected field

    """
    sort = (self._name_fn and sort_by_name)

    result = []

    for idx, item in enumerate(ctx):
      if not (self._filter_fn is None or self._filter_fn(ctx, item)):
        continue

      row = [_ProcessResult(fn(ctx, item)) for (_, _, _, fn) in self._fields]

      # Verify result
      if __debug__:
        _VerifyResultRow(self._fields, row)

      if sort:
        (status, name) = _ProcessResult(self._name_fn(ctx, item))
        assert status == constants.RS_NORMAL
        # TODO: Are there cases where we wouldn't want to use NiceSort?
        # Answer: if the name field is non-string...
        # Index is included as a tie-breaker to keep the sort stable
        result.append((utils.NiceSortKey(name), idx, row))
      else:
        result.append(row)

    if not sort:
      return result

    # TODO: Would "heapq" be more efficient than sorting?

    # Sorting in-place instead of using "sorted()"
    result.sort()

    assert not result or (len(result[0]) == 3 and len(result[-1]) == 3)

    # Return an explicit list of the row elements; under Python 3 "map"
    # would return a lazy iterator (behavior is identical under Python 2)
    return [row for (_, _, row) in result]

  def OldStyleQuery(self, ctx, sort_by_name=True):
    """Query with "old" query result format.

    See L{Query.Query} for arguments.

    @raise errors.OpPrereqError: if unknown fields were selected

    """
    unknown = set(fdef.name for (fdef, _, _, _) in self._fields
                  if fdef.kind == QFT_UNKNOWN)
    if unknown:
      raise errors.OpPrereqError("Unknown output fields selected: %s" %
                                 (utils.CommaJoin(unknown), ),
                                 errors.ECODE_INVAL)

    # Strip statuses, keeping only the values
    return [[value for (_, value) in row]
            for row in self.Query(ctx, sort_by_name=sort_by_name)]
775 |
|
776 |
|
777 |
def _ProcessResult(value):
  """Converts result values into externally-visible ones.

  Special status markers (C{_FS_*}) become the corresponding C{RS_*} status
  with a C{None} value; any other value gets status L{RS_NORMAL}.

  """
  # Markers are unique objects, hence identity comparison
  for (marker, status) in [(_FS_UNKNOWN, RS_UNKNOWN),
                           (_FS_NODATA, RS_NODATA),
                           (_FS_UNAVAIL, RS_UNAVAIL),
                           (_FS_OFFLINE, RS_OFFLINE)]:
    if value is marker:
      return (status, None)

  return (RS_NORMAL, value)
|
791 |
|
792 |
|
793 |
def _VerifyResultRow(fields, row):
  """Verifies the contents of a query result row.

  @type fields: list
  @param fields: Field definitions for result
  @type row: list of tuples
  @param row: Row data

  """
  assert len(row) == len(fields)

  errs = []
  for ((status, value), (fdef, _, _, _)) in zip(row, fields):
    if status != RS_NORMAL:
      # Abnormal statuses must not carry a value
      if value is not None:
        errs.append("abnormal field %s has a non-None value" % fdef.name)
    elif not _VERIFY_FN[fdef.kind](value):
      errs.append("normal field %s fails validation (value is %s)" %
                  (fdef.name, value))

  assert not errs, ("Failed validation: %s in row %s" %
                    (utils.CommaJoin(errs), row))
813 |
|
814 |
|
815 |
def _FieldDictKey(field):
  """Generates key for field dictionary.

  Also asserts the field definition's consistency: name/title/description
  format, a callable retrieval function and only known flags.

  @type field: tuple
  @param field: Field definition tuple as described in L{Query.__init__}
  @rtype: string
  @return: Field name

  """
  # Tuple parameters in the "def" line are Python-2-only syntax; unpacking
  # inside the body is portable and keeps the call signature unchanged
  (fdef, _, flags, fn) = field

  assert fdef.name and fdef.title, "Name and title are required"
  assert FIELD_NAME_RE.match(fdef.name)
  assert TITLE_RE.match(fdef.title)
  assert (DOC_RE.match(fdef.doc) and len(fdef.doc.splitlines()) == 1 and
          fdef.doc.strip() == fdef.doc), \
    "Invalid description for field '%s'" % fdef.name
  assert callable(fn)
  assert (flags & ~QFF_ALL) == 0, "Unknown flags for field '%s'" % fdef.name

  return fdef.name
|
829 |
|
830 |
|
831 |
def _PrepareFieldList(fields, aliases):
  """Prepares field list for use by L{Query}.

  Converts the list to a dictionary and does some verification.

  @type fields: list of tuples; (L{objects.QueryFieldDefinition}, data
    kind, retrieval function)
  @param fields: List of fields, see L{Query.__init__} for a better
    description
  @type aliases: list of tuples; (alias, target)
  @param aliases: list of tuples containing aliases; for each
    alias/target pair, a duplicate will be created in the field list
  @rtype: dict
  @return: Field dictionary for L{Query}

  """
  if __debug__:
    dup_titles = utils.FindDuplicates(fdef.title.lower()
                                      for (fdef, _, _, _) in fields)
    assert not dup_titles, "Duplicate title(s) found: %r" % dup_titles

  fielddict = utils.SequenceToDict(fields, key=_FieldDictKey)

  for (alias, target) in aliases:
    assert alias not in fielddict, \
      "Alias %s overrides an existing field" % alias
    assert target in fielddict, \
      "Missing target %s for alias %s" % (target, alias)
    (fdef, datakind, flags, retrieval_fn) = fielddict[target]
    # Copy the definition so the alias gets its own name
    fdef = fdef.Copy()
    fdef.name = alias
    fielddict[alias] = (fdef, datakind, flags, retrieval_fn)

  assert len(fielddict) == len(fields) + len(aliases)
  assert compat.all(name == fdef.name
                    for (name, (fdef, _, _, _)) in fielddict.items())

  return fielddict
|
867 |
|
868 |
|
869 |
def GetQueryResponse(query, ctx, sort_by_name=True):
  """Prepares the response for a query.

  @type query: L{Query}
  @param ctx: Data container, see L{Query.Query}
  @type sort_by_name: boolean
  @param sort_by_name: Whether to sort by name or keep the input data's
    ordering

  """
  data = query.Query(ctx, sort_by_name=sort_by_name)
  response = objects.QueryResponse(data=data, fields=query.GetFields())
  return response.ToDict()
881 |
|
882 |
|
883 |
def QueryFields(fielddefs, selected):
  """Returns list of available fields.

  @type fielddefs: dict
  @param fielddefs: Field definitions
  @type selected: list of strings
  @param selected: List of selected fields
  @return: List of L{objects.QueryFieldDefinition}

  """
  if selected is not None:
    # Keep order as requested by client
    fdefs = Query(fielddefs, selected).GetFields()
  else:
    # Client requests all fields, sort by name
    fdefs = utils.NiceSort(GetAllFields(fielddefs.values()),
                           key=operator.attrgetter("name"))

  return objects.QueryFieldsResponse(fields=fdefs).ToDict()
|
902 |
|
903 |
|
904 |
def _MakeField(name, title, kind, doc):
  """Wrapper for creating L{objects.QueryFieldDefinition} instances.

  @param name: Field name; must match L{FIELD_NAME_RE} (enforced by
    L{_FieldDictKey} when the list is prepared)
  @param title: Human-readable title; must match L{TITLE_RE}
  @param kind: Field type (one of C{constants.QFT_*})
  @param doc: Human-readable description; must match L{DOC_RE}

  """
  return objects.QueryFieldDefinition(name=name, title=title, kind=kind,
                                      doc=doc)
915 |
|
916 |
|
917 |
def _StaticValueInner(value, ctx, _): # pylint: disable=W0613
  """Returns a static value.

  Retrieval-function-compatible helper; context and item are ignored.

  """
  return value
|
922 |
|
923 |
|
924 |
def _StaticValue(value):
  """Prepares a function to return a static value.

  @return: Retrieval function always returning C{value}, regardless of
    context and item (see L{_StaticValueInner})

  """
  return compat.partial(_StaticValueInner, value)
|
929 |
|
930 |
|
931 |
def _GetNodeRole(node, master_name):
  """Determine node role.

  Checks, in order of priority: master, master candidate, drained,
  offline; anything else is a regular node.

  @type node: L{objects.Node}
  @param node: Node object
  @type master_name: string
  @param master_name: Master node name

  """
  if node.name == master_name:
    return constants.NR_MASTER

  if node.master_candidate:
    return constants.NR_MCANDIDATE

  if node.drained:
    return constants.NR_DRAINED

  if node.offline:
    return constants.NR_OFFLINE

  return constants.NR_REGULAR
|
950 |
|
951 |
|
952 |
def _GetItemAttr(attr): |
953 |
"""Returns a field function to return an attribute of the item.
|
954 |
|
955 |
@param attr: Attribute name
|
956 |
|
957 |
"""
|
958 |
getter = operator.attrgetter(attr) |
959 |
return lambda _, item: getter(item) |
960 |
|
961 |
|
962 |
def _GetNDParam(name):
  """Return a field function to return an ND parameter out of the context.

  """
  def _helper(ctx, _):
    params = ctx.ndparams
    if params is None:
      # No group was found for the current node, so no merged parameters
      return _FS_UNAVAIL
    return params.get(name, None)

  return _helper
|
972 |
|
973 |
|
974 |
def _BuildNDFields(is_group):
  """Builds all the ndparam fields.

  @param is_group: whether this is called at group or node level

  """
  # The data-request kind differs between group-level and node-level queries
  if is_group:
    field_kind = GQ_CONFIG
  else:
    field_kind = NQ_GROUP

  result = []
  for name, kind in constants.NDS_PARAMETER_TYPES.items():
    title = constants.NDS_PARAMETER_TITLES.get(name, "ndp/%s" % name)
    fdef = _MakeField("ndp/%s" % name, title, _VTToQFT[kind],
                      "The \"%s\" node parameter" % name)
    result.append((fdef, field_kind, 0, _GetNDParam(name)))
  return result
990 |
|
991 |
|
992 |
def _ConvWrapInner(convert, fn, ctx, item):
  """Wrapper for converting values.

  @param convert: Conversion function receiving value as single parameter
  @param fn: Retrieval function

  """
  value = fn(ctx, item)

  # Abnormal status markers (compared by identity) are passed through
  # unconverted
  for status in _FS_ALL:
    if value is status:
      return value

  # TODO: Should conversion function also receive context, item or both?
  return convert(value)
|
1008 |
|
1009 |
|
1010 |
def _ConvWrap(convert, fn):
  """Convenience wrapper for L{_ConvWrapInner}.

  @param convert: Conversion function receiving value as single parameter
  @param fn: Retrieval function

  """
  wrapped = compat.partial(_ConvWrapInner, convert, fn)
  return wrapped
|
1018 |
|
1019 |
|
1020 |
def _GetItemTimestamp(getter):
  """Returns function for getting timestamp of item.

  @type getter: callable
  @param getter: Function to retrieve timestamp attribute

  """
  def _fetch(_, item):
    """Returns a timestamp of item.

    """
    stamp = getter(item)
    if stamp is not None:
      return stamp
    # Old configs might not have all timestamps
    return _FS_UNAVAIL

  return _fetch
|
1039 |
|
1040 |
|
1041 |
def _GetItemTimestampFields(datatype):
  """Returns common timestamp fields.

  @param datatype: Field data type for use by L{Query.RequestedData}

  """
  # Both fields share the same structure; build them table-driven
  result = []
  for (fname, ftitle, fdoc) in [("ctime", "CTime", "Creation timestamp"),
                                ("mtime", "MTime", "Modification timestamp")]:
    result.append((_MakeField(fname, ftitle, QFT_TIMESTAMP, fdoc),
                   datatype, 0,
                   _GetItemTimestamp(operator.attrgetter(fname))))
  return result
1053 |
|
1054 |
|
1055 |
class NodeQueryData:
  """Data container for node data queries.

  Holds all data collected for a node query; rows are produced by iterating
  over the container, which fills per-row state (C{ndparams},
  C{curlive_data}) as a side effect.

  """
  def __init__(self, nodes, live_data, master_name, node_to_primary,
               node_to_secondary, groups, oob_support, cluster):
    """Initializes this class.

    @param nodes: Node objects to iterate over
    @param live_data: Per-node live data, indexed by node name
    @param master_name: Name of the master node
    @param node_to_primary: Map of node name to primary instances
    @param node_to_secondary: Map of node name to secondary instances
    @param groups: Node group objects, indexed by group UUID
    @param oob_support: Map of node name to out-of-band support flag
    @param cluster: Cluster object (used for filling node parameters)

    """
    self.nodes = nodes
    self.live_data = live_data
    self.master_name = master_name
    self.node_to_primary = node_to_primary
    self.node_to_secondary = node_to_secondary
    self.groups = groups
    self.oob_support = oob_support
    self.cluster = cluster

    # Used for individual rows
    # ndparams: merged node parameters for the node currently being yielded
    self.curlive_data = None
    # curlive_data: live data for the node currently being yielded
    self.ndparams = None

  def __iter__(self):
    """Iterate over all nodes.

    This function has side-effects and only one instance of the resulting
    generator should be used at a time.

    """
    for node in self.nodes:
      group = self.groups.get(node.group, None)
      if group is None:
        # Node's group is unknown; per-node parameters can't be merged
        self.ndparams = None
      else:
        self.ndparams = self.cluster.FillND(node, group)
      if self.live_data:
        self.curlive_data = self.live_data.get(node.name, None)
      else:
        self.curlive_data = None
      yield node
|
1095 |
|
1096 |
|
1097 |
#: Fields that are direct attributes of an L{objects.Node} object
#: Value tuples are (title, kind, flags, doc); each entry is turned into a
#: C{NQ_CONFIG} field retrieved via L{_GetItemAttr} in L{_BuildNodeFields}
_NODE_SIMPLE_FIELDS = {
  "drained": ("Drained", QFT_BOOL, 0, "Whether node is drained"),
  "master_candidate": ("MasterC", QFT_BOOL, 0,
                       "Whether node is a master candidate"),
  "master_capable": ("MasterCapable", QFT_BOOL, 0,
                     "Whether node can become a master candidate"),
  "name": ("Node", QFT_TEXT, QFF_HOSTNAME, "Node name"),
  "offline": ("Offline", QFT_BOOL, 0, "Whether node is marked offline"),
  "serial_no": ("SerialNo", QFT_NUMBER, 0, _SERIAL_NO_DOC % "Node"),
  "uuid": ("UUID", QFT_TEXT, 0, "Node UUID"),
  "vm_capable": ("VMCapable", QFT_BOOL, 0, "Whether node can host instances"),
  }
1110 |
|
1111 |
|
1112 |
#: Fields requiring talking to the node
# Note that none of these are available for non-vm_capable nodes
#: Value tuples are (title, kind, live-data key, doc); the third element is
#: the key looked up in the node's live data by L{_GetLiveNodeField}
_NODE_LIVE_FIELDS = {
  "bootid": ("BootID", QFT_TEXT, "bootid",
             "Random UUID renewed for each system reboot, can be used"
             " for detecting reboots by tracking changes"),
  "cnodes": ("CNodes", QFT_NUMBER, "cpu_nodes",
             "Number of NUMA domains on node (if exported by hypervisor)"),
  "csockets": ("CSockets", QFT_NUMBER, "cpu_sockets",
               "Number of physical CPU sockets (if exported by hypervisor)"),
  "ctotal": ("CTotal", QFT_NUMBER, "cpu_total", "Number of logical processors"),
  "dfree": ("DFree", QFT_UNIT, "vg_free",
            "Available disk space in volume group"),
  "dtotal": ("DTotal", QFT_UNIT, "vg_size",
             "Total disk space in volume group used for instance disk"
             " allocation"),
  "mfree": ("MFree", QFT_UNIT, "memory_free",
            "Memory available for instance allocations"),
  "mnode": ("MNode", QFT_UNIT, "memory_dom0",
            "Amount of memory used by node (dom0 for Xen)"),
  "mtotal": ("MTotal", QFT_UNIT, "memory_total",
             "Total amount of memory of physical machine"),
  }
1135 |
|
1136 |
|
1137 |
def _GetGroup(cb):
  """Build function for calling another function with a node group.

  @param cb: The callback to be called with the nodegroup

  """
  def fn(ctx, node):
    """Get group data for a node.

    @type ctx: L{NodeQueryData}
    @type node: L{objects.Node}
    @param node: Node object

    """
    ng = ctx.groups.get(node.group, None)
    if ng is None:
      # Nodes always have a group, or the configuration is corrupt
      return _FS_UNAVAIL

    return cb(ctx, node, ng)

  return fn
|
1159 |
|
1160 |
|
1161 |
def _GetNodeGroup(ctx, node, ng): # pylint: disable=W0613 |
1162 |
"""Returns the name of a node's group.
|
1163 |
|
1164 |
@type ctx: L{NodeQueryData}
|
1165 |
@type node: L{objects.Node}
|
1166 |
@param node: Node object
|
1167 |
@type ng: L{objects.NodeGroup}
|
1168 |
@param ng: The node group this node belongs to
|
1169 |
|
1170 |
"""
|
1171 |
return ng.name
|
1172 |
|
1173 |
|
1174 |
def _GetNodePower(ctx, node):
  """Returns the node powered state

  @type ctx: L{NodeQueryData}
  @type node: L{objects.Node}
  @param node: Node object

  """
  # Power state is only known for nodes with out-of-band support
  if not ctx.oob_support[node.name]:
    return _FS_UNAVAIL

  return node.powered
|
1186 |
|
1187 |
|
1188 |
def _GetNdParams(ctx, node, ng):
  """Returns the ndparams for this node.

  @type ctx: L{NodeQueryData}
  @type node: L{objects.Node}
  @param node: Node object
  @type ng: L{objects.NodeGroup}
  @param ng: The node group this node belongs to

  """
  # First fill node parameters from the group, then from cluster defaults
  group_filled = ng.FillND(node)
  return ctx.cluster.SimpleFillND(group_filled)
|
1199 |
|
1200 |
|
1201 |
def _GetLiveNodeField(field, kind, ctx, node):
  """Gets the value of a "live" field from L{NodeQueryData}.

  @param field: Live field name
  @param kind: Data kind, one of L{constants.QFT_ALL}
  @type ctx: L{NodeQueryData}
  @type node: L{objects.Node}
  @param node: Node object

  """
  if node.offline:
    return _FS_OFFLINE

  if not node.vm_capable:
    return _FS_UNAVAIL

  if not ctx.curlive_data:
    return _FS_NODATA

  try:
    value = ctx.curlive_data[field]
  except KeyError:
    return _FS_UNAVAIL

  if kind == QFT_TEXT:
    return value

  assert kind in (QFT_NUMBER, QFT_UNIT)

  # Try to convert into number
  try:
    return int(value)
  except (ValueError, TypeError):
    # Bug fix: arguments were previously swapped ((value, field)), making the
    # log message report the value as the field name and vice versa
    logging.exception("Failed to convert node field '%s' (value %r) to int",
                      field, value)
    return _FS_UNAVAIL
|
1237 |
|
1238 |
|
1239 |
def _GetNodeHvState(_, node): |
1240 |
"""Converts node's hypervisor state for query result.
|
1241 |
|
1242 |
"""
|
1243 |
hv_state = node.hv_state |
1244 |
|
1245 |
if hv_state is None: |
1246 |
return _FS_UNAVAIL
|
1247 |
|
1248 |
return dict((name, value.ToDict()) for (name, value) in hv_state.items()) |
1249 |
|
1250 |
|
1251 |
def _GetNodeDiskState(_, node): |
1252 |
"""Converts node's disk state for query result.
|
1253 |
|
1254 |
"""
|
1255 |
disk_state = node.disk_state |
1256 |
|
1257 |
if disk_state is None: |
1258 |
return _FS_UNAVAIL
|
1259 |
|
1260 |
return dict((disk_kind, dict((name, value.ToDict()) |
1261 |
for (name, value) in kind_state.items())) |
1262 |
for (disk_kind, kind_state) in disk_state.items()) |
1263 |
|
1264 |
|
1265 |
def _BuildNodeFields():
  """Builds list of fields for node queries.

  @return: Prepared field list (see L{_PrepareFieldList})

  """
  fields = [
    (_MakeField("pip", "PrimaryIP", QFT_TEXT, "Primary IP address"),
     NQ_CONFIG, 0, _GetItemAttr("primary_ip")),
    (_MakeField("sip", "SecondaryIP", QFT_TEXT, "Secondary IP address"),
     NQ_CONFIG, 0, _GetItemAttr("secondary_ip")),
    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), NQ_CONFIG, 0,
     lambda ctx, node: list(node.GetTags())),
    (_MakeField("master", "IsMaster", QFT_BOOL, "Whether node is master"),
     NQ_CONFIG, 0, lambda ctx, node: node.name == ctx.master_name),
    (_MakeField("group", "Group", QFT_TEXT, "Node group"), NQ_GROUP, 0,
     _GetGroup(_GetNodeGroup)),
    (_MakeField("group.uuid", "GroupUUID", QFT_TEXT, "UUID of node group"),
     NQ_CONFIG, 0, _GetItemAttr("group")),
    (_MakeField("powered", "Powered", QFT_BOOL,
                "Whether node is thought to be powered on"),
     NQ_OOB, 0, _GetNodePower),
    (_MakeField("ndparams", "NodeParameters", QFT_OTHER,
                "Merged node parameters"),
     NQ_GROUP, 0, _GetGroup(_GetNdParams)),
    (_MakeField("custom_ndparams", "CustomNodeParameters", QFT_OTHER,
                "Custom node parameters"),
     NQ_GROUP, 0, _GetItemAttr("ndparams")),
    (_MakeField("hv_state", "HypervisorState", QFT_OTHER, "Hypervisor state"),
     NQ_CONFIG, 0, _GetNodeHvState),
    (_MakeField("disk_state", "DiskState", QFT_OTHER, "Disk state"),
     NQ_CONFIG, 0, _GetNodeDiskState),
    ]

  fields.extend(_BuildNDFields(False))

  # Node role
  role_values = (constants.NR_MASTER, constants.NR_MCANDIDATE,
                 constants.NR_REGULAR, constants.NR_DRAINED,
                 constants.NR_OFFLINE)
  role_doc = ("Node role; \"%s\" for master, \"%s\" for master candidate,"
              " \"%s\" for regular, \"%s\" for drained, \"%s\" for offline" %
              role_values)
  fields.append((_MakeField("role", "Role", QFT_TEXT, role_doc), NQ_CONFIG, 0,
                 lambda ctx, node: _GetNodeRole(node, ctx.master_name)))
  assert set(role_values) == constants.NR_ALL

  # Helpers producing count/list retrievers over a per-node instance mapping
  def _GetLength(getter):
    return lambda ctx, node: len(getter(ctx)[node.name])

  def _GetList(getter):
    return lambda ctx, node: list(getter(ctx)[node.name])

  # Add fields operating on instance lists
  for prefix, titleprefix, docword, getter in \
      [("p", "Pri", "primary", operator.attrgetter("node_to_primary")),
       ("s", "Sec", "secondary", operator.attrgetter("node_to_secondary"))]:
    # TODO: Allow filtering by hostname in list
    fields.extend([
      (_MakeField("%sinst_cnt" % prefix, "%sinst" % prefix.upper(), QFT_NUMBER,
                  "Number of instances with this node as %s" % docword),
       NQ_INST, 0, _GetLength(getter)),
      (_MakeField("%sinst_list" % prefix, "%sInstances" % titleprefix,
                  QFT_OTHER,
                  "List of instances with this node as %s" % docword),
       NQ_INST, 0, _GetList(getter)),
      ])

  # Add simple fields
  fields.extend([
    (_MakeField(name, title, kind, doc), NQ_CONFIG, flags, _GetItemAttr(name))
    for (name, (title, kind, flags, doc)) in _NODE_SIMPLE_FIELDS.items()
    ])

  # Add fields requiring live data
  fields.extend([
    (_MakeField(name, title, kind, doc), NQ_LIVE, 0,
     compat.partial(_GetLiveNodeField, nfield, kind))
    for (name, (title, kind, nfield, doc)) in _NODE_LIVE_FIELDS.items()
    ])

  # Add timestamps
  fields.extend(_GetItemTimestampFields(NQ_CONFIG))

  return _PrepareFieldList(fields, [])
|
1348 |
|
1349 |
|
1350 |
class InstanceQueryData:
  """Data container for instance data queries.

  Rows are produced by iterating over the container; iteration fills the
  per-row C{inst_*} parameter attributes as a side effect.

  """
  def __init__(self, instances, cluster, disk_usage, offline_nodes, bad_nodes,
               live_data, wrongnode_inst, console, nodes, groups):
    """Initializes this class.

    @param instances: List of instance objects
    @param cluster: Cluster object
    @type disk_usage: dict; instance name as key
    @param disk_usage: Per-instance disk usage
    @type offline_nodes: list of strings
    @param offline_nodes: List of offline nodes
    @type bad_nodes: list of strings
    @param bad_nodes: List of faulty nodes
    @type live_data: dict; instance name as key
    @param live_data: Per-instance live data
    @type wrongnode_inst: set
    @param wrongnode_inst: Set of instances running on wrong node(s)
    @type console: dict; instance name as key
    @param console: Per-instance console information
    @type nodes: dict; node name as key
    @param nodes: Node objects
    @param groups: Node group objects, indexed by group UUID

    """
    assert len(set(bad_nodes) & set(offline_nodes)) == len(offline_nodes), \
           "Offline nodes not included in bad nodes"
    assert not (set(live_data.keys()) & set(bad_nodes)), \
           "Found live data for bad or offline nodes"

    self.instances = instances
    self.cluster = cluster
    self.disk_usage = disk_usage
    self.offline_nodes = offline_nodes
    self.bad_nodes = bad_nodes
    self.live_data = live_data
    self.wrongnode_inst = wrongnode_inst
    self.console = console
    self.nodes = nodes
    self.groups = groups

    # Used for individual rows
    # Merged/filled parameters for the instance currently being yielded
    self.inst_hvparams = None
    self.inst_beparams = None
    self.inst_osparams = None
    self.inst_nicparams = None

  def __iter__(self):
    """Iterate over all instances.

    This function has side-effects and only one instance of the resulting
    generator should be used at a time.

    """
    for inst in self.instances:
      self.inst_hvparams = self.cluster.FillHV(inst, skip_globals=True)
      self.inst_beparams = self.cluster.FillBE(inst)
      self.inst_osparams = self.cluster.SimpleFillOS(inst.os, inst.osparams)
      self.inst_nicparams = [self.cluster.SimpleFillNIC(nic.nicparams)
                             for nic in inst.nics]

      yield inst
|
1413 |
|
1414 |
|
1415 |
def _GetInstOperState(ctx, inst): |
1416 |
"""Get instance's operational status.
|
1417 |
|
1418 |
@type ctx: L{InstanceQueryData}
|
1419 |
@type inst: L{objects.Instance}
|
1420 |
@param inst: Instance object
|
1421 |
|
1422 |
"""
|
1423 |
# Can't use RS_OFFLINE here as it would describe the instance to
|
1424 |
# be offline when we actually don't know due to missing data
|
1425 |
if inst.primary_node in ctx.bad_nodes: |
1426 |
return _FS_NODATA
|
1427 |
else:
|
1428 |
return bool(ctx.live_data.get(inst.name)) |
1429 |
|
1430 |
|
1431 |
def _GetInstLiveData(name): |
1432 |
"""Build function for retrieving live data.
|
1433 |
|
1434 |
@type name: string
|
1435 |
@param name: Live data field name
|
1436 |
|
1437 |
"""
|
1438 |
def fn(ctx, inst): |
1439 |
"""Get live data for an instance.
|
1440 |
|
1441 |
@type ctx: L{InstanceQueryData}
|
1442 |
@type inst: L{objects.Instance}
|
1443 |
@param inst: Instance object
|
1444 |
|
1445 |
"""
|
1446 |
if (inst.primary_node in ctx.bad_nodes or |
1447 |
inst.primary_node in ctx.offline_nodes):
|
1448 |
# Can't use RS_OFFLINE here as it would describe the instance to be
|
1449 |
# offline when we actually don't know due to missing data
|
1450 |
return _FS_NODATA
|
1451 |
|
1452 |
if inst.name in ctx.live_data: |
1453 |
data = ctx.live_data[inst.name] |
1454 |
if name in data: |
1455 |
return data[name]
|
1456 |
|
1457 |
return _FS_UNAVAIL
|
1458 |
|
1459 |
return fn
|
1460 |
|
1461 |
|
1462 |
def _GetInstStatus(ctx, inst):
  """Get instance status.

  @type ctx: L{InstanceQueryData}
  @type inst: L{objects.Instance}
  @param inst: Instance object

  """
  # Order matters: node-level problems shadow the instance-level state
  if inst.primary_node in ctx.offline_nodes:
    return constants.INSTST_NODEOFFLINE

  if inst.primary_node in ctx.bad_nodes:
    return constants.INSTST_NODEDOWN

  is_running = bool(ctx.live_data.get(inst.name))
  wants_up = (inst.admin_state == constants.ADMINST_UP)

  if is_running:
    if inst.name in ctx.wrongnode_inst:
      return constants.INSTST_WRONGNODE
    if wants_up:
      return constants.INSTST_RUNNING
    return constants.INSTST_ERRORUP

  if wants_up:
    return constants.INSTST_ERRORDOWN
  if inst.admin_state == constants.ADMINST_DOWN:
    return constants.INSTST_ADMINDOWN

  return constants.INSTST_ADMINOFFLINE
|
1490 |
|
1491 |
|
1492 |
def _GetInstDiskSize(index): |
1493 |
"""Build function for retrieving disk size.
|
1494 |
|
1495 |
@type index: int
|
1496 |
@param index: Disk index
|
1497 |
|
1498 |
"""
|
1499 |
def fn(_, inst): |
1500 |
"""Get size of a disk.
|
1501 |
|
1502 |
@type inst: L{objects.Instance}
|
1503 |
@param inst: Instance object
|
1504 |
|
1505 |
"""
|
1506 |
try:
|
1507 |
return inst.disks[index].size
|
1508 |
except IndexError: |
1509 |
return _FS_UNAVAIL
|
1510 |
|
1511 |
return fn
|
1512 |
|
1513 |
|
1514 |
def _GetInstNic(index, cb): |
1515 |
"""Build function for calling another function with an instance NIC.
|
1516 |
|
1517 |
@type index: int
|
1518 |
@param index: NIC index
|
1519 |
@type cb: callable
|
1520 |
@param cb: Callback
|
1521 |
|
1522 |
"""
|
1523 |
def fn(ctx, inst): |
1524 |
"""Call helper function with instance NIC.
|
1525 |
|
1526 |
@type ctx: L{InstanceQueryData}
|
1527 |
@type inst: L{objects.Instance}
|
1528 |
@param inst: Instance object
|
1529 |
|
1530 |
"""
|
1531 |
try:
|
1532 |
nic = inst.nics[index] |
1533 |
except IndexError: |
1534 |
return _FS_UNAVAIL
|
1535 |
|
1536 |
return cb(ctx, index, nic)
|
1537 |
|
1538 |
return fn
|
1539 |
|
1540 |
|
1541 |
def _GetInstNicIp(ctx, _, nic): # pylint: disable=W0613 |
1542 |
"""Get a NIC's IP address.
|
1543 |
|
1544 |
@type ctx: L{InstanceQueryData}
|
1545 |
@type nic: L{objects.NIC}
|
1546 |
@param nic: NIC object
|
1547 |
|
1548 |
"""
|
1549 |
if nic.ip is None: |
1550 |
return _FS_UNAVAIL
|
1551 |
else:
|
1552 |
return nic.ip
|
1553 |
|
1554 |
|
1555 |
def _GetInstNicBridge(ctx, index, _):
  """Get a NIC's bridge.

  @type ctx: L{InstanceQueryData}
  @type index: int
  @param index: NIC index

  """
  # Bug fix: the previous check used ">= index", which also accepts
  # index == len and would then fail with IndexError below; a valid
  # position requires a strictly greater length
  assert len(ctx.inst_nicparams) > index

  nicparams = ctx.inst_nicparams[index]

  if nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
    return nicparams[constants.NIC_LINK]
  else:
    # Only bridged NICs have a bridge
    return _FS_UNAVAIL
|
1571 |
|
1572 |
|
1573 |
def _GetInstAllNicBridges(ctx, inst):
  """Get all network bridges for an instance.

  @type ctx: L{InstanceQueryData}
  @type inst: L{objects.Instance}
  @param inst: Instance object

  """
  assert len(ctx.inst_nicparams) == len(inst.nics)

  # One entry per NIC; non-bridged NICs are reported as None
  bridges = []
  for nicp in ctx.inst_nicparams:
    link = None
    if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      link = nicp[constants.NIC_LINK]
    bridges.append(link)

  assert len(bridges) == len(inst.nics)

  return bridges
|
1594 |
|
1595 |
|
1596 |
def _GetInstNicParam(name): |
1597 |
"""Build function for retrieving a NIC parameter.
|
1598 |
|
1599 |
@type name: string
|
1600 |
@param name: Parameter name
|
1601 |
|
1602 |
"""
|
1603 |
def fn(ctx, index, _): |
1604 |
"""Get a NIC's bridge.
|
1605 |
|
1606 |
@type ctx: L{InstanceQueryData}
|
1607 |
@type inst: L{objects.Instance}
|
1608 |
@param inst: Instance object
|
1609 |
@type nic: L{objects.NIC}
|
1610 |
@param nic: NIC object
|
1611 |
|
1612 |
"""
|
1613 |
assert len(ctx.inst_nicparams) >= index |
1614 |
return ctx.inst_nicparams[index][name]
|
1615 |
|
1616 |
return fn
|
1617 |
|
1618 |
|
1619 |
def _GetInstanceNetworkFields():
  """Get instance fields involving network interfaces.

  @return: Tuple containing list of field definitions used as input for
    L{_PrepareFieldList} and a list of aliases

  """
  # Retrieval callbacks shared by all per-index NIC fields
  nic_mac_fn = lambda ctx, _, nic: nic.mac
  nic_mode_fn = _GetInstNicParam(constants.NIC_MODE)
  nic_link_fn = _GetInstNicParam(constants.NIC_LINK)

  fields = [
    # All NICs
    (_MakeField("nic.count", "NICs", QFT_NUMBER,
                "Number of network interfaces"),
     IQ_CONFIG, 0, lambda ctx, inst: len(inst.nics)),
    (_MakeField("nic.macs", "NIC_MACs", QFT_OTHER,
                "List containing each network interface's MAC address"),
     IQ_CONFIG, 0, lambda ctx, inst: [nic.mac for nic in inst.nics]),
    (_MakeField("nic.ips", "NIC_IPs", QFT_OTHER,
                "List containing each network interface's IP address"),
     IQ_CONFIG, 0, lambda ctx, inst: [nic.ip for nic in inst.nics]),
    (_MakeField("nic.modes", "NIC_modes", QFT_OTHER,
                "List containing each network interface's mode"), IQ_CONFIG, 0,
     lambda ctx, inst: [nicp[constants.NIC_MODE]
                        for nicp in ctx.inst_nicparams]),
    (_MakeField("nic.links", "NIC_links", QFT_OTHER,
                "List containing each network interface's link"), IQ_CONFIG, 0,
     lambda ctx, inst: [nicp[constants.NIC_LINK]
                        for nicp in ctx.inst_nicparams]),
    (_MakeField("nic.bridges", "NIC_bridges", QFT_OTHER,
                "List containing each network interface's bridge"),
     IQ_CONFIG, 0, _GetInstAllNicBridges),
    ]

  # NICs by number
  for i in range(constants.MAX_NICS):
    numtext = utils.FormatOrdinal(i + 1)
    fields.extend([
      (_MakeField("nic.ip/%s" % i, "NicIP/%s" % i, QFT_TEXT,
                  "IP address of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicIp)),
      (_MakeField("nic.mac/%s" % i, "NicMAC/%s" % i, QFT_TEXT,
                  "MAC address of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, nic_mac_fn)),
      (_MakeField("nic.mode/%s" % i, "NicMode/%s" % i, QFT_TEXT,
                  "Mode of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, nic_mode_fn)),
      (_MakeField("nic.link/%s" % i, "NicLink/%s" % i, QFT_TEXT,
                  "Link of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, nic_link_fn)),
      (_MakeField("nic.bridge/%s" % i, "NicBridge/%s" % i, QFT_TEXT,
                  "Bridge of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicBridge)),
      ])

  aliases = [
    # Legacy fields for first NIC
    ("ip", "nic.ip/0"),
    ("mac", "nic.mac/0"),
    ("bridge", "nic.bridge/0"),
    ("nic_mode", "nic.mode/0"),
    ("nic_link", "nic.link/0"),
    ]

  return (fields, aliases)
|
1685 |
|
1686 |
|
1687 |
def _GetInstDiskUsage(ctx, inst): |
1688 |
"""Get disk usage for an instance.
|
1689 |
|
1690 |
@type ctx: L{InstanceQueryData}
|
1691 |
@type inst: L{objects.Instance}
|
1692 |
@param inst: Instance object
|
1693 |
|
1694 |
"""
|
1695 |
usage = ctx.disk_usage[inst.name] |
1696 |
|
1697 |
if usage is None: |
1698 |
usage = 0
|
1699 |
|
1700 |
return usage
|
1701 |
|
1702 |
|
1703 |
def _GetInstanceConsole(ctx, inst): |
1704 |
"""Get console information for instance.
|
1705 |
|
1706 |
@type ctx: L{InstanceQueryData}
|
1707 |
@type inst: L{objects.Instance}
|
1708 |
@param inst: Instance object
|
1709 |
|
1710 |
"""
|
1711 |
consinfo = ctx.console[inst.name] |
1712 |
|
1713 |
if consinfo is None: |
1714 |
return _FS_UNAVAIL
|
1715 |
|
1716 |
return consinfo
|
1717 |
|
1718 |
|
1719 |
def _GetInstanceDiskFields():
  """Get instance fields involving disks.

  @return: List of field definitions used as input for L{_PrepareFieldList}

  """
  fields = [
    (_MakeField("disk_usage", "DiskUsage", QFT_UNIT,
                "Total disk space used by instance on each of its nodes;"
                " this is not the disk size visible to the instance, but"
                " the usage on the node"),
     IQ_DISKUSAGE, 0, _GetInstDiskUsage),
    (_MakeField("disk.count", "Disks", QFT_NUMBER, "Number of disks"),
     IQ_CONFIG, 0, lambda ctx, inst: len(inst.disks)),
    (_MakeField("disk.sizes", "Disk_sizes", QFT_OTHER, "List of disk sizes"),
     IQ_CONFIG, 0, lambda ctx, inst: [disk.size for disk in inst.disks]),
    ]

  # Disks by number
  fields.extend([
    (_MakeField("disk.size/%s" % i, "Disk/%s" % i, QFT_UNIT,
                "Disk size of %s disk" % utils.FormatOrdinal(i + 1)),
     IQ_CONFIG, 0, _GetInstDiskSize(i))
    for i in range(constants.MAX_DISKS)
    ])

  return fields
|
1746 |
|
1747 |
|
1748 |
def _GetInstanceParameterFields():
  """Get instance fields involving parameters.

  @return: List of field definitions used as input for L{_PrepareFieldList}

  """
  fields = [
    # Filled parameters
    (_MakeField("hvparams", "HypervisorParameters", QFT_OTHER,
                "Hypervisor parameters (merged)"),
     IQ_CONFIG, 0, lambda ctx, _: ctx.inst_hvparams),
    (_MakeField("beparams", "BackendParameters", QFT_OTHER,
                "Backend parameters (merged)"),
     IQ_CONFIG, 0, lambda ctx, _: ctx.inst_beparams),
    (_MakeField("osparams", "OpSysParameters", QFT_OTHER,
                "Operating system parameters (merged)"),
     IQ_CONFIG, 0, lambda ctx, _: ctx.inst_osparams),

    # Unfilled parameters
    (_MakeField("custom_hvparams", "CustomHypervisorParameters", QFT_OTHER,
                "Custom hypervisor parameters"),
     IQ_CONFIG, 0, _GetItemAttr("hvparams")),
    (_MakeField("custom_beparams", "CustomBackendParameters", QFT_OTHER,
                "Custom backend parameters",),
     IQ_CONFIG, 0, _GetItemAttr("beparams")),
    (_MakeField("custom_osparams", "CustomOpSysParameters", QFT_OTHER,
                "Custom operating system parameters",),
     IQ_CONFIG, 0, _GetItemAttr("osparams")),
    (_MakeField("custom_nicparams", "CustomNicParameters", QFT_OTHER,
                "Custom network interface parameters"),
     IQ_CONFIG, 0, lambda ctx, inst: [nic.nicparams for nic in inst.nics]),
    ]

  # HV params
  def _GetInstHvParam(name):
    # NOTE(review): missing parameters yield _FS_UNAVAIL here, while the BE
    # variant below yields None — presumably intentional; confirm upstream
    return lambda ctx, _: ctx.inst_hvparams.get(name, _FS_UNAVAIL)

  fields.extend([
    (_MakeField("hv/%s" % name,
                constants.HVS_PARAMETER_TITLES.get(name, "hv/%s" % name),
                _VTToQFT[kind], "The \"%s\" hypervisor parameter" % name),
     IQ_CONFIG, 0, _GetInstHvParam(name))
    for name, kind in constants.HVS_PARAMETER_TYPES.items()
    if name not in constants.HVC_GLOBALS
    ])

  # BE params
  def _GetInstBeParam(name):
    return lambda ctx, _: ctx.inst_beparams.get(name, None)

  fields.extend([
    (_MakeField("be/%s" % name,
                constants.BES_PARAMETER_TITLES.get(name, "be/%s" % name),
                _VTToQFT[kind], "The \"%s\" backend parameter" % name),
     IQ_CONFIG, 0, _GetInstBeParam(name))
    for name, kind in constants.BES_PARAMETER_TYPES.items()
    ])

  return fields
|
1807 |
|
1808 |
|
1809 |
#: Fields that are direct attributes of an L{objects.Instance} object;
#: value tuples are (title, kind, flags, doc)
_INST_SIMPLE_FIELDS = {
  "disk_template": ("Disk_template", QFT_TEXT, 0, "Instance disk template"),
  "hypervisor": ("Hypervisor", QFT_TEXT, 0, "Hypervisor name"),
  "name": ("Instance", QFT_TEXT, QFF_HOSTNAME, "Instance name"),
  # Depending on the hypervisor, the port can be None
  "network_port": ("Network_port", QFT_OTHER, 0,
                   "Instance network port if available (e.g. for VNC console)"),
  "os": ("OS", QFT_TEXT, 0, "Operating system"),
  "serial_no": ("SerialNo", QFT_NUMBER, 0, _SERIAL_NO_DOC % "Instance"),
  "uuid": ("UUID", QFT_TEXT, 0, "Instance UUID"),
  }
1820 |
|
1821 |
|
1822 |
def _GetInstNodeGroup(ctx, default, node_name): |
1823 |
"""Gets group UUID of an instance node.
|
1824 |
|
1825 |
@type ctx: L{InstanceQueryData}
|
1826 |
@param default: Default value
|
1827 |
@type node_name: string
|
1828 |
@param node_name: Node name
|
1829 |
|
1830 |
"""
|
1831 |
try:
|
1832 |
node = ctx.nodes[node_name] |
1833 |
except KeyError: |
1834 |
return default
|
1835 |
else:
|
1836 |
return node.group
|
1837 |
|
1838 |
|
1839 |
def _GetInstNodeGroupName(ctx, default, node_name): |
1840 |
"""Gets group name of an instance node.
|
1841 |
|
1842 |
@type ctx: L{InstanceQueryData}
|
1843 |
@param default: Default value
|
1844 |
@type node_name: string
|
1845 |
@param node_name: Node name
|
1846 |
|
1847 |
"""
|
1848 |
try:
|
1849 |
node = ctx.nodes[node_name] |
1850 |
except KeyError: |
1851 |
return default
|
1852 |
|
1853 |
try:
|
1854 |
group = ctx.groups[node.group] |
1855 |
except KeyError: |
1856 |
return default
|
1857 |
|
1858 |
return group.name
|
1859 |
|
1860 |
|
1861 |
def _BuildInstanceFields():
  """Builds list of fields for instance queries.

  Combines node/group fields, the simple config fields from
  L{_INST_SIMPLE_FIELDS}, live (hypervisor-reported) fields, the status
  field, network/parameter/disk/timestamp fields, and a few aliases.

  @return: Prepared field list (see L{_PrepareFieldList})

  """
  fields = [
    (_MakeField("pnode", "Primary_node", QFT_TEXT, "Primary node"),
     IQ_CONFIG, QFF_HOSTNAME, _GetItemAttr("primary_node")),
    (_MakeField("pnode.group", "PrimaryNodeGroup", QFT_TEXT,
                "Primary node's group"),
     IQ_NODES, 0,
     lambda ctx, inst: _GetInstNodeGroupName(ctx, _FS_UNAVAIL,
                                             inst.primary_node)),
    (_MakeField("pnode.group.uuid", "PrimaryNodeGroupUUID", QFT_TEXT,
                "Primary node's group UUID"),
     IQ_NODES, 0,
     lambda ctx, inst: _GetInstNodeGroup(ctx, _FS_UNAVAIL, inst.primary_node)),
    # TODO: Allow filtering by secondary node as hostname
    (_MakeField("snodes", "Secondary_Nodes", QFT_OTHER,
                "Secondary nodes; usually this will just be one node"),
     IQ_CONFIG, 0, lambda ctx, inst: list(inst.secondary_nodes)),
    (_MakeField("snodes.group", "SecondaryNodesGroups", QFT_OTHER,
                "Node groups of secondary nodes"),
     IQ_NODES, 0,
     lambda ctx, inst: map(compat.partial(_GetInstNodeGroupName, ctx, None),
                           inst.secondary_nodes)),
    (_MakeField("snodes.group.uuid", "SecondaryNodesGroupsUUID", QFT_OTHER,
                "Node group UUIDs of secondary nodes"),
     IQ_NODES, 0,
     lambda ctx, inst: map(compat.partial(_GetInstNodeGroup, ctx, None),
                           inst.secondary_nodes)),
    (_MakeField("admin_state", "InstanceState", QFT_TEXT,
                "Desired state of instance"),
     IQ_CONFIG, 0, _GetItemAttr("admin_state")),
    (_MakeField("admin_up", "Autostart", QFT_BOOL,
                "Desired state of instance"),
     IQ_CONFIG, 0, lambda ctx, inst: inst.admin_state == constants.ADMINST_UP),
    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), IQ_CONFIG, 0,
     lambda ctx, inst: list(inst.GetTags())),
    (_MakeField("console", "Console", QFT_OTHER,
                "Instance console information"), IQ_CONSOLE, 0,
     _GetInstanceConsole),
    ]

  # Add simple fields
  fields.extend([
    (_MakeField(name, title, kind, doc), IQ_CONFIG, flags, _GetItemAttr(name))
    for (name, (title, kind, flags, doc)) in _INST_SIMPLE_FIELDS.items()
    ])

  # Fields requiring talking to the node
  fields.extend([
    (_MakeField("oper_state", "Running", QFT_BOOL, "Actual state of instance"),
     IQ_LIVE, 0, _GetInstOperState),
    (_MakeField("oper_ram", "Memory", QFT_UNIT,
                "Actual memory usage as seen by hypervisor"),
     IQ_LIVE, 0, _GetInstLiveData("memory")),
    (_MakeField("oper_vcpus", "VCPUs", QFT_NUMBER,
                "Actual number of VCPUs as seen by hypervisor"),
     IQ_LIVE, 0, _GetInstLiveData("vcpus")),
    ])

  # Status field; documentation string is built from the tuple of status
  # values, hence the assertion below that it stays in sync with INSTST_ALL
  status_values = (constants.INSTST_RUNNING, constants.INSTST_ADMINDOWN,
                   constants.INSTST_WRONGNODE, constants.INSTST_ERRORUP,
                   constants.INSTST_ERRORDOWN, constants.INSTST_NODEDOWN,
                   constants.INSTST_NODEOFFLINE, constants.INSTST_ADMINOFFLINE)
  status_doc = ("Instance status; \"%s\" if instance is set to be running"
                " and actually is, \"%s\" if instance is stopped and"
                " is not running, \"%s\" if instance running, but not on its"
                " designated primary node, \"%s\" if instance should be"
                " stopped, but is actually running, \"%s\" if instance should"
                " run, but doesn't, \"%s\" if instance's primary node is down,"
                " \"%s\" if instance's primary node is marked offline,"
                " \"%s\" if instance is offline and does not use dynamic"
                " resources" % status_values)
  fields.append((_MakeField("status", "Status", QFT_TEXT, status_doc),
                 IQ_LIVE, 0, _GetInstStatus))
  assert set(status_values) == constants.INSTST_ALL, \
    "Status documentation mismatch"

  (network_fields, network_aliases) = _GetInstanceNetworkFields()

  fields.extend(network_fields)
  fields.extend(_GetInstanceParameterFields())
  fields.extend(_GetInstanceDiskFields())
  fields.extend(_GetItemTimestampFields(IQ_CONFIG))

  # Legacy/convenience aliases for commonly used fields
  aliases = [
    ("vcpus", "be/vcpus"),
    ("be/memory", "be/maxmem"),
    ("sda_size", "disk.size/0"),
    ("sdb_size", "disk.size/1"),
    ] + network_aliases

  return _PrepareFieldList(fields, aliases)
|
1956 |
|
1957 |
|
1958 |
class LockQueryData:
  """Data container for lock data queries.

  Wraps an iterable of lock information entries (see the per-field
  retrieval functions, e.g. L{_GetLockOwners}, for the expected tuple
  layout of each entry).

  """
  def __init__(self, lockdata):
    """Initializes this class.

    @param lockdata: Iterable of lock information entries

    """
    self.lockdata = lockdata

  def __iter__(self):
    """Iterate over all locks.

    """
    return iter(self.lockdata)
1973 |
|
1974 |
|
1975 |
def _GetLockOwners(_, data):
  """Returns a sorted list of a lock's current owners.

  @param data: Lock information entry; third element holds the owners

  """
  (_, _, owners, _) = data

  # Falsy values (no owners) are passed through untouched
  return utils.NiceSort(owners) if owners else owners
|
1985 |
|
1986 |
|
1987 |
def _GetLockPending(_, data):
  """Returns a sorted list of a lock's pending acquires.

  @param data: Lock information entry; fourth element holds the pending
    (mode, names) pairs

  """
  (_, _, _, pending) = data

  # Falsy values (nothing pending) are passed through untouched
  if not pending:
    return pending

  return [(mode, utils.NiceSort(names))
          for (mode, names) in pending]
|
1998 |
|
1999 |
|
2000 |
def _BuildLockFields():
  """Builds list of fields for lock queries.

  Each lock data entry is a (name, mode, owners, pending) tuple, which the
  lambdas below destructure directly (Python 2 tuple parameters).

  @return: Prepared field list (see L{_PrepareFieldList})

  """
  return _PrepareFieldList([
    # TODO: Lock names are not always hostnames. Should QFF_HOSTNAME be used?
    (_MakeField("name", "Name", QFT_TEXT, "Lock name"), None, 0,
     lambda ctx, (name, mode, owners, pending): name),
    (_MakeField("mode", "Mode", QFT_OTHER,
                "Mode in which the lock is currently acquired"
                " (exclusive or shared)"),
     LQ_MODE, 0, lambda ctx, (name, mode, owners, pending): mode),
    (_MakeField("owner", "Owner", QFT_OTHER, "Current lock owner(s)"),
     LQ_OWNER, 0, _GetLockOwners),
    (_MakeField("pending", "Pending", QFT_OTHER,
                "Threads waiting for the lock"),
     LQ_PENDING, 0, _GetLockPending),
    ], [])
2018 |
|
2019 |
|
2020 |
class GroupQueryData:
  """Data container for node group data queries.

  """
  def __init__(self, cluster, groups, group_to_nodes, group_to_instances,
               want_diskparams):
    """Initializes this class.

    @param cluster: Cluster object
    @param groups: List of node group objects
    @type group_to_nodes: dict; group UUID as key
    @param group_to_nodes: Per-group list of nodes
    @type group_to_instances: dict; group UUID as key
    @param group_to_instances: Per-group list of (primary) instances
    @type want_diskparams: bool
    @param want_diskparams: Whether disk parameters should be calculated

    """
    self.groups = groups
    self.group_to_nodes = group_to_nodes
    self.group_to_instances = group_to_instances
    self.cluster = cluster
    self.want_diskparams = want_diskparams

    # Used for individual rows; filled in by L{__iter__} for the group
    # currently being processed
    self.group_ipolicy = None
    self.ndparams = None
    self.group_dp = None

  def __iter__(self):
    """Iterate over all node groups.

    This function has side-effects and only one instance of the resulting
    generator should be used at a time.

    """
    for group in self.groups:
      # Merge cluster-level defaults into this group's settings before
      # yielding, so field retrieval functions see the effective values
      self.group_ipolicy = self.cluster.SimpleFillIPolicy(group.ipolicy)
      self.ndparams = self.cluster.SimpleFillND(group.ndparams)
      if self.want_diskparams:
        self.group_dp = self.cluster.SimpleFillDP(group.diskparams)
      else:
        self.group_dp = None
      yield group
|
2064 |
|
2065 |
|
2066 |
#: Simple node group fields: maps group attribute name to a tuple of
#: (column title, value data type, description); expanded into query
#: fields by L{_BuildGroupFields}
_GROUP_SIMPLE_FIELDS = {
  "alloc_policy": ("AllocPolicy", QFT_TEXT, "Allocation policy for group"),
  "name": ("Group", QFT_TEXT, "Group name"),
  "serial_no": ("SerialNo", QFT_NUMBER, _SERIAL_NO_DOC % "Group"),
  "uuid": ("UUID", QFT_TEXT, "Group UUID"),
  }
2072 |
|
2073 |
|
2074 |
def _BuildGroupFields():
  """Builds list of fields for node group queries.

  @return: Prepared field list (see L{_PrepareFieldList})

  """
  # Add simple fields
  fields = [(_MakeField(name, title, kind, doc), GQ_CONFIG, 0,
             _GetItemAttr(name))
            for (name, (title, kind, doc)) in _GROUP_SIMPLE_FIELDS.items()]

  # Helper building a retrieval function returning the number of items the
  # given getter maps this group's UUID to
  def _GetLength(getter):
    return lambda ctx, group: len(getter(ctx)[group.uuid])

  # Helper building a retrieval function returning the nicely sorted items
  # the given getter maps this group's UUID to
  def _GetSortedList(getter):
    return lambda ctx, group: utils.NiceSort(getter(ctx)[group.uuid])

  group_to_nodes = operator.attrgetter("group_to_nodes")
  group_to_instances = operator.attrgetter("group_to_instances")

  # Add fields for nodes
  fields.extend([
    (_MakeField("node_cnt", "Nodes", QFT_NUMBER, "Number of nodes"),
     GQ_NODE, 0, _GetLength(group_to_nodes)),
    (_MakeField("node_list", "NodeList", QFT_OTHER, "List of nodes"),
     GQ_NODE, 0, _GetSortedList(group_to_nodes)),
    ])

  # Add fields for instances
  fields.extend([
    (_MakeField("pinst_cnt", "Instances", QFT_NUMBER,
                "Number of primary instances"),
     GQ_INST, 0, _GetLength(group_to_instances)),
    (_MakeField("pinst_list", "InstanceList", QFT_OTHER,
                "List of primary instances"),
     GQ_INST, 0, _GetSortedList(group_to_instances)),
    ])

  # Other fields; the "merged" variants read the per-row values filled in
  # by L{GroupQueryData.__iter__}, the "custom" variants the raw group
  # attributes
  fields.extend([
    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), GQ_CONFIG, 0,
     lambda ctx, group: list(group.GetTags())),
    (_MakeField("ipolicy", "InstancePolicy", QFT_OTHER,
                "Instance policy limitations (merged)"),
     GQ_CONFIG, 0, lambda ctx, _: ctx.group_ipolicy),
    (_MakeField("custom_ipolicy", "CustomInstancePolicy", QFT_OTHER,
                "Custom instance policy limitations"),
     GQ_CONFIG, 0, _GetItemAttr("ipolicy")),
    (_MakeField("custom_ndparams", "CustomNDParams", QFT_OTHER,
                "Custom node parameters"),
     GQ_CONFIG, 0, _GetItemAttr("ndparams")),
    (_MakeField("ndparams", "NDParams", QFT_OTHER,
                "Node parameters"),
     GQ_CONFIG, 0, lambda ctx, _: ctx.ndparams),
    (_MakeField("diskparams", "DiskParameters", QFT_OTHER,
                "Disk parameters (merged)"),
     GQ_DISKPARAMS, 0, lambda ctx, _: ctx.group_dp),
    (_MakeField("custom_diskparams", "CustomDiskParameters", QFT_OTHER,
                "Custom disk parameters"),
     GQ_CONFIG, 0, _GetItemAttr("diskparams")),
    ])

  # ND parameters
  fields.extend(_BuildNDFields(True))

  fields.extend(_GetItemTimestampFields(GQ_CONFIG))

  return _PrepareFieldList(fields, [])
|
2140 |
|
2141 |
|
2142 |
class OsInfo(objects.ConfigObject):
  """Config object holding per-operating-system data for OS queries.

  Attribute names correspond directly to the field names used by
  L{_BuildOsFields}.

  """
  __slots__ = [
    "name",
    "valid",
    "hidden",
    "blacklisted",
    "variants",
    "api_versions",
    "parameters",
    "node_status",
    ]
2153 |
|
2154 |
|
2155 |
def _BuildOsFields():
  """Builds list of fields for operating system queries.

  All fields read attributes of L{OsInfo}; list-valued ones are sorted for
  stable output.

  @return: Prepared field list (see L{_PrepareFieldList})

  """
  fields = [
    (_MakeField("name", "Name", QFT_TEXT, "Operating system name"),
     None, 0, _GetItemAttr("name")),
    (_MakeField("valid", "Valid", QFT_BOOL,
                "Whether operating system definition is valid"),
     None, 0, _GetItemAttr("valid")),
    (_MakeField("hidden", "Hidden", QFT_BOOL,
                "Whether operating system is hidden"),
     None, 0, _GetItemAttr("hidden")),
    (_MakeField("blacklisted", "Blacklisted", QFT_BOOL,
                "Whether operating system is blacklisted"),
     None, 0, _GetItemAttr("blacklisted")),
    (_MakeField("variants", "Variants", QFT_OTHER,
                "Operating system variants"),
     None, 0, _ConvWrap(utils.NiceSort, _GetItemAttr("variants"))),
    (_MakeField("api_versions", "ApiVersions", QFT_OTHER,
                "Operating system API versions"),
     None, 0, _ConvWrap(sorted, _GetItemAttr("api_versions"))),
    (_MakeField("parameters", "Parameters", QFT_OTHER,
                "Operating system parameters"),
     # Parameters are (name, ...) pairs; sort by name (first element)
     None, 0, _ConvWrap(compat.partial(utils.NiceSort, key=compat.fst),
                        _GetItemAttr("parameters"))),
    (_MakeField("node_status", "NodeStatus", QFT_OTHER,
                "Status from node"),
     None, 0, _GetItemAttr("node_status")),
    ]

  return _PrepareFieldList(fields, [])
|
2187 |
|
2188 |
|
2189 |
def _JobUnavailInner(fn, ctx, (job_id, job)): # pylint: disable=W0613 |
2190 |
"""Return L{_FS_UNAVAIL} if job is None.
|
2191 |
|
2192 |
When listing specifc jobs (e.g. "gnt-job list 1 2 3"), a job may not be
|
2193 |
found, in which case this function converts it to L{_FS_UNAVAIL}.
|
2194 |
|
2195 |
"""
|
2196 |
if job is None: |
2197 |
return _FS_UNAVAIL
|
2198 |
else:
|
2199 |
return fn(job)
|
2200 |
|
2201 |
|
2202 |
def _JobUnavail(inner):
  """Wrapper for L{_JobUnavailInner}.

  @param inner: Function taking a job object
  @return: Retrieval function handling missing jobs

  """
  return compat.partial(_JobUnavailInner, inner)
|
2207 |
|
2208 |
|
2209 |
def _PerJobOpInner(fn, job): |
2210 |
"""Executes a function per opcode in a job.
|
2211 |
|
2212 |
"""
|
2213 |
return map(fn, job.ops) |
2214 |
|
2215 |
|
2216 |
def _PerJobOp(fn):
  """Wrapper for L{_PerJobOpInner}.

  @param fn: Function taking a single opcode
  @return: Retrieval function applying C{fn} to every opcode of a job,
    handling missing jobs via L{_JobUnavail}

  """
  return _JobUnavail(compat.partial(_PerJobOpInner, fn))
|
2221 |
|
2222 |
|
2223 |
def _JobTimestampInner(fn, job): |
2224 |
"""Converts unavailable timestamp to L{_FS_UNAVAIL}.
|
2225 |
|
2226 |
"""
|
2227 |
timestamp = fn(job) |
2228 |
|
2229 |
if timestamp is None: |
2230 |
return _FS_UNAVAIL
|
2231 |
else:
|
2232 |
return timestamp
|
2233 |
|
2234 |
|
2235 |
def _JobTimestamp(fn):
  """Wrapper for L{_JobTimestampInner}.

  @param fn: Function extracting a timestamp from a job
  @return: Retrieval function handling missing jobs and missing timestamps

  """
  return _JobUnavail(compat.partial(_JobTimestampInner, fn))
|
2240 |
|
2241 |
|
2242 |
def _BuildJobFields():
  """Builds list of fields for job queries.

  Job data entries are (job_id, job) tuples, where the job may be None for
  unknown job IDs (handled by the C{_Job*} wrappers).

  @return: Prepared field list (see L{_PrepareFieldList})

  """
  fields = [
    (_MakeField("id", "ID", QFT_NUMBER, "Job ID"),
     None, QFF_JOB_ID, lambda _, (job_id, job): job_id),
    (_MakeField("status", "Status", QFT_TEXT, "Job status"),
     None, 0, _JobUnavail(lambda job: job.CalcStatus())),
    (_MakeField("priority", "Priority", QFT_NUMBER,
                ("Current job priority (%s to %s)" %
                 (constants.OP_PRIO_LOWEST, constants.OP_PRIO_HIGHEST))),
     None, 0, _JobUnavail(lambda job: job.CalcPriority())),
    (_MakeField("ops", "OpCodes", QFT_OTHER, "List of all opcodes"),
     None, 0, _PerJobOp(lambda op: op.input.__getstate__())),
    (_MakeField("opresult", "OpCode_result", QFT_OTHER,
                "List of opcodes results"),
     None, 0, _PerJobOp(operator.attrgetter("result"))),
    (_MakeField("opstatus", "OpCode_status", QFT_OTHER,
                "List of opcodes status"),
     None, 0, _PerJobOp(operator.attrgetter("status"))),
    (_MakeField("oplog", "OpCode_log", QFT_OTHER,
                "List of opcode output logs"),
     None, 0, _PerJobOp(operator.attrgetter("log"))),
    (_MakeField("opstart", "OpCode_start", QFT_OTHER,
                "List of opcode start timestamps (before acquiring locks)"),
     None, 0, _PerJobOp(operator.attrgetter("start_timestamp"))),
    (_MakeField("opexec", "OpCode_exec", QFT_OTHER,
                "List of opcode execution start timestamps (after acquiring"
                " locks)"),
     None, 0, _PerJobOp(operator.attrgetter("exec_timestamp"))),
    (_MakeField("opend", "OpCode_end", QFT_OTHER,
                "List of opcode execution end timestamps"),
     None, 0, _PerJobOp(operator.attrgetter("end_timestamp"))),
    (_MakeField("oppriority", "OpCode_prio", QFT_OTHER,
                "List of opcode priorities"),
     None, 0, _PerJobOp(operator.attrgetter("priority"))),
    (_MakeField("summary", "Summary", QFT_OTHER,
                "List of per-opcode summaries"),
     None, 0, _PerJobOp(lambda op: op.input.Summary())),
    ]

  # Timestamp fields
  for (name, attr, title, desc) in [
    ("received_ts", "received_timestamp", "Received",
     "Timestamp of when job was received"),
    ("start_ts", "start_timestamp", "Start", "Timestamp of job start"),
    ("end_ts", "end_timestamp", "End", "Timestamp of job end"),
    ]:
    getter = operator.attrgetter(attr)
    fields.extend([
      (_MakeField(name, title, QFT_OTHER,
                  "%s (tuple containing seconds and microseconds)" % desc),
       None, QFF_SPLIT_TIMESTAMP, _JobTimestamp(getter)),
      ])

  return _PrepareFieldList(fields, [])
|
2299 |
|
2300 |
|
2301 |
def _GetExportName(_, (node_name, expname)): # pylint: disable=W0613 |
2302 |
"""Returns an export name if available.
|
2303 |
|
2304 |
"""
|
2305 |
if expname is None: |
2306 |
return _FS_UNAVAIL
|
2307 |
else:
|
2308 |
return expname
|
2309 |
|
2310 |
|
2311 |
def _BuildExportFields():
  """Builds list of fields for exports.

  Export data entries are (node_name, expname) tuples; the export name may
  be None (handled by L{_GetExportName}).

  @return: Prepared field list (see L{_PrepareFieldList})

  """
  fields = [
    (_MakeField("node", "Node", QFT_TEXT, "Node name"),
     None, QFF_HOSTNAME, lambda _, (node_name, expname): node_name),
    (_MakeField("export", "Export", QFT_TEXT, "Export name"),
     None, 0, _GetExportName),
    ]

  return _PrepareFieldList(fields, [])
|
2323 |
|
2324 |
|
2325 |
#: Static cluster version fields: maps field name to a tuple of
#: (column title, value data type, constant value, description); the values
#: are fixed at module load time and served via L{_StaticValue}
_CLUSTER_VERSION_FIELDS = {
  "software_version": ("SoftwareVersion", QFT_TEXT, constants.RELEASE_VERSION,
                       "Software version"),
  "protocol_version": ("ProtocolVersion", QFT_NUMBER,
                       constants.PROTOCOL_VERSION,
                       "RPC protocol version"),
  "config_version": ("ConfigVersion", QFT_NUMBER, constants.CONFIG_VERSION,
                     "Configuration format version"),
  "os_api_version": ("OsApiVersion", QFT_NUMBER, max(constants.OS_API_VERSIONS),
                     "API version for OS template scripts"),
  "export_version": ("ExportVersion", QFT_NUMBER, constants.EXPORT_VERSION,
                     "Import/export file format version"),
  }
2338 |
|
2339 |
|
2340 |
#: Simple cluster fields: maps cluster attribute name to a tuple of
#: (column title, value data type, QFF_* flags, description)
_CLUSTER_SIMPLE_FIELDS = {
  "cluster_name": ("Name", QFT_TEXT, QFF_HOSTNAME, "Cluster name"),
  "master_node": ("Master", QFT_TEXT, QFF_HOSTNAME, "Master node name"),
  "volume_group_name": ("VgName", QFT_TEXT, 0, "LVM volume group name"),
  }
2345 |
|
2346 |
|
2347 |
class ClusterQueryData:
  """Data container for cluster queries; iterates over exactly one item,
  the cluster object itself.

  """
  def __init__(self, cluster, drain_flag, watcher_pause):
    """Initializes this class.

    @type cluster: L{objects.Cluster}
    @param cluster: Instance of cluster object
    @type drain_flag: bool
    @param drain_flag: Whether job queue is drained
    @type watcher_pause: number
    @param watcher_pause: Until when watcher is paused (Unix timestamp)

    """
    self._cluster = cluster
    self.drain_flag = drain_flag
    self.watcher_pause = watcher_pause

  def __iter__(self):
    """Returns an iterator over the single cluster object.

    """
    return iter([self._cluster])
2365 |
|
2366 |
|
2367 |
def _ClusterWatcherPause(ctx, _): |
2368 |
"""Returns until when watcher is paused (if available).
|
2369 |
|
2370 |
"""
|
2371 |
if ctx.watcher_pause is None: |
2372 |
return _FS_UNAVAIL
|
2373 |
else:
|
2374 |
return ctx.watcher_pause
|
2375 |
|
2376 |
|
2377 |
def _BuildClusterFields():
  """Builds list of fields for cluster information.

  Combines a few hand-written fields with the simple fields from
  L{_CLUSTER_SIMPLE_FIELDS}, the static version fields from
  L{_CLUSTER_VERSION_FIELDS} and timestamps.

  @return: Prepared field list (see L{_PrepareFieldList})

  """
  fields = [
    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), CQ_CONFIG, 0,
     lambda ctx, cluster: list(cluster.GetTags())),
    (_MakeField("architecture", "ArchInfo", QFT_OTHER,
                "Architecture information"), None, 0,
     lambda ctx, _: runtime.GetArchInfo()),
    (_MakeField("drain_flag", "QueueDrained", QFT_BOOL,
                "Flag whether job queue is drained"), CQ_QUEUE_DRAINED, 0,
     lambda ctx, _: ctx.drain_flag),
    (_MakeField("watcher_pause", "WatcherPause", QFT_TIMESTAMP,
                "Until when watcher is paused"), CQ_WATCHER_PAUSE, 0,
     _ClusterWatcherPause),
    ]

  # Simple fields
  fields.extend([
    (_MakeField(name, title, kind, doc), CQ_CONFIG, flags, _GetItemAttr(name))
    for (name, (title, kind, flags, doc)) in _CLUSTER_SIMPLE_FIELDS.items()
    ])

  # Version fields
  fields.extend([
    (_MakeField(name, title, kind, doc), None, 0, _StaticValue(value))
    for (name, (title, kind, value, doc)) in _CLUSTER_VERSION_FIELDS.items()
    ])

  # Add timestamps
  fields.extend(_GetItemTimestampFields(CQ_CONFIG))

  # "name" is an alias for "cluster_name"
  return _PrepareFieldList(fields, [
    ("name", "cluster_name"),
    ])
2413 |
|
2414 |
|
2415 |
#: Fields for cluster information
|
2416 |
CLUSTER_FIELDS = _BuildClusterFields() |
2417 |
|
2418 |
#: Fields available for node queries
|
2419 |
NODE_FIELDS = _BuildNodeFields() |
2420 |
|
2421 |
#: Fields available for instance queries
|
2422 |
INSTANCE_FIELDS = _BuildInstanceFields() |
2423 |
|
2424 |
#: Fields available for lock queries
|
2425 |
LOCK_FIELDS = _BuildLockFields() |
2426 |
|
2427 |
#: Fields available for node group queries
|
2428 |
GROUP_FIELDS = _BuildGroupFields() |
2429 |
|
2430 |
#: Fields available for operating system queries
|
2431 |
OS_FIELDS = _BuildOsFields() |
2432 |
|
2433 |
#: Fields available for job queries
|
2434 |
JOB_FIELDS = _BuildJobFields() |
2435 |
|
2436 |
#: Fields available for exports
|
2437 |
EXPORT_FIELDS = _BuildExportFields() |
2438 |
|
2439 |
#: All available resources
|
2440 |
ALL_FIELDS = { |
2441 |
constants.QR_CLUSTER: CLUSTER_FIELDS, |
2442 |
constants.QR_INSTANCE: INSTANCE_FIELDS, |
2443 |
constants.QR_NODE: NODE_FIELDS, |
2444 |
constants.QR_LOCK: LOCK_FIELDS, |
2445 |
constants.QR_GROUP: GROUP_FIELDS, |
2446 |
constants.QR_OS: OS_FIELDS, |
2447 |
constants.QR_JOB: JOB_FIELDS, |
2448 |
constants.QR_EXPORT: EXPORT_FIELDS, |
2449 |
} |
2450 |
|
2451 |
#: All available field lists
|
2452 |
ALL_FIELD_LISTS = ALL_FIELDS.values() |