lib/query.py @ a9310b2f
#
#

# Copyright (C) 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module for query operations

How it works:

  - Add field definitions
    - See how L{NODE_FIELDS} is built
    - Each field gets:
      - Query field definition (L{objects.QueryFieldDefinition}, use
        L{_MakeField} for creating), containing:
          - Name, must be lowercase and match L{FIELD_NAME_RE}
          - Title for tables, must not contain whitespace and match
            L{TITLE_RE}
          - Value data type, e.g. L{constants.QFT_NUMBER}
          - Human-readable description, must not end with punctuation or
            contain newlines
      - Data request type, see e.g. C{NQ_*}
      - OR-ed flags, see C{QFF_*}
      - A retrieval function, see L{Query.__init__} for description
  - Pass list of fields through L{_PrepareFieldList} for preparation and
    checks
  - Instantiate L{Query} with prepared field list definition and selected fields
  - Call L{Query.RequestedData} to determine what data to collect/compute
  - Call L{Query.Query} or L{Query.OldStyleQuery} with collected data and use
    result
      - Data container must support iteration using C{__iter__}
      - Items are passed to retrieval functions and can have any format
  - Call L{Query.GetFields} to get list of definitions for selected fields

@attention: Retrieval functions must be idempotent. They can be called multiple
  times, in any order and any number of times.

"""
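
# Illustrative sketch (not part of the original module): how the steps in the
# docstring above fit together for a caller. The field list and the
# "container" object are hypothetical stand-ins; real callers use prepared
# lists such as the node and instance field definitions built further down.
#
#   fieldlist = _PrepareFieldList([
#     (_MakeField("name", "Name", QFT_TEXT, "Item name"),
#      None, 0, _GetItemAttr("name")),
#     ], [])
#   query = Query(fieldlist, ["name"], namefield="name")
#   query.RequestedData()   # -> frozenset of data kinds to collect
#   query.Query(container)  # -> rows of (status, value) tuples
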
import logging
import operator
import re

from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import compat
from ganeti import objects
from ganeti import ht
from ganeti import runtime
from ganeti import qlang
from ganeti import jstore

from ganeti.constants import (QFT_UNKNOWN, QFT_TEXT, QFT_BOOL, QFT_NUMBER,
                              QFT_UNIT, QFT_TIMESTAMP, QFT_OTHER,
                              RS_NORMAL, RS_UNKNOWN, RS_NODATA,
                              RS_UNAVAIL, RS_OFFLINE)


# Constants for requesting data from the caller/data provider. Each property
# collected/computed separately by the data provider should have its own to
# only collect the requested data and not more.

(NQ_CONFIG,
 NQ_INST,
 NQ_LIVE,
 NQ_GROUP,
 NQ_OOB) = range(1, 6)

(IQ_CONFIG,
 IQ_LIVE,
 IQ_DISKUSAGE,
 IQ_CONSOLE,
 IQ_NODES) = range(100, 105)

(LQ_MODE,
 LQ_OWNER,
 LQ_PENDING) = range(10, 13)

(GQ_CONFIG,
 GQ_NODE,
 GQ_INST,
 GQ_DISKPARAMS) = range(200, 204)

(CQ_CONFIG,
 CQ_QUEUE_DRAINED,
 CQ_WATCHER_PAUSE) = range(300, 303)

# Query field flags
QFF_HOSTNAME = 0x01
QFF_IP_ADDRESS = 0x02
QFF_JOB_ID = 0x04
QFF_SPLIT_TIMESTAMP = 0x08
# Next values: 0x10, 0x20, 0x40, 0x80, 0x100, 0x200
QFF_ALL = (QFF_HOSTNAME | QFF_IP_ADDRESS | QFF_JOB_ID | QFF_SPLIT_TIMESTAMP)

FIELD_NAME_RE = re.compile(r"^[a-z0-9/._]+$")
TITLE_RE = re.compile(r"^[^\s]+$")
DOC_RE = re.compile(r"^[A-Z].*[^.,?!]$")

#: Verification function for each field type
_VERIFY_FN = {
  QFT_UNKNOWN: ht.TNone,
  QFT_TEXT: ht.TString,
  QFT_BOOL: ht.TBool,
  QFT_NUMBER: ht.TInt,
  QFT_UNIT: ht.TInt,
  QFT_TIMESTAMP: ht.TNumber,
  QFT_OTHER: lambda _: True,
  }

# Unique objects for special field statuses
_FS_UNKNOWN = object()
_FS_NODATA = object()
_FS_UNAVAIL = object()
_FS_OFFLINE = object()

#: List of all special statuses
_FS_ALL = frozenset([_FS_UNKNOWN, _FS_NODATA, _FS_UNAVAIL, _FS_OFFLINE])

#: VType to QFT mapping
_VTToQFT = {
  # TODO: fix validation of empty strings
  constants.VTYPE_STRING: QFT_OTHER, # since VTYPE_STRINGs can be empty
  constants.VTYPE_MAYBE_STRING: QFT_OTHER,
  constants.VTYPE_BOOL: QFT_BOOL,
  constants.VTYPE_SIZE: QFT_UNIT,
  constants.VTYPE_INT: QFT_NUMBER,
  }

_SERIAL_NO_DOC = "%s object serial number, incremented on each modification"

# TODO: Consider moving titles closer to constants
NDP_TITLE = {
  constants.ND_OOB_PROGRAM: "OutOfBandProgram",
  constants.ND_SPINDLE_COUNT: "SpindleCount",
  }


def _GetUnknownField(ctx, item): # pylint: disable=W0613
  """Gets the contents of an unknown field.

  """
  return _FS_UNKNOWN


def _GetQueryFields(fielddefs, selected):
  """Calculates the internal list of selected fields.

  Unknown fields are returned as L{constants.QFT_UNKNOWN}.

  @type fielddefs: dict
  @param fielddefs: Field definitions
  @type selected: list of strings
  @param selected: List of selected fields

  """
  result = []

  for name in selected:
    try:
      fdef = fielddefs[name]
    except KeyError:
      fdef = (_MakeField(name, name, QFT_UNKNOWN, "Unknown field '%s'" % name),
              None, 0, _GetUnknownField)

    assert len(fdef) == 4

    result.append(fdef)

  return result


def GetAllFields(fielddefs):
  """Extract L{objects.QueryFieldDefinition} from field definitions.

  @rtype: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for (fdef, _, _, _) in fielddefs]


class _FilterHints:
  """Class for filter analytics.

  When filters are used, the user of the L{Query} class usually doesn't know
  exactly which items will be necessary for building the result. It therefore
  has to prepare and compute the input data for potentially returning
  everything.

  There are two ways to optimize this. The first, and simpler, is to assign
  each field a group of data, so that the caller can determine which
  computations are necessary depending on the data groups requested. The list
  of referenced groups must also be computed for fields referenced in the
  filter.

  The second is restricting the items based on a primary key. The primary key
  is usually a unique name (e.g. a node name). This class extracts all
  referenced names from a filter. If it encounters any filter condition which
  disallows such a list to be determined (e.g. a non-equality filter), all
  names will be requested.

  The end-effect is that any operation other than L{qlang.OP_OR} and
  L{qlang.OP_EQUAL} will make the query more expensive.

  """
  def __init__(self, namefield):
    """Initializes this class.

    @type namefield: string
    @param namefield: Field caller is interested in

    """
    self._namefield = namefield

    #: Whether all names need to be requested (e.g. if a non-equality operator
    #: has been used)
    self._allnames = False

    #: Which names to request
    self._names = None

    #: Data kinds referenced by the filter (used by L{Query.RequestedData})
    self._datakinds = set()

  def RequestedNames(self):
    """Returns all requested values.

    Returns C{None} if list of values can't be determined (e.g. encountered
    non-equality operators).

    @rtype: list

    """
    if self._allnames or self._names is None:
      return None

    return utils.UniqueSequence(self._names)

  def ReferencedData(self):
    """Returns all kinds of data referenced by the filter.

    """
    return frozenset(self._datakinds)

  def _NeedAllNames(self):
    """Changes internal state to request all names.

    """
    self._allnames = True
    self._names = None

  def NoteLogicOp(self, op):
    """Called when handling a logic operation.

    @type op: string
    @param op: Operator

    """
    if op != qlang.OP_OR:
      self._NeedAllNames()

  def NoteUnaryOp(self, op): # pylint: disable=W0613
    """Called when handling a unary operation.

    @type op: string
    @param op: Operator

    """
    self._NeedAllNames()

  def NoteBinaryOp(self, op, datakind, name, value):
    """Called when handling a binary operation.

    @type op: string
    @param op: Operator
    @type name: string
    @param name: Left-hand side of operator (field name)
    @param value: Right-hand side of operator

    """
    if datakind is not None:
      self._datakinds.add(datakind)

    if self._allnames:
      return

    # If any operator other than equality was used, all names need to be
    # retrieved
    if op == qlang.OP_EQUAL and name == self._namefield:
      if self._names is None:
        self._names = []
      self._names.append(value)
    else:
      self._NeedAllNames()

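
# Illustrative sketch (not part of the original module): what the hints
# collector reports for a simple filter. The field name "name" and the values
# are made up; the behaviour follows NoteLogicOp/NoteBinaryOp above, where
# only OP_OR and OP_EQUAL on the name field keep the name list determinable.
#
#   hints = _FilterHints("name")
#   hints.NoteLogicOp(qlang.OP_OR)
#   hints.NoteBinaryOp(qlang.OP_EQUAL, None, "name", "node1.example.com")
#   hints.NoteBinaryOp(qlang.OP_EQUAL, None, "name", "node2.example.com")
#   hints.RequestedNames()  # -> ["node1.example.com", "node2.example.com"]
#
#   hints.NoteBinaryOp(qlang.OP_REGEXP, None, "name", "^node")
#   hints.RequestedNames()  # -> None (all names must be considered)
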
def _WrapLogicOp(op_fn, sentences, ctx, item):
  """Wrapper for logic operator functions.

  """
  return op_fn(fn(ctx, item) for fn in sentences)


def _WrapUnaryOp(op_fn, inner, ctx, item):
  """Wrapper for unary operator functions.

  """
  return op_fn(inner(ctx, item))


def _WrapBinaryOp(op_fn, retrieval_fn, value, ctx, item):
  """Wrapper for binary operator functions.

  """
  return op_fn(retrieval_fn(ctx, item), value)


def _WrapNot(fn, lhs, rhs):
  """Negates the result of a wrapped function.

  """
  return not fn(lhs, rhs)


def _PrepareRegex(pattern):
  """Compiles a regular expression.

  """
  try:
    return re.compile(pattern)
  except re.error, err:
    raise errors.ParameterError("Invalid regex pattern (%s)" % err)


def _PrepareSplitTimestamp(value):
  """Prepares a value for comparison by L{_MakeSplitTimestampComparison}.

  """
  if ht.TNumber(value):
    return value
  else:
    return utils.MergeTime(value)


def _MakeSplitTimestampComparison(fn):
  """Compares split timestamp values after converting to float.

  """
  return lambda lhs, rhs: fn(utils.MergeTime(lhs), rhs)


def _MakeComparisonChecks(fn):
  """Prepares flag-specific comparisons using a comparison function.

  """
  return [
    (QFF_SPLIT_TIMESTAMP, _MakeSplitTimestampComparison(fn),
     _PrepareSplitTimestamp),
    (QFF_JOB_ID, lambda lhs, rhs: fn(jstore.ParseJobId(lhs), rhs),
     jstore.ParseJobId),
    (None, fn, None),
    ]


class _FilterCompilerHelper:
  """Converts a query filter to a callable usable for filtering.

  """
  # String statement has no effect, pylint: disable=W0105

  #: How deep filters can be nested
  _LEVELS_MAX = 10

  # Unique identifiers for operator groups
  (_OPTYPE_LOGIC,
   _OPTYPE_UNARY,
   _OPTYPE_BINARY) = range(1, 4)

  """Functions for equality checks depending on field flags.

  List of tuples containing flags and a callable receiving the left- and
  right-hand side of the operator. The flags are an OR-ed value of C{QFF_*}
  (e.g. L{QFF_HOSTNAME} or L{QFF_SPLIT_TIMESTAMP}).

  Order matters. The first item with flags will be used. Flags are checked
  using binary AND.

  """
  _EQUALITY_CHECKS = [
    (QFF_HOSTNAME,
     lambda lhs, rhs: utils.MatchNameComponent(rhs, [lhs],
                                               case_sensitive=False),
     None),
    (QFF_SPLIT_TIMESTAMP, _MakeSplitTimestampComparison(operator.eq),
     _PrepareSplitTimestamp),
    (None, operator.eq, None),
    ]

  """Known operators

  Operator as key (C{qlang.OP_*}), value a tuple of operator group
  (C{_OPTYPE_*}) and a group-specific value:

    - C{_OPTYPE_LOGIC}: Callable taking any number of arguments; used by
      L{_HandleLogicOp}
    - C{_OPTYPE_UNARY}: Always C{None}; details handled by L{_HandleUnaryOp}
    - C{_OPTYPE_BINARY}: Callable taking exactly two parameters, the left- and
      right-hand side of the operator, used by L{_HandleBinaryOp}

  """
  _OPS = {
    # Logic operators
    qlang.OP_OR: (_OPTYPE_LOGIC, compat.any),
    qlang.OP_AND: (_OPTYPE_LOGIC, compat.all),

    # Unary operators
    qlang.OP_NOT: (_OPTYPE_UNARY, None),
    qlang.OP_TRUE: (_OPTYPE_UNARY, None),

    # Binary operators
    qlang.OP_EQUAL: (_OPTYPE_BINARY, _EQUALITY_CHECKS),
    qlang.OP_NOT_EQUAL:
      (_OPTYPE_BINARY, [(flags, compat.partial(_WrapNot, fn), valprepfn)
                        for (flags, fn, valprepfn) in _EQUALITY_CHECKS]),
    qlang.OP_LT: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.lt)),
    qlang.OP_LE: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.le)),
    qlang.OP_GT: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.gt)),
    qlang.OP_GE: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.ge)),
    qlang.OP_REGEXP: (_OPTYPE_BINARY, [
      (None, lambda lhs, rhs: rhs.search(lhs), _PrepareRegex),
      ]),
    qlang.OP_CONTAINS: (_OPTYPE_BINARY, [
      (None, operator.contains, None),
      ]),
    }

  def __init__(self, fields):
    """Initializes this class.

    @param fields: Field definitions (return value of L{_PrepareFieldList})

    """
    self._fields = fields
    self._hints = None
    self._op_handler = None

  def __call__(self, hints, qfilter):
    """Converts a query filter into a callable function.

    @type hints: L{_FilterHints} or None
    @param hints: Callbacks doing analysis on filter
    @type qfilter: list
    @param qfilter: Filter structure
    @rtype: callable
    @return: Function receiving context and item as parameters, returning
             boolean as to whether item matches filter

    """
    self._op_handler = {
      self._OPTYPE_LOGIC:
        (self._HandleLogicOp, getattr(hints, "NoteLogicOp", None)),
      self._OPTYPE_UNARY:
        (self._HandleUnaryOp, getattr(hints, "NoteUnaryOp", None)),
      self._OPTYPE_BINARY:
        (self._HandleBinaryOp, getattr(hints, "NoteBinaryOp", None)),
      }

    try:
      filter_fn = self._Compile(qfilter, 0)
    finally:
      self._op_handler = None

    return filter_fn

  def _Compile(self, qfilter, level):
    """Inner function for converting filters.

    Calls the correct handler functions for the top-level operator. This
    function is called recursively (e.g. for logic operators).

    """
    if not (isinstance(qfilter, (list, tuple)) and qfilter):
      raise errors.ParameterError("Invalid filter on level %s" % level)

    # Limit recursion
    if level >= self._LEVELS_MAX:
      raise errors.ParameterError("Only up to %s levels are allowed (filter"
                                  " nested too deep)" % self._LEVELS_MAX)

    # Create copy to be modified
    operands = qfilter[:]
    op = operands.pop(0)

    try:
      (kind, op_data) = self._OPS[op]
    except KeyError:
      raise errors.ParameterError("Unknown operator '%s'" % op)

    (handler, hints_cb) = self._op_handler[kind]

    return handler(hints_cb, level, op, op_data, operands)

  def _LookupField(self, name):
    """Returns a field definition by name.

    """
    try:
      return self._fields[name]
    except KeyError:
      raise errors.ParameterError("Unknown field '%s'" % name)

  def _HandleLogicOp(self, hints_fn, level, op, op_fn, operands):
    """Handles logic operators.

    @type hints_fn: callable
    @param hints_fn: Callback doing some analysis on the filter
    @type level: integer
    @param level: Current depth
    @type op: string
    @param op: Operator
    @type op_fn: callable
    @param op_fn: Function implementing operator
    @type operands: list
    @param operands: List of operands

    """
    if hints_fn:
      hints_fn(op)

    return compat.partial(_WrapLogicOp, op_fn,
                          [self._Compile(op, level + 1) for op in operands])

  def _HandleUnaryOp(self, hints_fn, level, op, op_fn, operands):
    """Handles unary operators.

    @type hints_fn: callable
    @param hints_fn: Callback doing some analysis on the filter
    @type level: integer
    @param level: Current depth
    @type op: string
    @param op: Operator
    @type op_fn: callable
    @param op_fn: Function implementing operator
    @type operands: list
    @param operands: List of operands

    """
    assert op_fn is None

    if hints_fn:
      hints_fn(op)

    if len(operands) != 1:
      raise errors.ParameterError("Unary operator '%s' expects exactly one"
                                  " operand" % op)

    if op == qlang.OP_TRUE:
      (_, _, _, retrieval_fn) = self._LookupField(operands[0])

      op_fn = operator.truth
      arg = retrieval_fn
    elif op == qlang.OP_NOT:
      op_fn = operator.not_
      arg = self._Compile(operands[0], level + 1)
    else:
      raise errors.ProgrammerError("Can't handle operator '%s'" % op)

    return compat.partial(_WrapUnaryOp, op_fn, arg)

  def _HandleBinaryOp(self, hints_fn, level, op, op_data, operands):
    """Handles binary operators.

    @type hints_fn: callable
    @param hints_fn: Callback doing some analysis on the filter
    @type level: integer
    @param level: Current depth
    @type op: string
    @param op: Operator
    @param op_data: Functions implementing operators
    @type operands: list
    @param operands: List of operands

    """
    # Unused arguments, pylint: disable=W0613
    try:
      (name, value) = operands
    except (ValueError, TypeError):
      raise errors.ParameterError("Invalid binary operator, expected exactly"
                                  " two operands")

    (fdef, datakind, field_flags, retrieval_fn) = self._LookupField(name)

    assert fdef.kind != QFT_UNKNOWN

    # TODO: Type conversions?

    verify_fn = _VERIFY_FN[fdef.kind]
    if not verify_fn(value):
      raise errors.ParameterError("Unable to compare field '%s' (type '%s')"
                                  " with '%s', expected %s" %
                                  (name, fdef.kind, value.__class__.__name__,
                                   verify_fn))

    if hints_fn:
      hints_fn(op, datakind, name, value)

    for (fn_flags, fn, valprepfn) in op_data:
      if fn_flags is None or fn_flags & field_flags:
        # Prepare value if necessary (e.g. compile regular expression)
        if valprepfn:
          value = valprepfn(value)

        return compat.partial(_WrapBinaryOp, fn, retrieval_fn, value)

    raise errors.ProgrammerError("Unable to find operator implementation"
                                 " (op '%s', flags %s)" % (op, field_flags))


def _CompileFilter(fields, hints, qfilter):
  """Converts a query filter into a callable function.

  See L{_FilterCompilerHelper} for details.

  @rtype: callable

  """
  return _FilterCompilerHelper(fields)(hints, qfilter)

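
# Illustrative sketch (not part of the original module): compiling a filter
# into a predicate. "fields" stands for a prepared field list (see
# _PrepareFieldList below) containing a QFT_TEXT field named "name"; the
# filter matches items whose name equals "node1" or starts with "web".
#
#   qfilter = [qlang.OP_OR,
#              [qlang.OP_EQUAL, "name", "node1"],
#              [qlang.OP_REGEXP, "name", r"^web"]]
#   filter_fn = _CompileFilter(fields, None, qfilter)
#   filter_fn(ctx, item)  # -> True if the item matches, False otherwise
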
class Query:
  def __init__(self, fieldlist, selected, qfilter=None, namefield=None):
    """Initializes this class.

    The field definition is a dictionary with the field's name as a key and a
    tuple containing, in order, the field definition object
    (L{objects.QueryFieldDefinition}), the data kind to help calling code
    collect data, OR-ed C{QFF_*} flags and a retrieval function. The retrieval
    function is called with two parameters, in order, the data container and
    the item in container (see L{Query.Query}).

    Users of this class can call L{RequestedData} before preparing the data
    container to determine what data is needed.

    @type fieldlist: dictionary
    @param fieldlist: Field definitions
    @type selected: list of strings
    @param selected: List of selected fields

    """
    assert namefield is None or namefield in fieldlist

    self._fields = _GetQueryFields(fieldlist, selected)

    self._filter_fn = None
    self._requested_names = None
    self._filter_datakinds = frozenset()

    if qfilter is not None:
      # Collect requested names if wanted
      if namefield:
        hints = _FilterHints(namefield)
      else:
        hints = None

      # Build filter function
      self._filter_fn = _CompileFilter(fieldlist, hints, qfilter)
      if hints:
        self._requested_names = hints.RequestedNames()
        self._filter_datakinds = hints.ReferencedData()

    if namefield is None:
      self._name_fn = None
    else:
      (_, _, _, self._name_fn) = fieldlist[namefield]

  def RequestedNames(self):
    """Returns all names referenced in the filter.

    If there is no filter or operators are preventing determining the exact
    names, C{None} is returned.

    """
    return self._requested_names

  def RequestedData(self):
    """Gets requested kinds of data.

    @rtype: frozenset

    """
    return (self._filter_datakinds |
            frozenset(datakind for (_, datakind, _, _) in self._fields
                      if datakind is not None))

  def GetFields(self):
    """Returns the list of fields for this query.

    Includes unknown fields.

    @rtype: List of L{objects.QueryFieldDefinition}

    """
    return GetAllFields(self._fields)

  def Query(self, ctx, sort_by_name=True):
    """Execute a query.

    @param ctx: Data container passed to field retrieval functions, must
      support iteration using C{__iter__}
    @type sort_by_name: boolean
    @param sort_by_name: Whether to sort by name or keep the input data's
      ordering

    """
    sort = (self._name_fn and sort_by_name)

    result = []

    for idx, item in enumerate(ctx):
      if not (self._filter_fn is None or self._filter_fn(ctx, item)):
        continue

      row = [_ProcessResult(fn(ctx, item)) for (_, _, _, fn) in self._fields]

      # Verify result
      if __debug__:
        _VerifyResultRow(self._fields, row)

      if sort:
        (status, name) = _ProcessResult(self._name_fn(ctx, item))
        assert status == constants.RS_NORMAL
        # TODO: Are there cases where we wouldn't want to use NiceSort?
        # Answer: if the name field is non-string...
        result.append((utils.NiceSortKey(name), idx, row))
      else:
        result.append(row)

    if not sort:
      return result

    # TODO: Would "heapq" be more efficient than sorting?

    # Sorting in-place instead of using "sorted()"
    result.sort()

    assert not result or (len(result[0]) == 3 and len(result[-1]) == 3)

    return map(operator.itemgetter(2), result)

  def OldStyleQuery(self, ctx, sort_by_name=True):
    """Query with "old" query result format.

    See L{Query.Query} for arguments.

    """
    unknown = set(fdef.name for (fdef, _, _, _) in self._fields
                  if fdef.kind == QFT_UNKNOWN)
    if unknown:
      raise errors.OpPrereqError("Unknown output fields selected: %s" %
                                 (utils.CommaJoin(unknown), ),
                                 errors.ECODE_INVAL)

    return [[value for (_, value) in row]
            for row in self.Query(ctx, sort_by_name=sort_by_name)]

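
# Illustrative sketch (not part of the original module): the two result
# formats. The values shown are hypothetical; Query() returns per-field
# (status, value) tuples while OldStyleQuery() strips the status codes.
#
#   query.Query(ctx)
#   # -> [[(RS_NORMAL, "node1"), (RS_UNAVAIL, None)],
#   #     [(RS_NORMAL, "node2"), (RS_NORMAL, 128)]]
#   query.OldStyleQuery(ctx)
#   # -> [["node1", None], ["node2", 128]]
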
def _ProcessResult(value):
  """Converts result values into externally-visible ones.

  """
  if value is _FS_UNKNOWN:
    return (RS_UNKNOWN, None)
  elif value is _FS_NODATA:
    return (RS_NODATA, None)
  elif value is _FS_UNAVAIL:
    return (RS_UNAVAIL, None)
  elif value is _FS_OFFLINE:
    return (RS_OFFLINE, None)
  else:
    return (RS_NORMAL, value)


def _VerifyResultRow(fields, row):
  """Verifies the contents of a query result row.

  @type fields: list
  @param fields: Field definitions for result
  @type row: list of tuples
  @param row: Row data

  """
  assert len(row) == len(fields)
  errs = []
  for ((status, value), (fdef, _, _, _)) in zip(row, fields):
    if status == RS_NORMAL:
      if not _VERIFY_FN[fdef.kind](value):
        errs.append("normal field %s fails validation (value is %s)" %
                    (fdef.name, value))
    elif value is not None:
      errs.append("abnormal field %s has a non-None value" % fdef.name)
  assert not errs, ("Failed validation: %s in row %s" %
                    (utils.CommaJoin(errs), row))


def _FieldDictKey((fdef, _, flags, fn)):
  """Generates key for field dictionary.

  """
  assert fdef.name and fdef.title, "Name and title are required"
  assert FIELD_NAME_RE.match(fdef.name)
  assert TITLE_RE.match(fdef.title)
  assert (DOC_RE.match(fdef.doc) and len(fdef.doc.splitlines()) == 1 and
          fdef.doc.strip() == fdef.doc), \
         "Invalid description for field '%s'" % fdef.name
  assert callable(fn)
  assert (flags & ~QFF_ALL) == 0, "Unknown flags for field '%s'" % fdef.name

  return fdef.name


def _PrepareFieldList(fields, aliases):
  """Prepares field list for use by L{Query}.

  Converts the list to a dictionary and does some verification.

  @type fields: list of tuples; (L{objects.QueryFieldDefinition}, data
    kind, flags, retrieval function)
  @param fields: List of fields, see L{Query.__init__} for a better
    description
  @type aliases: list of tuples; (alias, target)
  @param aliases: list of tuples containing aliases; for each
    alias/target pair, a duplicate will be created in the field list
  @rtype: dict
  @return: Field dictionary for L{Query}

  """
  if __debug__:
    duplicates = utils.FindDuplicates(fdef.title.lower()
                                      for (fdef, _, _, _) in fields)
    assert not duplicates, "Duplicate title(s) found: %r" % duplicates

  result = utils.SequenceToDict(fields, key=_FieldDictKey)

  for alias, target in aliases:
    assert alias not in result, "Alias %s overrides an existing field" % alias
    assert target in result, "Missing target %s for alias %s" % (target, alias)
    (fdef, k, flags, fn) = result[target]
    fdef = fdef.Copy()
    fdef.name = alias
    result[alias] = (fdef, k, flags, fn)

  assert len(result) == len(fields) + len(aliases)
  assert compat.all(name == fdef.name
                    for (name, (fdef, _, _, _)) in result.items())

  return result

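
# Illustrative sketch (not part of the original module): building a minimal
# field list and querying it. The "Item" class, its attributes and the data
# kind value (None) are hypothetical; real definitions live in
# _BuildNodeFields and friends below.
#
#   fields = _PrepareFieldList([
#     (_MakeField("name", "Name", QFT_TEXT, "Item name"),
#      None, 0, _GetItemAttr("name")),
#     (_MakeField("size", "Size", QFT_NUMBER, "Item size"),
#      None, 0, _GetItemAttr("size")),
#     ], [("item_name", "name")])
#   query = Query(fields, ["item_name", "size"], namefield="name")
#   query.OldStyleQuery([Item("a", 1), Item("b", 2)])
#   # -> [["a", 1], ["b", 2]]
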
def GetQueryResponse(query, ctx, sort_by_name=True):
  """Prepares the response for a query.

  @type query: L{Query}
  @param ctx: Data container, see L{Query.Query}
  @type sort_by_name: boolean
  @param sort_by_name: Whether to sort by name or keep the input data's
    ordering

  """
  return objects.QueryResponse(data=query.Query(ctx, sort_by_name=sort_by_name),
                               fields=query.GetFields()).ToDict()


def QueryFields(fielddefs, selected):
  """Returns list of available fields.

  @type fielddefs: dict
  @param fielddefs: Field definitions
  @type selected: list of strings
  @param selected: List of selected fields
  @return: List of L{objects.QueryFieldDefinition}

  """
  if selected is None:
    # Client requests all fields, sort by name
    fdefs = utils.NiceSort(GetAllFields(fielddefs.values()),
                           key=operator.attrgetter("name"))
  else:
    # Keep order as requested by client
    fdefs = Query(fielddefs, selected).GetFields()

  return objects.QueryFieldsResponse(fields=fdefs).ToDict()


def _MakeField(name, title, kind, doc):
  """Wrapper for creating L{objects.QueryFieldDefinition} instances.

  @param name: Field name (must be lowercase and match L{FIELD_NAME_RE})
  @param title: Human-readable title
  @param kind: Field type
  @param doc: Human-readable description

  """
  return objects.QueryFieldDefinition(name=name, title=title, kind=kind,
                                      doc=doc)


def _StaticValueInner(value, ctx, _): # pylint: disable=W0613
  """Returns a static value.

  """
  return value


def _StaticValue(value):
  """Prepares a function to return a static value.

  """
  return compat.partial(_StaticValueInner, value)


def _GetNodeRole(node, master_name):
  """Determine node role.

  @type node: L{objects.Node}
  @param node: Node object
  @type master_name: string
  @param master_name: Master node name

  """
  if node.name == master_name:
    return constants.NR_MASTER
  elif node.master_candidate:
    return constants.NR_MCANDIDATE
  elif node.drained:
    return constants.NR_DRAINED
  elif node.offline:
    return constants.NR_OFFLINE
  else:
    return constants.NR_REGULAR


def _GetItemAttr(attr):
  """Returns a field function to return an attribute of the item.

  @param attr: Attribute name

  """
  getter = operator.attrgetter(attr)
  return lambda _, item: getter(item)

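
# Illustrative sketch (not part of the original module): the retrieval
# function convention. _GetItemAttr builds a callable taking (ctx, item), so
# a field definition only needs the attribute name; the "node" object here is
# a hypothetical stand-in.
#
#   get_name = _GetItemAttr("name")
#   get_name(None, node)  # -> node.name; the context argument is ignored
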
968 |
def _GetNDParam(name): |
969 |
"""Return a field function to return an ND parameter out of the context.
|
970 |
|
971 |
"""
|
972 |
def _helper(ctx, _): |
973 |
if ctx.ndparams is None: |
974 |
return _FS_UNAVAIL
|
975 |
else:
|
976 |
return ctx.ndparams.get(name, None) |
977 |
return _helper
|
978 |
|
979 |
|
980 |
def _BuildNDFields(is_group): |
981 |
"""Builds all the ndparam fields.
|
982 |
|
983 |
@param is_group: whether this is called at group or node level
|
984 |
|
985 |
"""
|
986 |
if is_group:
|
987 |
field_kind = GQ_CONFIG |
988 |
else:
|
989 |
field_kind = NQ_GROUP |
990 |
return [(_MakeField("ndp/%s" % name, NDP_TITLE.get(name, "ndp/%s" % name), |
991 |
_VTToQFT[kind], "The \"%s\" node parameter" % name),
|
992 |
field_kind, 0, _GetNDParam(name))
|
993 |
for name, kind in constants.NDS_PARAMETER_TYPES.items()] |
994 |
|
995 |
|
996 |
def _ConvWrapInner(convert, fn, ctx, item): |
997 |
"""Wrapper for converting values.
|
998 |
|
999 |
@param convert: Conversion function receiving value as single parameter
|
1000 |
@param fn: Retrieval function
|
1001 |
|
1002 |
"""
|
1003 |
value = fn(ctx, item) |
1004 |
|
1005 |
# Is the value an abnormal status?
|
1006 |
if compat.any(value is fs for fs in _FS_ALL): |
1007 |
# Return right away
|
1008 |
return value
|
1009 |
|
1010 |
# TODO: Should conversion function also receive context, item or both?
|
1011 |
return convert(value)
|
1012 |
|
1013 |
|
1014 |
def _ConvWrap(convert, fn): |
1015 |
"""Convenience wrapper for L{_ConvWrapInner}.
|
1016 |
|
1017 |
@param convert: Conversion function receiving value as single parameter
|
1018 |
@param fn: Retrieval function
|
1019 |
|
1020 |
"""
|
1021 |
return compat.partial(_ConvWrapInner, convert, fn)
|
1022 |
|
1023 |
|
1024 |
def _GetItemTimestamp(getter): |
1025 |
"""Returns function for getting timestamp of item.
|
1026 |
|
1027 |
@type getter: callable
|
1028 |
@param getter: Function to retrieve timestamp attribute
|
1029 |
|
1030 |
"""
|
1031 |
def fn(_, item): |
1032 |
"""Returns a timestamp of item.
|
1033 |
|
1034 |
"""
|
1035 |
timestamp = getter(item) |
1036 |
if timestamp is None: |
1037 |
# Old configs might not have all timestamps
|
1038 |
return _FS_UNAVAIL
|
1039 |
else:
|
1040 |
return timestamp
|
1041 |
|
1042 |
return fn
|
1043 |
|
1044 |
|
1045 |
def _GetItemTimestampFields(datatype): |
1046 |
"""Returns common timestamp fields.
|
1047 |
|
1048 |
@param datatype: Field data type for use by L{Query.RequestedData}
|
1049 |
|
1050 |
"""
|
1051 |
return [
|
1052 |
(_MakeField("ctime", "CTime", QFT_TIMESTAMP, "Creation timestamp"), |
1053 |
datatype, 0, _GetItemTimestamp(operator.attrgetter("ctime"))), |
1054 |
(_MakeField("mtime", "MTime", QFT_TIMESTAMP, "Modification timestamp"), |
1055 |
datatype, 0, _GetItemTimestamp(operator.attrgetter("mtime"))), |
1056 |
] |
1057 |
|
1058 |
|
1059 |
class NodeQueryData: |
1060 |
"""Data container for node data queries.
|
1061 |
|
1062 |
"""
|
1063 |
def __init__(self, nodes, live_data, master_name, node_to_primary, |
1064 |
node_to_secondary, groups, oob_support, cluster): |
1065 |
"""Initializes this class.
|
1066 |
|
1067 |
"""
|
1068 |
self.nodes = nodes
|
1069 |
self.live_data = live_data
|
1070 |
self.master_name = master_name
|
1071 |
self.node_to_primary = node_to_primary
|
1072 |
self.node_to_secondary = node_to_secondary
|
1073 |
self.groups = groups
|
1074 |
self.oob_support = oob_support
|
1075 |
self.cluster = cluster
|
1076 |
|
1077 |
# Used for individual rows
|
1078 |
self.curlive_data = None |
1079 |
self.ndparams = None |
1080 |
|
1081 |
def __iter__(self): |
1082 |
"""Iterate over all nodes.
|
1083 |
|
1084 |
This function has side-effects and only one instance of the resulting
|
1085 |
generator should be used at a time.
|
1086 |
|
1087 |
"""
|
1088 |
for node in self.nodes: |
1089 |
group = self.groups.get(node.group, None) |
1090 |
if group is None: |
1091 |
self.ndparams = None |
1092 |
else:
|
1093 |
self.ndparams = self.cluster.FillND(node, group) |
1094 |
if self.live_data: |
1095 |
self.curlive_data = self.live_data.get(node.name, None) |
1096 |
else:
|
1097 |
self.curlive_data = None |
1098 |
yield node
|
1099 |
|
1100 |
|
1101 |
#: Fields that are direct attributes of an L{objects.Node} object
|
1102 |
_NODE_SIMPLE_FIELDS = { |
1103 |
"drained": ("Drained", QFT_BOOL, 0, "Whether node is drained"), |
1104 |
"master_candidate": ("MasterC", QFT_BOOL, 0, |
1105 |
"Whether node is a master candidate"),
|
1106 |
"master_capable": ("MasterCapable", QFT_BOOL, 0, |
1107 |
"Whether node can become a master candidate"),
|
1108 |
"name": ("Node", QFT_TEXT, QFF_HOSTNAME, "Node name"), |
1109 |
"offline": ("Offline", QFT_BOOL, 0, "Whether node is marked offline"), |
1110 |
"serial_no": ("SerialNo", QFT_NUMBER, 0, _SERIAL_NO_DOC % "Node"), |
1111 |
"uuid": ("UUID", QFT_TEXT, 0, "Node UUID"), |
1112 |
"vm_capable": ("VMCapable", QFT_BOOL, 0, "Whether node can host instances"), |
1113 |
} |
1114 |
|
1115 |
|
1116 |
#: Fields requiring talking to the node
|
1117 |
# Note that none of these are available for non-vm_capable nodes
|
1118 |
_NODE_LIVE_FIELDS = { |
1119 |
"bootid": ("BootID", QFT_TEXT, "bootid", |
1120 |
"Random UUID renewed for each system reboot, can be used"
|
1121 |
" for detecting reboots by tracking changes"),
|
1122 |
"cnodes": ("CNodes", QFT_NUMBER, "cpu_nodes", |
1123 |
"Number of NUMA domains on node (if exported by hypervisor)"),
|
1124 |
"csockets": ("CSockets", QFT_NUMBER, "cpu_sockets", |
1125 |
"Number of physical CPU sockets (if exported by hypervisor)"),
|
1126 |
"ctotal": ("CTotal", QFT_NUMBER, "cpu_total", "Number of logical processors"), |
1127 |
"dfree": ("DFree", QFT_UNIT, "vg_free", |
1128 |
"Available disk space in volume group"),
|
1129 |
"dtotal": ("DTotal", QFT_UNIT, "vg_size", |
1130 |
"Total disk space in volume group used for instance disk"
|
1131 |
" allocation"),
|
1132 |
"mfree": ("MFree", QFT_UNIT, "memory_free", |
1133 |
"Memory available for instance allocations"),
|
1134 |
"mnode": ("MNode", QFT_UNIT, "memory_dom0", |
1135 |
"Amount of memory used by node (dom0 for Xen)"),
|
1136 |
"mtotal": ("MTotal", QFT_UNIT, "memory_total", |
1137 |
"Total amount of memory of physical machine"),
|
1138 |
} |
1139 |
|
1140 |
|
1141 |
def _GetGroup(cb): |
1142 |
"""Build function for calling another function with an node group.
|
1143 |
|
1144 |
@param cb: The callback to be called with the nodegroup
|
1145 |
|
1146 |
"""
|
1147 |
def fn(ctx, node): |
1148 |
"""Get group data for a node.
|
1149 |
|
1150 |
@type ctx: L{NodeQueryData}
|
1151 |
@type inst: L{objects.Node}
|
1152 |
@param inst: Node object
|
1153 |
|
1154 |
"""
|
1155 |
ng = ctx.groups.get(node.group, None)
|
1156 |
if ng is None: |
1157 |
# Nodes always have a group, or the configuration is corrupt
|
1158 |
return _FS_UNAVAIL
|
1159 |
|
1160 |
return cb(ctx, node, ng)
|
1161 |
|
1162 |
return fn
|
1163 |
|
1164 |
|
1165 |
def _GetNodeGroup(ctx, node, ng): # pylint: disable=W0613 |
1166 |
"""Returns the name of a node's group.
|
1167 |
|
1168 |
@type ctx: L{NodeQueryData}
|
1169 |
@type node: L{objects.Node}
|
1170 |
@param node: Node object
|
1171 |
@type ng: L{objects.NodeGroup}
|
1172 |
@param ng: The node group this node belongs to
|
1173 |
|
1174 |
"""
|
1175 |
return ng.name
|
1176 |
|
1177 |
|
1178 |
def _GetNodePower(ctx, node): |
1179 |
"""Returns the node powered state
|
1180 |
|
1181 |
@type ctx: L{NodeQueryData}
|
1182 |
@type node: L{objects.Node}
|
1183 |
@param node: Node object
|
1184 |
|
1185 |
"""
|
1186 |
if ctx.oob_support[node.name]:
|
1187 |
return node.powered
|
1188 |
|
1189 |
return _FS_UNAVAIL
|
1190 |
|
1191 |
|
1192 |
def _GetNdParams(ctx, node, ng): |
1193 |
"""Returns the ndparams for this node.
|
1194 |
|
1195 |
@type ctx: L{NodeQueryData}
|
1196 |
@type node: L{objects.Node}
|
1197 |
@param node: Node object
|
1198 |
@type ng: L{objects.NodeGroup}
|
1199 |
@param ng: The node group this node belongs to
|
1200 |
|
1201 |
"""
|
1202 |
return ctx.cluster.SimpleFillND(ng.FillND(node))
|
1203 |
|
1204 |
|
1205 |
def _GetLiveNodeField(field, kind, ctx, node): |
1206 |
"""Gets the value of a "live" field from L{NodeQueryData}.
|
1207 |
|
1208 |
@param field: Live field name
|
1209 |
@param kind: Data kind, one of L{constants.QFT_ALL}
|
1210 |
@type ctx: L{NodeQueryData}
|
1211 |
@type node: L{objects.Node}
|
1212 |
@param node: Node object
|
1213 |
|
1214 |
"""
|
1215 |
if node.offline:
|
1216 |
return _FS_OFFLINE
|
1217 |
|
1218 |
if not node.vm_capable: |
1219 |
return _FS_UNAVAIL
|
1220 |
|
1221 |
if not ctx.curlive_data: |
1222 |
return _FS_NODATA
|
1223 |
|
1224 |
try:
|
1225 |
value = ctx.curlive_data[field] |
1226 |
except KeyError: |
1227 |
return _FS_UNAVAIL
|
1228 |
|
1229 |
if kind == QFT_TEXT:
|
1230 |
return value
|
1231 |
|
1232 |
assert kind in (QFT_NUMBER, QFT_UNIT) |
1233 |
|
1234 |
# Try to convert into number
|
1235 |
try:
|
1236 |
return int(value) |
1237 |
except (ValueError, TypeError): |
1238 |
logging.exception("Failed to convert node field '%s' (value %r) to int",
|
1239 |
value, field) |
1240 |
return _FS_UNAVAIL
|
1241 |
|
1242 |
|
1243 |
def _GetNodeHvState(_, node): |
1244 |
"""Converts node's hypervisor state for query result.
|
1245 |
|
1246 |
"""
|
1247 |
hv_state = node.hv_state |
1248 |
|
1249 |
if hv_state is None: |
1250 |
return _FS_UNAVAIL
|
1251 |
|
1252 |
return dict((name, value.ToDict()) for (name, value) in hv_state.items()) |
1253 |
|
1254 |
|
1255 |
def _GetNodeDiskState(_, node): |
1256 |
"""Converts node's disk state for query result.
|
1257 |
|
1258 |
"""
|
1259 |
disk_state = node.disk_state |
1260 |
|
1261 |
if disk_state is None: |
1262 |
return _FS_UNAVAIL
|
1263 |
|
1264 |
return dict((disk_kind, dict((name, value.ToDict()) |
1265 |
for (name, value) in kind_state.items())) |
1266 |
for (disk_kind, kind_state) in disk_state.items()) |
1267 |
|
1268 |
|
1269 |
def _BuildNodeFields(): |
1270 |
"""Builds list of fields for node queries.
|
1271 |
|
1272 |
"""
|
1273 |
fields = [ |
1274 |
(_MakeField("pip", "PrimaryIP", QFT_TEXT, "Primary IP address"), |
1275 |
NQ_CONFIG, 0, _GetItemAttr("primary_ip")), |
1276 |
(_MakeField("sip", "SecondaryIP", QFT_TEXT, "Secondary IP address"), |
1277 |
NQ_CONFIG, 0, _GetItemAttr("secondary_ip")), |
1278 |
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), NQ_CONFIG, 0, |
1279 |
lambda ctx, node: list(node.GetTags())), |
1280 |
(_MakeField("master", "IsMaster", QFT_BOOL, "Whether node is master"), |
1281 |
NQ_CONFIG, 0, lambda ctx, node: node.name == ctx.master_name), |
1282 |
(_MakeField("group", "Group", QFT_TEXT, "Node group"), NQ_GROUP, 0, |
1283 |
_GetGroup(_GetNodeGroup)), |
1284 |
(_MakeField("group.uuid", "GroupUUID", QFT_TEXT, "UUID of node group"), |
1285 |
NQ_CONFIG, 0, _GetItemAttr("group")), |
1286 |
(_MakeField("powered", "Powered", QFT_BOOL, |
1287 |
"Whether node is thought to be powered on"),
|
1288 |
NQ_OOB, 0, _GetNodePower),
|
1289 |
(_MakeField("ndparams", "NodeParameters", QFT_OTHER, |
1290 |
"Merged node parameters"),
|
1291 |
NQ_GROUP, 0, _GetGroup(_GetNdParams)),
|
1292 |
(_MakeField("custom_ndparams", "CustomNodeParameters", QFT_OTHER, |
1293 |
"Custom node parameters"),
|
1294 |
NQ_GROUP, 0, _GetItemAttr("ndparams")), |
1295 |
(_MakeField("hv_state", "HypervisorState", QFT_OTHER, "Hypervisor state"), |
1296 |
NQ_CONFIG, 0, _GetNodeHvState),
|
1297 |
(_MakeField("disk_state", "DiskState", QFT_OTHER, "Disk state"), |
1298 |
NQ_CONFIG, 0, _GetNodeDiskState),
|
1299 |
] |
1300 |
|
1301 |
fields.extend(_BuildNDFields(False))
|
1302 |
|
1303 |
# Node role
|
1304 |
role_values = (constants.NR_MASTER, constants.NR_MCANDIDATE, |
1305 |
constants.NR_REGULAR, constants.NR_DRAINED, |
1306 |
constants.NR_OFFLINE) |
1307 |
role_doc = ("Node role; \"%s\" for master, \"%s\" for master candidate,"
|
1308 |
" \"%s\" for regular, \"%s\" for drained, \"%s\" for offline" %
|
1309 |
role_values) |
1310 |
fields.append((_MakeField("role", "Role", QFT_TEXT, role_doc), NQ_CONFIG, 0, |
1311 |
lambda ctx, node: _GetNodeRole(node, ctx.master_name)))
|
1312 |
assert set(role_values) == constants.NR_ALL |
1313 |
|
1314 |
def _GetLength(getter): |
1315 |
return lambda ctx, node: len(getter(ctx)[node.name]) |
1316 |
|
1317 |
def _GetList(getter): |
1318 |
return lambda ctx, node: list(getter(ctx)[node.name]) |
1319 |
|
1320 |
# Add fields operating on instance lists
|
1321 |
for prefix, titleprefix, docword, getter in \ |
1322 |
[("p", "Pri", "primary", operator.attrgetter("node_to_primary")), |
1323 |
("s", "Sec", "secondary", operator.attrgetter("node_to_secondary"))]: |
1324 |
# TODO: Allow filterting by hostname in list
|
1325 |
fields.extend([ |
1326 |
(_MakeField("%sinst_cnt" % prefix, "%sinst" % prefix.upper(), QFT_NUMBER, |
1327 |
"Number of instances with this node as %s" % docword),
|
1328 |
NQ_INST, 0, _GetLength(getter)),
|
1329 |
(_MakeField("%sinst_list" % prefix, "%sInstances" % titleprefix, |
1330 |
QFT_OTHER, |
1331 |
"List of instances with this node as %s" % docword),
|
1332 |
NQ_INST, 0, _GetList(getter)),
|
1333 |
]) |
1334 |
|
1335 |
# Add simple fields
|
1336 |
fields.extend([ |
1337 |
(_MakeField(name, title, kind, doc), NQ_CONFIG, flags, _GetItemAttr(name)) |
1338 |
for (name, (title, kind, flags, doc)) in _NODE_SIMPLE_FIELDS.items() |
1339 |
]) |
1340 |
|
1341 |
# Add fields requiring live data
|
1342 |
fields.extend([ |
1343 |
(_MakeField(name, title, kind, doc), NQ_LIVE, 0,
|
1344 |
compat.partial(_GetLiveNodeField, nfield, kind)) |
1345 |
for (name, (title, kind, nfield, doc)) in _NODE_LIVE_FIELDS.items() |
1346 |
]) |
1347 |
|
1348 |
# Add timestamps
|
1349 |
fields.extend(_GetItemTimestampFields(NQ_CONFIG)) |
1350 |
|
1351 |
return _PrepareFieldList(fields, [])
|
1352 |
|
1353 |
|
1354 |
class InstanceQueryData: |
1355 |
"""Data container for instance data queries.
|
1356 |
|
1357 |
"""
|
1358 |
def __init__(self, instances, cluster, disk_usage, offline_nodes, bad_nodes, |
1359 |
live_data, wrongnode_inst, console, nodes, groups): |
1360 |
"""Initializes this class.
|
1361 |
|
1362 |
@param instances: List of instance objects
|
1363 |
@param cluster: Cluster object
|
1364 |
@type disk_usage: dict; instance name as key
|
1365 |
@param disk_usage: Per-instance disk usage
|
1366 |
@type offline_nodes: list of strings
|
1367 |
@param offline_nodes: List of offline nodes
|
1368 |
@type bad_nodes: list of strings
|
1369 |
@param bad_nodes: List of faulty nodes
|
1370 |
@type live_data: dict; instance name as key
|
1371 |
@param live_data: Per-instance live data
|
1372 |
@type wrongnode_inst: set
|
1373 |
@param wrongnode_inst: Set of instances running on wrong node(s)
|
1374 |
@type console: dict; instance name as key
|
1375 |
@param console: Per-instance console information
|
1376 |
@type nodes: dict; node name as key
|
1377 |
@param nodes: Node objects
|
1378 |
|
1379 |
"""
|
1380 |
assert len(set(bad_nodes) & set(offline_nodes)) == len(offline_nodes), \ |
1381 |
"Offline nodes not included in bad nodes"
|
1382 |
assert not (set(live_data.keys()) & set(bad_nodes)), \ |
1383 |
"Found live data for bad or offline nodes"
|
1384 |
|
1385 |
self.instances = instances
|
1386 |
self.cluster = cluster
|
1387 |
self.disk_usage = disk_usage
|
1388 |
self.offline_nodes = offline_nodes
|
1389 |
self.bad_nodes = bad_nodes
|
1390 |
self.live_data = live_data
|
1391 |
self.wrongnode_inst = wrongnode_inst
|
1392 |
self.console = console
|
1393 |
self.nodes = nodes
|
1394 |
self.groups = groups
|
1395 |
|
1396 |
# Used for individual rows
|
1397 |
self.inst_hvparams = None |
1398 |
self.inst_beparams = None |
1399 |
self.inst_osparams = None |
1400 |
self.inst_nicparams = None |
1401 |
|
1402 |
def __iter__(self): |
1403 |
"""Iterate over all instances.
|
1404 |
|
1405 |
This function has side-effects and only one instance of the resulting
|
1406 |
generator should be used at a time.
|
1407 |
|
1408 |
"""
|
1409 |
for inst in self.instances: |
1410 |
self.inst_hvparams = self.cluster.FillHV(inst, skip_globals=True) |
1411 |
self.inst_beparams = self.cluster.FillBE(inst) |
1412 |
self.inst_osparams = self.cluster.SimpleFillOS(inst.os, inst.osparams) |
1413 |
self.inst_nicparams = [self.cluster.SimpleFillNIC(nic.nicparams) |
1414 |
for nic in inst.nics] |
1415 |
|
1416 |
yield inst
|
1417 |
|
1418 |
|
1419 |
def _GetInstOperState(ctx, inst): |
1420 |
"""Get instance's operational status.
|
1421 |
|
1422 |
@type ctx: L{InstanceQueryData}
|
1423 |
@type inst: L{objects.Instance}
|
1424 |
@param inst: Instance object
|
1425 |
|
1426 |
"""
|
1427 |
# Can't use RS_OFFLINE here as it would describe the instance to
|
1428 |
# be offline when we actually don't know due to missing data
|
1429 |
if inst.primary_node in ctx.bad_nodes: |
1430 |
return _FS_NODATA
|
1431 |
else:
|
1432 |
return bool(ctx.live_data.get(inst.name)) |
1433 |
|
1434 |
|
1435 |
def _GetInstLiveData(name): |
1436 |
"""Build function for retrieving live data.
|
1437 |
|
1438 |
@type name: string
|
1439 |
@param name: Live data field name
|
1440 |
|
1441 |
"""
|
1442 |
def fn(ctx, inst): |
1443 |
"""Get live data for an instance.
|
1444 |
|
1445 |
@type ctx: L{InstanceQueryData}
|
1446 |
@type inst: L{objects.Instance}
|
1447 |
@param inst: Instance object
|
1448 |
|
1449 |
"""
|
1450 |
if (inst.primary_node in ctx.bad_nodes or |
1451 |
inst.primary_node in ctx.offline_nodes):
|
1452 |
# Can't use RS_OFFLINE here as it would describe the instance to be
|
1453 |
# offline when we actually don't know due to missing data
|
1454 |
return _FS_NODATA
|
1455 |
|
1456 |
if inst.name in ctx.live_data: |
1457 |
data = ctx.live_data[inst.name] |
1458 |
if name in data: |
1459 |
return data[name]
|
1460 |
|
1461 |
return _FS_UNAVAIL
|
1462 |
|
1463 |
return fn
|
1464 |
|
1465 |
|
1466 |
def _GetInstStatus(ctx, inst): |
1467 |
"""Get instance status.
|
1468 |
|
1469 |
@type ctx: L{InstanceQueryData}
|
1470 |
@type inst: L{objects.Instance}
|
1471 |
@param inst: Instance object
|
1472 |
|
1473 |
"""
|
1474 |
if inst.primary_node in ctx.offline_nodes: |
1475 |
return constants.INSTST_NODEOFFLINE
|
1476 |
|
1477 |
if inst.primary_node in ctx.bad_nodes: |
1478 |
return constants.INSTST_NODEDOWN
|
1479 |
|
1480 |
if bool(ctx.live_data.get(inst.name)): |
1481 |
if inst.name in ctx.wrongnode_inst: |
1482 |
return constants.INSTST_WRONGNODE
|
1483 |
elif inst.admin_state == constants.ADMINST_UP:
|
1484 |
return constants.INSTST_RUNNING
|
1485 |
else:
|
1486 |
return constants.INSTST_ERRORUP
|
1487 |
|
1488 |
if inst.admin_state == constants.ADMINST_UP:
|
1489 |
return constants.INSTST_ERRORDOWN
|
1490 |
elif inst.admin_state == constants.ADMINST_DOWN:
|
1491 |
return constants.INSTST_ADMINDOWN
|
1492 |
|
1493 |
return constants.INSTST_ADMINOFFLINE
|
1494 |
|
1495 |
|
1496 |
def _GetInstDiskSize(index): |
1497 |
"""Build function for retrieving disk size.
|
1498 |
|
1499 |
@type index: int
|
1500 |
@param index: Disk index
|
1501 |
|
1502 |
"""
|
1503 |
def fn(_, inst): |
1504 |
"""Get size of a disk.
|
1505 |
|
1506 |
@type inst: L{objects.Instance}
|
1507 |
@param inst: Instance object
|
1508 |
|
1509 |
"""
|
1510 |
try:
|
1511 |
return inst.disks[index].size
|
1512 |
except IndexError: |
1513 |
return _FS_UNAVAIL
|
1514 |
|
1515 |
return fn
|
1516 |
|
1517 |
|
1518 |
def _GetInstNic(index, cb): |
1519 |
"""Build function for calling another function with an instance NIC.
|
1520 |
|
1521 |
@type index: int
|
1522 |
@param index: NIC index
|
1523 |
@type cb: callable
|
1524 |
@param cb: Callback
|
1525 |
|
1526 |
"""
|
1527 |
def fn(ctx, inst): |
1528 |
"""Call helper function with instance NIC.
|
1529 |
|
1530 |
@type ctx: L{InstanceQueryData}
|
1531 |
@type inst: L{objects.Instance}
|
1532 |
@param inst: Instance object
|
1533 |
|
1534 |
"""
|
1535 |
try:
|
1536 |
nic = inst.nics[index] |
1537 |
except IndexError: |
1538 |
return _FS_UNAVAIL
|
1539 |
|
1540 |
return cb(ctx, index, nic)
|
1541 |
|
1542 |
return fn
|
1543 |
|
1544 |
|
1545 |
def _GetInstNicIp(ctx, _, nic): # pylint: disable=W0613 |
1546 |
"""Get a NIC's IP address.
|
1547 |
|
1548 |
@type ctx: L{InstanceQueryData}
|
1549 |
@type nic: L{objects.NIC}
|
1550 |
@param nic: NIC object
|
1551 |
|
1552 |
"""
|
1553 |
if nic.ip is None: |
1554 |
return _FS_UNAVAIL
|
1555 |
else:
|
1556 |
return nic.ip
|
1557 |
|
1558 |
|
1559 |
def _GetInstNicBridge(ctx, index, _): |
1560 |
"""Get a NIC's bridge.
|
1561 |
|
1562 |
@type ctx: L{InstanceQueryData}
|
1563 |
@type index: int
|
1564 |
@param index: NIC index
|
1565 |
|
1566 |
"""
|
1567 |
assert len(ctx.inst_nicparams) >= index |
1568 |
|
1569 |
nicparams = ctx.inst_nicparams[index] |
1570 |
|
1571 |
if nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
|
1572 |
return nicparams[constants.NIC_LINK]
|
1573 |
else:
|
1574 |
return _FS_UNAVAIL
|
1575 |
|
1576 |
|
1577 |
def _GetInstAllNicBridges(ctx, inst): |
1578 |
"""Get all network bridges for an instance.
|
1579 |
|
1580 |
@type ctx: L{InstanceQueryData}
|
1581 |
@type inst: L{objects.Instance}
|
1582 |
@param inst: Instance object
|
1583 |
|
1584 |
"""
|
1585 |
assert len(ctx.inst_nicparams) == len(inst.nics) |
1586 |
|
1587 |
result = [] |
1588 |
|
1589 |
for nicp in ctx.inst_nicparams: |
1590 |
if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
|
1591 |
result.append(nicp[constants.NIC_LINK]) |
1592 |
else:
|
1593 |
result.append(None)
|
1594 |
|
1595 |
assert len(result) == len(inst.nics) |
1596 |
|
1597 |
return result
|
1598 |
|
1599 |
|
1600 |
def _GetInstNicParam(name): |
1601 |
"""Build function for retrieving a NIC parameter.
|
1602 |
|
1603 |
@type name: string
|
1604 |
@param name: Parameter name
|
1605 |
|
1606 |
"""
|
1607 |
def fn(ctx, index, _): |
1608 |
"""Get a NIC's bridge.
|
1609 |
|
1610 |
@type ctx: L{InstanceQueryData}
|
1611 |
@type inst: L{objects.Instance}
|
1612 |
@param inst: Instance object
|
1613 |
@type nic: L{objects.NIC}
|
1614 |
@param nic: NIC object
|
1615 |
|
1616 |
"""
|
1617 |
assert len(ctx.inst_nicparams) >= index |
1618 |
return ctx.inst_nicparams[index][name]
|
1619 |
|
1620 |
return fn
|
1621 |
|
1622 |
|
def _GetInstanceNetworkFields():
  """Get instance fields involving network interfaces.

  @return: Tuple containing list of field definitions used as input for
    L{_PrepareFieldList} and a list of aliases

  """
  nic_mac_fn = lambda ctx, _, nic: nic.mac
  nic_mode_fn = _GetInstNicParam(constants.NIC_MODE)
  nic_link_fn = _GetInstNicParam(constants.NIC_LINK)

  fields = [
    # All NICs
    (_MakeField("nic.count", "NICs", QFT_NUMBER,
                "Number of network interfaces"),
     IQ_CONFIG, 0, lambda ctx, inst: len(inst.nics)),
    (_MakeField("nic.macs", "NIC_MACs", QFT_OTHER,
                "List containing each network interface's MAC address"),
     IQ_CONFIG, 0, lambda ctx, inst: [nic.mac for nic in inst.nics]),
    (_MakeField("nic.ips", "NIC_IPs", QFT_OTHER,
                "List containing each network interface's IP address"),
     IQ_CONFIG, 0, lambda ctx, inst: [nic.ip for nic in inst.nics]),
    (_MakeField("nic.modes", "NIC_modes", QFT_OTHER,
                "List containing each network interface's mode"), IQ_CONFIG, 0,
     lambda ctx, inst: [nicp[constants.NIC_MODE]
                        for nicp in ctx.inst_nicparams]),
    (_MakeField("nic.links", "NIC_links", QFT_OTHER,
                "List containing each network interface's link"), IQ_CONFIG, 0,
     lambda ctx, inst: [nicp[constants.NIC_LINK]
                        for nicp in ctx.inst_nicparams]),
    (_MakeField("nic.bridges", "NIC_bridges", QFT_OTHER,
                "List containing each network interface's bridge"),
     IQ_CONFIG, 0, _GetInstAllNicBridges),
    ]

  # NICs by number
  for i in range(constants.MAX_NICS):
    numtext = utils.FormatOrdinal(i + 1)
    fields.extend([
      (_MakeField("nic.ip/%s" % i, "NicIP/%s" % i, QFT_TEXT,
                  "IP address of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicIp)),
      (_MakeField("nic.mac/%s" % i, "NicMAC/%s" % i, QFT_TEXT,
                  "MAC address of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, nic_mac_fn)),
      (_MakeField("nic.mode/%s" % i, "NicMode/%s" % i, QFT_TEXT,
                  "Mode of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, nic_mode_fn)),
      (_MakeField("nic.link/%s" % i, "NicLink/%s" % i, QFT_TEXT,
                  "Link of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, nic_link_fn)),
      (_MakeField("nic.bridge/%s" % i, "NicBridge/%s" % i, QFT_TEXT,
                  "Bridge of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicBridge)),
      ])

  aliases = [
    # Legacy fields for first NIC
    ("ip", "nic.ip/0"),
    ("mac", "nic.mac/0"),
    ("bridge", "nic.bridge/0"),
    ("nic_mode", "nic.mode/0"),
    ("nic_link", "nic.link/0"),
    ]

  return (fields, aliases)


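# Note: each entry built above follows the same four-element layout used
# throughout this module: (field definition, data request kind, QFF_* flags,
# retrieval function). The aliases returned alongside map legacy names to
# their per-index counterparts; a minimal sketch (not executed):
#
#   (fields, aliases) = _GetInstanceNetworkFields()
#   # aliases contains ("mac", "nic.mac/0"), so querying "mac" resolves to
#   # the MAC address of the first NIC.
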
def _GetInstDiskUsage(ctx, inst):
  """Get disk usage for an instance.

  @type ctx: L{InstanceQueryData}
  @type inst: L{objects.Instance}
  @param inst: Instance object

  """
  usage = ctx.disk_usage[inst.name]

  if usage is None:
    usage = 0

  return usage


def _GetInstanceConsole(ctx, inst):
  """Get console information for instance.

  @type ctx: L{InstanceQueryData}
  @type inst: L{objects.Instance}
  @param inst: Instance object

  """
  consinfo = ctx.console[inst.name]

  if consinfo is None:
    return _FS_UNAVAIL

  return consinfo


def _GetInstanceDiskFields():
  """Get instance fields involving disks.

  @return: List of field definitions used as input for L{_PrepareFieldList}

  """
  fields = [
    (_MakeField("disk_usage", "DiskUsage", QFT_UNIT,
                "Total disk space used by instance on each of its nodes;"
                " this is not the disk size visible to the instance, but"
                " the usage on the node"),
     IQ_DISKUSAGE, 0, _GetInstDiskUsage),
    (_MakeField("disk.count", "Disks", QFT_NUMBER, "Number of disks"),
     IQ_CONFIG, 0, lambda ctx, inst: len(inst.disks)),
    (_MakeField("disk.sizes", "Disk_sizes", QFT_OTHER, "List of disk sizes"),
     IQ_CONFIG, 0, lambda ctx, inst: [disk.size for disk in inst.disks]),
    ]

  # Disks by number
  fields.extend([
    (_MakeField("disk.size/%s" % i, "Disk/%s" % i, QFT_UNIT,
                "Disk size of %s disk" % utils.FormatOrdinal(i + 1)),
     IQ_CONFIG, 0, _GetInstDiskSize(i))
    for i in range(constants.MAX_DISKS)
    ])

  return fields


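# Illustrative note: the comprehension above generates one field per possible
# disk index, so with constants.MAX_DISKS == 16 (value assumed here only for
# illustration) the generated names would be "disk.size/0" through
# "disk.size/15", each resolving through _GetInstDiskSize(i).
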
def _GetInstanceParameterFields():
  """Get instance fields involving parameters.

  @return: List of field definitions used as input for L{_PrepareFieldList}

  """
  # TODO: Consider moving titles closer to constants
  be_title = {
    constants.BE_AUTO_BALANCE: "Auto_balance",
    constants.BE_MAXMEM: "ConfigMaxMem",
    constants.BE_MINMEM: "ConfigMinMem",
    constants.BE_VCPUS: "ConfigVCPUs",
    }

  hv_title = {
    constants.HV_ACPI: "ACPI",
    constants.HV_BOOT_ORDER: "Boot_order",
    constants.HV_CDROM_IMAGE_PATH: "CDROM_image_path",
    constants.HV_DISK_TYPE: "Disk_type",
    constants.HV_INITRD_PATH: "Initrd_path",
    constants.HV_KERNEL_PATH: "Kernel_path",
    constants.HV_NIC_TYPE: "NIC_type",
    constants.HV_PAE: "PAE",
    constants.HV_VNC_BIND_ADDRESS: "VNC_bind_address",
    constants.HV_PASSTHROUGH: "pci_pass",
    constants.HV_CPU_TYPE: "cpu_type",
    }

  fields = [
    # Filled parameters
    (_MakeField("hvparams", "HypervisorParameters", QFT_OTHER,
                "Hypervisor parameters (merged)"),
     IQ_CONFIG, 0, lambda ctx, _: ctx.inst_hvparams),
    (_MakeField("beparams", "BackendParameters", QFT_OTHER,
                "Backend parameters (merged)"),
     IQ_CONFIG, 0, lambda ctx, _: ctx.inst_beparams),
    (_MakeField("osparams", "OpSysParameters", QFT_OTHER,
                "Operating system parameters (merged)"),
     IQ_CONFIG, 0, lambda ctx, _: ctx.inst_osparams),

    # Unfilled parameters
    (_MakeField("custom_hvparams", "CustomHypervisorParameters", QFT_OTHER,
                "Custom hypervisor parameters"),
     IQ_CONFIG, 0, _GetItemAttr("hvparams")),
    (_MakeField("custom_beparams", "CustomBackendParameters", QFT_OTHER,
                "Custom backend parameters",),
     IQ_CONFIG, 0, _GetItemAttr("beparams")),
    (_MakeField("custom_osparams", "CustomOpSysParameters", QFT_OTHER,
                "Custom operating system parameters",),
     IQ_CONFIG, 0, _GetItemAttr("osparams")),
    (_MakeField("custom_nicparams", "CustomNicParameters", QFT_OTHER,
                "Custom network interface parameters"),
     IQ_CONFIG, 0, lambda ctx, inst: [nic.nicparams for nic in inst.nics]),
    ]

  # HV params
  def _GetInstHvParam(name):
    return lambda ctx, _: ctx.inst_hvparams.get(name, _FS_UNAVAIL)

  fields.extend([
    (_MakeField("hv/%s" % name, hv_title.get(name, "hv/%s" % name),
                _VTToQFT[kind], "The \"%s\" hypervisor parameter" % name),
     IQ_CONFIG, 0, _GetInstHvParam(name))
    for name, kind in constants.HVS_PARAMETER_TYPES.items()
    if name not in constants.HVC_GLOBALS
    ])

  # BE params
  def _GetInstBeParam(name):
    return lambda ctx, _: ctx.inst_beparams.get(name, None)

  fields.extend([
    (_MakeField("be/%s" % name, be_title.get(name, "be/%s" % name),
                _VTToQFT[kind], "The \"%s\" backend parameter" % name),
     IQ_CONFIG, 0, _GetInstBeParam(name))
    for name, kind in constants.BES_PARAMETER_TYPES.items()
    ])

  return fields


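# Illustrative note: hypervisor and backend parameters are exposed under
# prefixed field names ("hv/<param>" and "be/<param>"), with each parameter's
# value type mapped to a query field type through _VTToQFT. A non-global
# hypervisor parameter that is absent from the merged parameters yields
# _FS_UNAVAIL, per the .get(name, _FS_UNAVAIL) fallback above; e.g. a field
# named "be/vcpus" returns the merged backend VCPU setting.
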
_INST_SIMPLE_FIELDS = {
  "disk_template": ("Disk_template", QFT_TEXT, 0, "Instance disk template"),
  "hypervisor": ("Hypervisor", QFT_TEXT, 0, "Hypervisor name"),
  "name": ("Instance", QFT_TEXT, QFF_HOSTNAME, "Instance name"),
  # Depending on the hypervisor, the port can be None
  "network_port": ("Network_port", QFT_OTHER, 0,
                   "Instance network port if available (e.g. for VNC console)"),
  "os": ("OS", QFT_TEXT, 0, "Operating system"),
  "serial_no": ("SerialNo", QFT_NUMBER, 0, _SERIAL_NO_DOC % "Instance"),
  "uuid": ("UUID", QFT_TEXT, 0, "Instance UUID"),
  }


def _GetInstNodeGroup(ctx, default, node_name):
  """Gets group UUID of an instance node.

  @type ctx: L{InstanceQueryData}
  @param default: Default value
  @type node_name: string
  @param node_name: Node name

  """
  try:
    node = ctx.nodes[node_name]
  except KeyError:
    return default
  else:
    return node.group


def _GetInstNodeGroupName(ctx, default, node_name):
  """Gets group name of an instance node.

  @type ctx: L{InstanceQueryData}
  @param default: Default value
  @type node_name: string
  @param node_name: Node name

  """
  try:
    node = ctx.nodes[node_name]
  except KeyError:
    return default

  try:
    group = ctx.groups[node.group]
  except KeyError:
    return default

  return group.name


def _BuildInstanceFields():
  """Builds list of fields for instance queries.

  """
  fields = [
    (_MakeField("pnode", "Primary_node", QFT_TEXT, "Primary node"),
     IQ_CONFIG, QFF_HOSTNAME, _GetItemAttr("primary_node")),
    (_MakeField("pnode.group", "PrimaryNodeGroup", QFT_TEXT,
                "Primary node's group"),
     IQ_NODES, 0,
     lambda ctx, inst: _GetInstNodeGroupName(ctx, _FS_UNAVAIL,
                                             inst.primary_node)),
    (_MakeField("pnode.group.uuid", "PrimaryNodeGroupUUID", QFT_TEXT,
                "Primary node's group UUID"),
     IQ_NODES, 0,
     lambda ctx, inst: _GetInstNodeGroup(ctx, _FS_UNAVAIL, inst.primary_node)),
    # TODO: Allow filtering by secondary node as hostname
    (_MakeField("snodes", "Secondary_Nodes", QFT_OTHER,
                "Secondary nodes; usually this will just be one node"),
     IQ_CONFIG, 0, lambda ctx, inst: list(inst.secondary_nodes)),
    (_MakeField("snodes.group", "SecondaryNodesGroups", QFT_OTHER,
                "Node groups of secondary nodes"),
     IQ_NODES, 0,
     lambda ctx, inst: map(compat.partial(_GetInstNodeGroupName, ctx, None),
                           inst.secondary_nodes)),
    (_MakeField("snodes.group.uuid", "SecondaryNodesGroupsUUID", QFT_OTHER,
                "Node group UUIDs of secondary nodes"),
     IQ_NODES, 0,
     lambda ctx, inst: map(compat.partial(_GetInstNodeGroup, ctx, None),
                           inst.secondary_nodes)),
    (_MakeField("admin_state", "InstanceState", QFT_TEXT,
                "Desired state of instance"),
     IQ_CONFIG, 0, _GetItemAttr("admin_state")),
    (_MakeField("admin_up", "Autostart", QFT_BOOL,
                "Desired state of instance"),
     IQ_CONFIG, 0, lambda ctx, inst: inst.admin_state == constants.ADMINST_UP),
    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), IQ_CONFIG, 0,
     lambda ctx, inst: list(inst.GetTags())),
    (_MakeField("console", "Console", QFT_OTHER,
                "Instance console information"), IQ_CONSOLE, 0,
     _GetInstanceConsole),
    ]

  # Add simple fields
  fields.extend([
    (_MakeField(name, title, kind, doc), IQ_CONFIG, flags, _GetItemAttr(name))
    for (name, (title, kind, flags, doc)) in _INST_SIMPLE_FIELDS.items()
    ])

  # Fields requiring talking to the node
  fields.extend([
    (_MakeField("oper_state", "Running", QFT_BOOL, "Actual state of instance"),
     IQ_LIVE, 0, _GetInstOperState),
    (_MakeField("oper_ram", "Memory", QFT_UNIT,
                "Actual memory usage as seen by hypervisor"),
     IQ_LIVE, 0, _GetInstLiveData("memory")),
    (_MakeField("oper_vcpus", "VCPUs", QFT_NUMBER,
                "Actual number of VCPUs as seen by hypervisor"),
     IQ_LIVE, 0, _GetInstLiveData("vcpus")),
    ])

  # Status field
  status_values = (constants.INSTST_RUNNING, constants.INSTST_ADMINDOWN,
                   constants.INSTST_WRONGNODE, constants.INSTST_ERRORUP,
                   constants.INSTST_ERRORDOWN, constants.INSTST_NODEDOWN,
                   constants.INSTST_NODEOFFLINE, constants.INSTST_ADMINOFFLINE)
  status_doc = ("Instance status; \"%s\" if instance is set to be running"
                " and actually is, \"%s\" if instance is stopped and"
                " is not running, \"%s\" if instance running, but not on its"
                " designated primary node, \"%s\" if instance should be"
                " stopped, but is actually running, \"%s\" if instance should"
                " run, but doesn't, \"%s\" if instance's primary node is down,"
                " \"%s\" if instance's primary node is marked offline,"
                " \"%s\" if instance is offline and does not use dynamic"
                " resources" % status_values)
  fields.append((_MakeField("status", "Status", QFT_TEXT, status_doc),
                 IQ_LIVE, 0, _GetInstStatus))
  assert set(status_values) == constants.INSTST_ALL, \
         "Status documentation mismatch"

  (network_fields, network_aliases) = _GetInstanceNetworkFields()

  fields.extend(network_fields)
  fields.extend(_GetInstanceParameterFields())
  fields.extend(_GetInstanceDiskFields())
  fields.extend(_GetItemTimestampFields(IQ_CONFIG))

  aliases = [
    ("vcpus", "be/vcpus"),
    ("be/memory", "be/maxmem"),
    ("sda_size", "disk.size/0"),
    ("sdb_size", "disk.size/1"),
    ] + network_aliases

  return _PrepareFieldList(fields, aliases)


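# Usage note (illustrative only): the aliases registered above keep legacy
# field names working, e.g. a query for "sda_size" is answered by the
# "disk.size/0" definition and "vcpus" by "be/vcpus", in addition to the
# first-NIC aliases contributed by _GetInstanceNetworkFields().
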
class LockQueryData:
  """Data container for lock data queries.

  """
  def __init__(self, lockdata):
    """Initializes this class.

    """
    self.lockdata = lockdata

  def __iter__(self):
    """Iterate over all locks.

    """
    return iter(self.lockdata)


def _GetLockOwners(_, data):
  """Returns a sorted list of a lock's current owners.

  """
  (_, _, owners, _) = data

  if owners:
    owners = utils.NiceSort(owners)

  return owners


def _GetLockPending(_, data):
  """Returns a sorted list of a lock's pending acquires.

  """
  (_, _, _, pending) = data

  if pending:
    pending = [(mode, utils.NiceSort(names))
               for (mode, names) in pending]

  return pending


def _BuildLockFields():
  """Builds list of fields for lock queries.

  """
  return _PrepareFieldList([
    # TODO: Lock names are not always hostnames. Should QFF_HOSTNAME be used?
    (_MakeField("name", "Name", QFT_TEXT, "Lock name"), None, 0,
     lambda ctx, (name, mode, owners, pending): name),
    (_MakeField("mode", "Mode", QFT_OTHER,
                "Mode in which the lock is currently acquired"
                " (exclusive or shared)"),
     LQ_MODE, 0, lambda ctx, (name, mode, owners, pending): mode),
    (_MakeField("owner", "Owner", QFT_OTHER, "Current lock owner(s)"),
     LQ_OWNER, 0, _GetLockOwners),
    (_MakeField("pending", "Pending", QFT_OTHER,
                "Threads waiting for the lock"),
     LQ_PENDING, 0, _GetLockPending),
    ], [])


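# Illustrative note: lock query items are expected to be four-element tuples
# of (name, mode, owners, pending), which is what the lambdas and the
# _GetLockOwners/_GetLockPending helpers above unpack. A minimal sketch of one
# such item (values are hypothetical):
#
#   ("cluster", "shared", ["job/123"], [("exclusive", ["job/124"])])
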
class GroupQueryData:
  """Data container for node group data queries.

  """
  def __init__(self, cluster, groups, group_to_nodes, group_to_instances,
               want_diskparams):
    """Initializes this class.

    @param cluster: Cluster object
    @param groups: List of node group objects
    @type group_to_nodes: dict; group UUID as key
    @param group_to_nodes: Per-group list of nodes
    @type group_to_instances: dict; group UUID as key
    @param group_to_instances: Per-group list of (primary) instances
    @type want_diskparams: bool
    @param want_diskparams: Whether disk parameters should be calculated

    """
    self.groups = groups
    self.group_to_nodes = group_to_nodes
    self.group_to_instances = group_to_instances
    self.cluster = cluster
    self.want_diskparams = want_diskparams

    # Used for individual rows
    self.group_ipolicy = None
    self.ndparams = None
    self.group_dp = None

  def __iter__(self):
    """Iterate over all node groups.

    This function has side-effects and only one instance of the resulting
    generator should be used at a time.

    """
    for group in self.groups:
      self.group_ipolicy = self.cluster.SimpleFillIPolicy(group.ipolicy)
      self.ndparams = self.cluster.SimpleFillND(group.ndparams)
      if self.want_diskparams:
        self.group_dp = self.cluster.SimpleFillDP(group.diskparams)
      else:
        self.group_dp = None
      yield group


_GROUP_SIMPLE_FIELDS = {
  "alloc_policy": ("AllocPolicy", QFT_TEXT, "Allocation policy for group"),
  "name": ("Group", QFT_TEXT, "Group name"),
  "serial_no": ("SerialNo", QFT_NUMBER, _SERIAL_NO_DOC % "Group"),
  "uuid": ("UUID", QFT_TEXT, "Group UUID"),
  }


def _BuildGroupFields():
  """Builds list of fields for node group queries.

  """
  # Add simple fields
  fields = [(_MakeField(name, title, kind, doc), GQ_CONFIG, 0,
             _GetItemAttr(name))
            for (name, (title, kind, doc)) in _GROUP_SIMPLE_FIELDS.items()]

  def _GetLength(getter):
    return lambda ctx, group: len(getter(ctx)[group.uuid])

  def _GetSortedList(getter):
    return lambda ctx, group: utils.NiceSort(getter(ctx)[group.uuid])

  group_to_nodes = operator.attrgetter("group_to_nodes")
  group_to_instances = operator.attrgetter("group_to_instances")

  # Add fields for nodes
  fields.extend([
    (_MakeField("node_cnt", "Nodes", QFT_NUMBER, "Number of nodes"),
     GQ_NODE, 0, _GetLength(group_to_nodes)),
    (_MakeField("node_list", "NodeList", QFT_OTHER, "List of nodes"),
     GQ_NODE, 0, _GetSortedList(group_to_nodes)),
    ])

  # Add fields for instances
  fields.extend([
    (_MakeField("pinst_cnt", "Instances", QFT_NUMBER,
                "Number of primary instances"),
     GQ_INST, 0, _GetLength(group_to_instances)),
    (_MakeField("pinst_list", "InstanceList", QFT_OTHER,
                "List of primary instances"),
     GQ_INST, 0, _GetSortedList(group_to_instances)),
    ])

  # Other fields
  fields.extend([
    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), GQ_CONFIG, 0,
     lambda ctx, group: list(group.GetTags())),
    (_MakeField("ipolicy", "InstancePolicy", QFT_OTHER,
                "Instance policy limitations (merged)"),
     GQ_CONFIG, 0, lambda ctx, _: ctx.group_ipolicy),
    (_MakeField("custom_ipolicy", "CustomInstancePolicy", QFT_OTHER,
                "Custom instance policy limitations"),
     GQ_CONFIG, 0, _GetItemAttr("ipolicy")),
    (_MakeField("custom_ndparams", "CustomNDParams", QFT_OTHER,
                "Custom node parameters"),
     GQ_CONFIG, 0, _GetItemAttr("ndparams")),
    (_MakeField("ndparams", "NDParams", QFT_OTHER,
                "Node parameters"),
     GQ_CONFIG, 0, lambda ctx, _: ctx.ndparams),
    (_MakeField("diskparams", "DiskParameters", QFT_OTHER,
                "Disk parameters (merged)"),
     GQ_DISKPARAMS, 0, lambda ctx, _: ctx.group_dp),
    (_MakeField("custom_diskparams", "CustomDiskParameters", QFT_OTHER,
                "Custom disk parameters"),
     GQ_CONFIG, 0, _GetItemAttr("diskparams")),
    ])

  # ND parameters
  fields.extend(_BuildNDFields(True))

  fields.extend(_GetItemTimestampFields(GQ_CONFIG))

  return _PrepareFieldList(fields, [])


class OsInfo(objects.ConfigObject):
  __slots__ = [
    "name",
    "valid",
    "hidden",
    "blacklisted",
    "variants",
    "api_versions",
    "parameters",
    "node_status",
    ]


def _BuildOsFields():
  """Builds list of fields for operating system queries.

  """
  fields = [
    (_MakeField("name", "Name", QFT_TEXT, "Operating system name"),
     None, 0, _GetItemAttr("name")),
    (_MakeField("valid", "Valid", QFT_BOOL,
                "Whether operating system definition is valid"),
     None, 0, _GetItemAttr("valid")),
    (_MakeField("hidden", "Hidden", QFT_BOOL,
                "Whether operating system is hidden"),
     None, 0, _GetItemAttr("hidden")),
    (_MakeField("blacklisted", "Blacklisted", QFT_BOOL,
                "Whether operating system is blacklisted"),
     None, 0, _GetItemAttr("blacklisted")),
    (_MakeField("variants", "Variants", QFT_OTHER,
                "Operating system variants"),
     None, 0, _ConvWrap(utils.NiceSort, _GetItemAttr("variants"))),
    (_MakeField("api_versions", "ApiVersions", QFT_OTHER,
                "Operating system API versions"),
     None, 0, _ConvWrap(sorted, _GetItemAttr("api_versions"))),
    (_MakeField("parameters", "Parameters", QFT_OTHER,
                "Operating system parameters"),
     None, 0, _ConvWrap(compat.partial(utils.NiceSort, key=compat.fst),
                        _GetItemAttr("parameters"))),
    (_MakeField("node_status", "NodeStatus", QFT_OTHER,
                "Status from node"),
     None, 0, _GetItemAttr("node_status")),
    ]

  return _PrepareFieldList(fields, [])


def _JobUnavailInner(fn, ctx, (job_id, job)): # pylint: disable=W0613
  """Return L{_FS_UNAVAIL} if job is None.

  When listing specific jobs (e.g. "gnt-job list 1 2 3"), a job may not be
  found, in which case this function converts it to L{_FS_UNAVAIL}.

  """
  if job is None:
    return _FS_UNAVAIL
  else:
    return fn(job)


def _JobUnavail(inner):
  """Wrapper for L{_JobUnavailInner}.

  """
  return compat.partial(_JobUnavailInner, inner)


def _PerJobOpInner(fn, job):
  """Executes a function per opcode in a job.

  """
  return map(fn, job.ops)


def _PerJobOp(fn):
  """Wrapper for L{_PerJobOpInner}.

  """
  return _JobUnavail(compat.partial(_PerJobOpInner, fn))


def _JobTimestampInner(fn, job):
  """Converts unavailable timestamp to L{_FS_UNAVAIL}.

  """
  timestamp = fn(job)

  if timestamp is None:
    return _FS_UNAVAIL
  else:
    return timestamp


def _JobTimestamp(fn):
  """Wrapper for L{_JobTimestampInner}.

  """
  return _JobUnavail(compat.partial(_JobTimestampInner, fn))


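# Illustrative note: the helpers above compose so that every job field copes
# with missing jobs. _PerJobOp(fn) expands to
# _JobUnavail(compat.partial(_PerJobOpInner, fn)); if the (job_id, job) item
# carries job=None the field yields _FS_UNAVAIL, otherwise fn is applied to
# every opcode in job.ops.
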
def _BuildJobFields():
  """Builds list of fields for job queries.

  """
  fields = [
    (_MakeField("id", "ID", QFT_NUMBER, "Job ID"),
     None, QFF_JOB_ID, lambda _, (job_id, job): job_id),
    (_MakeField("status", "Status", QFT_TEXT, "Job status"),
     None, 0, _JobUnavail(lambda job: job.CalcStatus())),
    (_MakeField("priority", "Priority", QFT_NUMBER,
                ("Current job priority (%s to %s)" %
                 (constants.OP_PRIO_LOWEST, constants.OP_PRIO_HIGHEST))),
     None, 0, _JobUnavail(lambda job: job.CalcPriority())),
    (_MakeField("ops", "OpCodes", QFT_OTHER, "List of all opcodes"),
     None, 0, _PerJobOp(lambda op: op.input.__getstate__())),
    (_MakeField("opresult", "OpCode_result", QFT_OTHER,
                "List of opcodes results"),
     None, 0, _PerJobOp(operator.attrgetter("result"))),
    (_MakeField("opstatus", "OpCode_status", QFT_OTHER,
                "List of opcodes status"),
     None, 0, _PerJobOp(operator.attrgetter("status"))),
    (_MakeField("oplog", "OpCode_log", QFT_OTHER,
                "List of opcode output logs"),
     None, 0, _PerJobOp(operator.attrgetter("log"))),
    (_MakeField("opstart", "OpCode_start", QFT_OTHER,
                "List of opcode start timestamps (before acquiring locks)"),
     None, 0, _PerJobOp(operator.attrgetter("start_timestamp"))),
    (_MakeField("opexec", "OpCode_exec", QFT_OTHER,
                "List of opcode execution start timestamps (after acquiring"
                " locks)"),
     None, 0, _PerJobOp(operator.attrgetter("exec_timestamp"))),
    (_MakeField("opend", "OpCode_end", QFT_OTHER,
                "List of opcode execution end timestamps"),
     None, 0, _PerJobOp(operator.attrgetter("end_timestamp"))),
    (_MakeField("oppriority", "OpCode_prio", QFT_OTHER,
                "List of opcode priorities"),
     None, 0, _PerJobOp(operator.attrgetter("priority"))),
    (_MakeField("summary", "Summary", QFT_OTHER,
                "List of per-opcode summaries"),
     None, 0, _PerJobOp(lambda op: op.input.Summary())),
    ]

  # Timestamp fields
  for (name, attr, title, desc) in [
    ("received_ts", "received_timestamp", "Received",
     "Timestamp of when job was received"),
    ("start_ts", "start_timestamp", "Start", "Timestamp of job start"),
    ("end_ts", "end_timestamp", "End", "Timestamp of job end"),
    ]:
    getter = operator.attrgetter(attr)
    fields.extend([
      (_MakeField(name, title, QFT_OTHER,
                  "%s (tuple containing seconds and microseconds)" % desc),
       None, QFF_SPLIT_TIMESTAMP, _JobTimestamp(getter)),
      ])

  return _PrepareFieldList(fields, [])


def _GetExportName(_, (node_name, expname)): # pylint: disable=W0613
  """Returns an export name if available.

  """
  if expname is None:
    return _FS_UNAVAIL
  else:
    return expname


def _BuildExportFields():
  """Builds list of fields for exports.

  """
  fields = [
    (_MakeField("node", "Node", QFT_TEXT, "Node name"),
     None, QFF_HOSTNAME, lambda _, (node_name, expname): node_name),
    (_MakeField("export", "Export", QFT_TEXT, "Export name"),
     None, 0, _GetExportName),
    ]

  return _PrepareFieldList(fields, [])


_CLUSTER_VERSION_FIELDS = {
  "software_version": ("SoftwareVersion", QFT_TEXT, constants.RELEASE_VERSION,
                       "Software version"),
  "protocol_version": ("ProtocolVersion", QFT_NUMBER,
                       constants.PROTOCOL_VERSION,
                       "RPC protocol version"),
  "config_version": ("ConfigVersion", QFT_NUMBER, constants.CONFIG_VERSION,
                     "Configuration format version"),
  "os_api_version": ("OsApiVersion", QFT_NUMBER, max(constants.OS_API_VERSIONS),
                     "API version for OS template scripts"),
  "export_version": ("ExportVersion", QFT_NUMBER, constants.EXPORT_VERSION,
                     "Import/export file format version"),
  }


_CLUSTER_SIMPLE_FIELDS = {
  "cluster_name": ("Name", QFT_TEXT, QFF_HOSTNAME, "Cluster name"),
  "master_node": ("Master", QFT_TEXT, QFF_HOSTNAME, "Master node name"),
  "volume_group_name": ("VgName", QFT_TEXT, 0, "LVM volume group name"),
  }


class ClusterQueryData:
  def __init__(self, cluster, drain_flag, watcher_pause):
    """Initializes this class.

    @type cluster: L{objects.Cluster}
    @param cluster: Instance of cluster object
    @type drain_flag: bool
    @param drain_flag: Whether job queue is drained
    @type watcher_pause: number
    @param watcher_pause: Until when watcher is paused (Unix timestamp)

    """
    self._cluster = cluster
    self.drain_flag = drain_flag
    self.watcher_pause = watcher_pause

  def __iter__(self):
    return iter([self._cluster])


def _ClusterWatcherPause(ctx, _):
  """Returns until when watcher is paused (if available).

  """
  if ctx.watcher_pause is None:
    return _FS_UNAVAIL
  else:
    return ctx.watcher_pause


def _BuildClusterFields():
  """Builds list of fields for cluster information.

  """
  fields = [
    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), CQ_CONFIG, 0,
     lambda ctx, cluster: list(cluster.GetTags())),
    (_MakeField("architecture", "ArchInfo", QFT_OTHER,
                "Architecture information"), None, 0,
     lambda ctx, _: runtime.GetArchInfo()),
    (_MakeField("drain_flag", "QueueDrained", QFT_BOOL,
                "Flag whether job queue is drained"), CQ_QUEUE_DRAINED, 0,
     lambda ctx, _: ctx.drain_flag),
    (_MakeField("watcher_pause", "WatcherPause", QFT_TIMESTAMP,
                "Until when watcher is paused"), CQ_WATCHER_PAUSE, 0,
     _ClusterWatcherPause),
    ]

  # Simple fields
  fields.extend([
    (_MakeField(name, title, kind, doc), CQ_CONFIG, flags, _GetItemAttr(name))
    for (name, (title, kind, flags, doc)) in _CLUSTER_SIMPLE_FIELDS.items()
    ])

  # Version fields
  fields.extend([
    (_MakeField(name, title, kind, doc), None, 0, _StaticValue(value))
    for (name, (title, kind, value, doc)) in _CLUSTER_VERSION_FIELDS.items()
    ])

  # Add timestamps
  fields.extend(_GetItemTimestampFields(CQ_CONFIG))

  return _PrepareFieldList(fields, [
    ("name", "cluster_name"),
    ])


#: Fields for cluster information
CLUSTER_FIELDS = _BuildClusterFields()

#: Fields available for node queries
NODE_FIELDS = _BuildNodeFields()

#: Fields available for instance queries
INSTANCE_FIELDS = _BuildInstanceFields()

#: Fields available for lock queries
LOCK_FIELDS = _BuildLockFields()

#: Fields available for node group queries
GROUP_FIELDS = _BuildGroupFields()

#: Fields available for operating system queries
OS_FIELDS = _BuildOsFields()

#: Fields available for job queries
JOB_FIELDS = _BuildJobFields()

#: Fields available for exports
EXPORT_FIELDS = _BuildExportFields()

#: All available resources
ALL_FIELDS = {
  constants.QR_CLUSTER: CLUSTER_FIELDS,
  constants.QR_INSTANCE: INSTANCE_FIELDS,
  constants.QR_NODE: NODE_FIELDS,
  constants.QR_LOCK: LOCK_FIELDS,
  constants.QR_GROUP: GROUP_FIELDS,
  constants.QR_OS: OS_FIELDS,
  constants.QR_JOB: JOB_FIELDS,
  constants.QR_EXPORT: EXPORT_FIELDS,
  }

#: All available field lists
ALL_FIELD_LISTS = ALL_FIELDS.values()
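
# Minimal usage sketch (illustrative only, not executed): callers look up the
# prepared field definitions for a resource by its constants.QR_* key, e.g.:
#
#   os_fields = ALL_FIELDS[constants.QR_OS]
#   query = Query(os_fields, ["name", "valid"])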