#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.
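
  Example (a minimal sketch using hypothetical names, for illustration
  only)::

    class LUHypotheticalNoop(LogicalUnit):
      HPATH = "hypothetical-noop"
      HTYPE = constants.HTYPE_CLUSTER
      _OP_REQP = []

      def ExpandNames(self):
        self.needed_locks = {}

      def BuildHooksEnv(self):
        return {"OP_TARGET": self.cfg.GetClusterName()}, [], []

      def CheckPrereq(self):
        pass

      def Exec(self, feedback_fn):
        return True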

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name, errors.ECODE_INVAL)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.
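
    Example (sketch with a hypothetical parameter)::

      def CheckArguments(self):
        # default an optional opcode parameter so later code can rely on it
        if not hasattr(self.op, "force"):
          self.op.force = False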

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.
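
    Example (the canonical pattern, also shown in _LockInstancesNodes)::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()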

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this
    will be handled in the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes on which to run hooks, return an empty list
    (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.
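
    Example (sketch of a hypothetical override)::

      def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
        if phase == constants.HOOKS_PHASE_POST:
          feedback_fn("Post-execution hooks have run")
        return lu_result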

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the 'unused argument' and
    # 'could be a function' warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLu.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec
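
  Example (sketch with hypothetical names)::

    class HypotheticalTasklet(Tasklet):
      def __init__(self, lu, instance_name):
        Tasklet.__init__(self, lu)
        self.instance_name = instance_name

      def CheckPrereq(self):
        if self.cfg.GetInstanceInfo(self.instance_name) is None:
          raise errors.OpPrereqError("Instance '%s' not known" %
                                     self.instance_name, errors.ECODE_NOENT)

      def Exec(self, feedback_fn):
        feedback_fn("Processing %s" % self.instance_name)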

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: non-empty list of node names to expand and check
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is of a wrong type
  @raise errors.ProgrammerError: if the nodes list is empty

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
                               errors.ECODE_INVAL)

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
                                 " non-empty list of nodes whose name is to be"
                                 " expanded.")

  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'",
                               errors.ECODE_INVAL)

  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
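
  Example (hypothetical field sets)::

    _CheckOutputFields(static=utils.FieldSet("name"),
                       dynamic=utils.FieldSet("free_memory"),
                       selected=["name", "free_memory"])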

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)), errors.ECODE_INVAL)
  setattr(op, name, val)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckDiskTemplate(template):
  """Ensure a given disk template is valid.

  """
  if template not in constants.DISK_TEMPLATES:
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
  if template == constants.DT_FILE and not constants.ENABLE_FILE_STORAGE:
    raise errors.OpPrereqError("File storage disabled at configure time",
                               errors.ECODE_INVAL)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instances."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks.

  This builds the hook environment from individual variables.
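
  Example: a single bridged NIC produces, among others, the variables
  INSTANCE_NIC_COUNT=1, INSTANCE_NIC0_MODE=bridged and INSTANCE_NIC0_BRIDGE
  (set to the NIC's link), while backend and hypervisor parameters appear
  as INSTANCE_BE_* and INSTANCE_HV_* variables respectively.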

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.
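
  Example: with candidate_pool_size 10, 4 current candidates and 4 desired,
  adding this node raises the desired count to min(4 + 1, 10) = 5; since
  4 < 5, the function advises promotion.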

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


def _CheckNicsBridgesExist(lu, target_nics, target_node,
                           profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity
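
  Example: for the name "debootstrap+lenny", the variant checked against
  os_obj.supported_variants is "lenny"; a bare "debootstrap" is rejected
  when the OS declares variants.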

  """
  if not os_obj.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  """Returns the indices of an instance's faulty disks on a node.

  """
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _FormatTimestamp(secs):
  """Formats a Unix timestamp as a human-readable UTC date and time.

  """
  return time.strftime("%F %T %Z", time.gmtime(secs))


class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    return True

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    if modify_ssh_setup:
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      utils.CreateBackup(priv_key)
      utils.CreateBackup(pub_key)

    return master


def _VerifyCertificateInner(filename, expired, not_before, not_after, now,
                            warn_days=constants.SSL_CERT_EXPIRATION_WARN,
                            error_days=constants.SSL_CERT_EXPIRATION_ERROR):
  """Verifies certificate details for LUVerifyCluster.
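
  Example: with warn_days=30 and error_days=7, a certificate expiring in
  10 days yields a warning, while one expiring in 5 days yields an error.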

  """
  if expired:
    msg = "Certificate %s is expired" % filename

    if not_before is not None and not_after is not None:
      msg += (" (valid from %s to %s)" %
              (_FormatTimestamp(not_before),
               _FormatTimestamp(not_after)))
    elif not_before is not None:
      msg += " (valid from %s)" % _FormatTimestamp(not_before)
    elif not_after is not None:
      msg += " (valid until %s)" % _FormatTimestamp(not_after)

    return (LUVerifyCluster.ETYPE_ERROR, msg)

  elif not_before is not None and not_before > now:
    return (LUVerifyCluster.ETYPE_WARNING,
            "Certificate %s not yet valid (valid from %s)" %
            (filename, _FormatTimestamp(not_before)))

  elif not_after is not None:
    remaining_days = int((not_after - now) / (24 * 3600))

    msg = ("Certificate %s expires in %d days" % (filename, remaining_days))

    if remaining_days <= error_days:
      return (LUVerifyCluster.ETYPE_ERROR, msg)

    if remaining_days <= warn_days:
      return (LUVerifyCluster.ETYPE_WARNING, msg)

  return (None, None)


def _VerifyCertificate(filename):
  """Verifies a certificate for LUVerifyCluster.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUVerifyCluster.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = utils.GetX509CertValidity(cert)

  return _VerifyCertificateInner(filename, cert.has_expired(),
                                 not_before, not_after, time.time())


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
  REQ_BGL = False

  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
  ENODEDRBD = (TNODE, "ENODEDRBD")
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
  ENODEHV = (TNODE, "ENODEHV")
  ENODELVM = (TNODE, "ENODELVM")
  ENODEN1 = (TNODE, "ENODEN1")
  ENODENET = (TNODE, "ENODENET")
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
  ENODERPC = (TNODE, "ENODERPC")
  ENODESSH = (TNODE, "ENODESSH")
  ENODEVERSION = (TNODE, "ENODEVERSION")
  ENODESETUP = (TNODE, "ENODESETUP")
  ENODETIME = (TNODE, "ENODETIME")

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  class NodeImage(object):
    """A class representing the logical and physical status of a node.

    @ivar volumes: a structure as returned from
        L{ganeti.backend.GetVolumeList} (runtime)
    @ivar instances: a list of running instances (runtime)
    @ivar pinst: list of configured primary instances (config)
    @ivar sinst: list of configured secondary instances (config)
    @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
        of this node (config)
    @ivar mfree: free memory, as reported by hypervisor (runtime)
    @ivar dfree: free disk, as reported by the node (runtime)
    @ivar offline: the offline status (config)
    @type rpc_fail: boolean
    @ivar rpc_fail: whether the RPC verify call failed (overall,
        not whether the individual keys were correct) (runtime)
    @type lvm_fail: boolean
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
    @type hyp_fail: boolean
    @ivar hyp_fail: whether the RPC call didn't return the instance list
    @type ghost: boolean
    @ivar ghost: whether this is a known node or not (config)

    """
    def __init__(self, offline=False):
      self.volumes = {}
      self.instances = []
      self.pinst = []
      self.sinst = []
      self.sbp = {}
      self.mfree = 0
      self.dfree = 0
      self.offline = offline
      self.rpc_fail = False
      self.lvm_fail = False
      self.hyp_fail = False
      self.ghost = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.
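
    Example: with error_codes enabled, a message renders as
    "ERROR:ENODELVM:node:node1.example.com:LVM problem" (hypothetical node
    name); otherwise as "ERROR: node node1.example.com: LVM problem".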

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes:
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + item
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn(" - %s" % msg)

  def _ErrorIf(self, cond, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = bool(cond) or self.op.debug_simulate_errors
    if cond:
      self._Error(*args, **kwargs)
    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond

  def _VerifyNode(self, ninfo, nresult):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the results from the node
    @rtype: boolean
    @return: whether overall this call was successful (and we can expect
        reasonable values in the response)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # main result, nresult should be a non-empty dict
    test = not nresult or not isinstance(nresult, dict)
    _ErrorIf(test, self.ENODERPC, node,
             "unable to verify node: no data returned")
    if test:
      return False

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = nresult.get("version", None)
    test = not (remote_version and
                isinstance(remote_version, (list, tuple)) and
                len(remote_version) == 2)
    _ErrorIf(test, self.ENODERPC, node,
             "connection to node returned invalid data")
    if test:
      return False

    test = local_version != remote_version[0]
    _ErrorIf(test, self.ENODEVERSION, node,
             "incompatible protocol versions: master %s,"
             " node %s", local_version, remote_version[0])
    if test:
      return False

    # node seems compatible, we can actually try to look into its results

    # full package version
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
                  self.ENODEVERSION, node,
                  "software version mismatch: master %s, node %s",
                  constants.RELEASE_VERSION, remote_version[1],
                  code=self.ETYPE_WARNING)

    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        test = hv_result is not None
        _ErrorIf(test, self.ENODEHV, node,
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)

    test = nresult.get(constants.NV_NODESETUP,
                       ["Missing NODESETUP results"])
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
             "; ".join(test))

    return True

  def _VerifyNodeTime(self, ninfo, nresult,
                      nvinfo_starttime, nvinfo_endtime):
    """Check the node time.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nvinfo_starttime: the start time of the RPC call
    @param nvinfo_endtime: the end time of the RPC call

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    ntime = nresult.get(constants.NV_TIME, None)
    try:
      ntime_merged = utils.MergeTime(ntime)
    except (ValueError, TypeError):
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
      return

    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
    else:
      ntime_diff = None

    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
             "Node time diverges by at least %s from master node time",
             ntime_diff)

  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
    """Check the node LVM data (volume group and PV names).

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param vg_name: the configured VG name

    """
    if vg_name is None:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # checks vg existence and size > 20G
    vglist = nresult.get(constants.NV_VGLIST, None)
    test = not vglist
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
    if not test:
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                            constants.MIN_VG_SIZE)
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)

    # check pv names
    pvlist = nresult.get(constants.NV_PVLIST, None)
    test = pvlist is None
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
    if not test:
      # check that ':' is not present in PV names, since it's a
      # special character for lvcreate (denotes the range of PEs to
      # use on the PV)
      for _, pvname, owner_vg in pvlist:
        test = ":" in pvname
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                 " '%s' of VG '%s'", pvname, owner_vg)

  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity (ssh and tcp tests).

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

  def _VerifyInstance(self, instance, instanceconfig, node_image):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    for node, n_img in node_image.items():
      if node != node_current:
        test = instance in n_img.instances
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                 "instance should not run on node %s", node)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = (node not in node_vol_should or
                volume not in node_vol_should[node])
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyOrphanInstances(self, instancelist, node_image):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node, n_img in node_image.items():
      for o_inst in n_img.instances:
        test = o_inst not in instancelist
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", o_inst, node)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.
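
    Example: if node A is the secondary for two auto-balanced instances of
    512 MiB each whose primary is node B, then A must have at least
    1024 MiB of free memory to absorb a failover of B.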
1419 |
|
1420 |
"""
|
1421 |
for node, n_img in node_image.items(): |
1422 |
# This code checks that every node which is now listed as
|
1423 |
# secondary has enough memory to host all instances it is
|
1424 |
# supposed to should a single other node in the cluster fail.
|
1425 |
# FIXME: not ready for failover to an arbitrary node
|
1426 |
# FIXME: does not support file-backed instances
|
1427 |
# WARNING: we currently take into account down instances as well
|
1428 |
# as up ones, considering that even if they're down someone
|
1429 |
# might want to start them even in the event of a node failure.
|
1430 |
for prinode, instances in n_img.sbp.items(): |
1431 |
needed_mem = 0
|
1432 |
for instance in instances: |
1433 |
bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
|
1434 |
if bep[constants.BE_AUTO_BALANCE]:
|
1435 |
needed_mem += bep[constants.BE_MEMORY] |
1436 |
test = n_img.mfree < needed_mem |
1437 |
self._ErrorIf(test, self.ENODEN1, node, |
1438 |
"not enough memory on to accommodate"
|
1439 |
" failovers should peer node %s fail", prinode)
|
1440 |
|
1441 |
def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum, |
1442 |
master_files): |
1443 |
"""Verifies and computes the node required file checksums.
|
1444 |
|
1445 |
@type ninfo: L{objects.Node}
|
1446 |
@param ninfo: the node to check
|
1447 |
@param nresult: the remote results for the node
|
1448 |
@param file_list: required list of files
|
1449 |
@param local_cksum: dictionary of local files and their checksums
|
1450 |
@param master_files: list of files that only masters should have
|
1451 |
|
1452 |
"""
|
1453 |
node = ninfo.name |
1454 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1455 |
|
1456 |
remote_cksum = nresult.get(constants.NV_FILELIST, None)
|
1457 |
test = not isinstance(remote_cksum, dict) |
1458 |
_ErrorIf(test, self.ENODEFILECHECK, node,
|
1459 |
"node hasn't returned file checksum data")
|
1460 |
if test:
|
1461 |
return
|
1462 |
|
1463 |
for file_name in file_list: |
1464 |
node_is_mc = ninfo.master_candidate |
1465 |
must_have = (file_name not in master_files) or node_is_mc |
1466 |
# missing
|
1467 |
test1 = file_name not in remote_cksum |
1468 |
# invalid checksum
|
1469 |
test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name] |
1470 |
# existing and good
|
1471 |
test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name] |
1472 |
_ErrorIf(test1 and must_have, self.ENODEFILECHECK, node, |
1473 |
"file '%s' missing", file_name)
|
1474 |
_ErrorIf(test2 and must_have, self.ENODEFILECHECK, node, |
1475 |
"file '%s' has wrong checksum", file_name)
|
1476 |
# not candidate and this is not a must-have file
|
1477 |
_ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node, |
1478 |
"file '%s' should not exist on non master"
|
1479 |
" candidates (and the file is outdated)", file_name)
|
1480 |
# all good, except non-master/non-must have combination
|
1481 |
_ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node, |
1482 |
"file '%s' should not exist"
|
1483 |
" on non master candidates", file_name)
|
1484 |
|
1485 |
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # compute the DRBD minors
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
      # ghost instance should not be running, but otherwise we
      # don't give double warnings (both ghost instance and
      # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)

    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
    """Verifies and updates the node volume data.

    This function will update a L{NodeImage}'s internal structures
    with data from the remote call.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    nimg.lvm_fail = True
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
    if vg_name is None:
      pass
    elif isinstance(lvdata, basestring):
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
               utils.SafeEncode(lvdata))
    elif not isinstance(lvdata, dict):
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
    else:
      nimg.volumes = lvdata
      nimg.lvm_fail = False

  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
    """Verifies and updates the node instance list.

    If the listing was successful, then updates this node's instance
    list. Otherwise, it marks the RPC call as failed for the instance
    list key.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    idata = nresult.get(constants.NV_INSTANCELIST, None)
    test = not isinstance(idata, list)
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
    if test:
      nimg.hyp_fail = True
    else:
      nimg.instances = idata

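  # The hvinfo payload consumed below is expected to be a dict with at
  # least a "memory_free" key (presumably in MiB, following the usual
  # Ganeti memory units; this function does not state the unit itself).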
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
    """Verifies and computes a node information map.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # try to read free memory (from the hypervisor)
    hv_info = nresult.get(constants.NV_HVINFO, None)
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
    if not test:
      try:
        nimg.mfree = int(hv_info["memory_free"])
      except (ValueError, TypeError):
        _ErrorIf(True, self.ENODERPC, node,
                 "node returned invalid nodeinfo, check hypervisor")

    # FIXME: devise a free space model for file based instances as well
    if vg_name is not None:
      test = (constants.NV_VGLIST not in nresult or
              vg_name not in nresult[constants.NV_VGLIST])
      _ErrorIf(test, self.ENODELVM, node,
               "node didn't return data for the volume group '%s'"
               " - it is either missing or broken", vg_name)
      if not test:
        try:
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
        except (ValueError, TypeError):
          _ErrorIf(True, self.ENODERPC, node,
                   "node returned invalid LVM info, check LVM status")

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run in the post phase only; if they fail, their
    output is logged in the verify output and the verification fails.

    """
    all_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    return env, [], all_nodes

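  # Exec proceeds in phases: global config and certificate checks,
  # building the expected cluster state, one node_verify RPC to gather
  # runtime data, then per-node, per-instance, orphan and (unless
  # skipped) N+1 memory checks.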
  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    self.bad = False
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    verbose = self.op.verbose
    self._feedback_fn = feedback_fn
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)

    # Check the cluster certificates
    for cert_filename in constants.ALL_CERT_FILES:
      (errcode, msg) = _VerifyCertificate(cert_filename)
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = 0 # Count of offline nodes
    n_drained = 0 # Count of nodes being drained
    node_vol_should = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.extend(constants.ALL_CERT_FILES)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_NODESETUP: None,
      constants.NV_TIME: None,
      }

    if vg_name is not None:
      node_verify_param[constants.NV_VGLIST] = None
      node_verify_param[constants.NV_LVLIST] = vg_name
      node_verify_param[constants.NV_PVLIST] = [vg_name]
      node_verify_param[constants.NV_DRBDLIST] = None

    # Build our expected cluster state
    node_image = dict((node.name, self.NodeImage(offline=node.offline))
                      for node in nodeinfo)

    for instance in instancelist:
      inst_config = instanceinfo[instance]

      for nname in inst_config.all_nodes:
        if nname not in node_image:
          # ghost node
          gnode = self.NodeImage()
          gnode.ghost = True
          node_image[nname] = gnode

      inst_config.MapLVsByNode(node_vol_should)

      pnode = inst_config.primary_node
      node_image[pnode].pinst.append(instance)

      for snode in inst_config.secondary_nodes:
        nimg = node_image[snode]
        nimg.sinst.append(instance)
        if pnode not in nimg.sbp:
          nimg.sbp[pnode] = []
        nimg.sbp[pnode].append(instance)

    # At this point, we have the in-memory data structures complete,
    # except for the runtime information, which we'll gather next

    # Due to the way our RPC system works, exact response times cannot be
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
    # time before and after executing the request, we can at least have a time
    # window.
    nvinfo_starttime = time.time()
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    nvinfo_endtime = time.time()

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    all_drbd_map = self.cfg.ComputeDRBDMap()

    feedback_fn("* Verifying node status")
    for node_i in nodeinfo:
      node = node_i.name
      nimg = node_image[node]

      if node_i.offline:
        if verbose:
          feedback_fn("* Skipping offline node %s" % (node,))
        n_offline += 1
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained += 1
      else:
        ntype = "regular"
      if verbose:
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      msg = all_nvinfo[node].fail_msg
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
      if msg:
        nimg.rpc_fail = True
        continue

      nresult = all_nvinfo[node].payload

      nimg.call_ok = self._VerifyNode(node_i, nresult)
      self._VerifyNodeNetwork(node_i, nresult)
      self._VerifyNodeLVM(node_i, nresult, vg_name)
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
                            master_files)
      self._VerifyNodeDrbd(node_i, nresult, instanceinfo, all_drbd_map)
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)

      self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
      self._UpdateNodeInstances(node_i, nresult, nimg)
      self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)

    feedback_fn("* Verifying instance status")
    for instance in instancelist:
      if verbose:
        feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      self._VerifyInstance(instance, inst_config, node_image)
      inst_nodes_offline = []

      pnode = inst_config.primary_node
      pnode_img = node_image[pnode]
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
               self.ENODERPC, pnode, "instance %s, connection to"
               " primary node failed", instance)

      if pnode_img.offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if not inst_config.secondary_nodes:
        i_non_redundant.append(instance)
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
               instance, "instance has multiple secondary nodes: %s",
               utils.CommaJoin(inst_config.secondary_nodes),
               code=self.ETYPE_WARNING)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        s_img = node_image[snode]
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
                 "instance %s, connection to secondary node failed", instance)

        if s_img.offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance lives on offline node(s) %s",
               utils.CommaJoin(inst_nodes_offline))
      # ... or ghost nodes
      for node in inst_config.all_nodes:
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
                 "instance lives on ghost node %s", node)

    feedback_fn("* Verifying orphan volumes")
    self._VerifyOrphanVolumes(node_vol_should, node_image)

    feedback_fn("* Verifying orphan instances")
    self._VerifyOrphanInstances(instancelist, node_image)

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_image, instanceinfo)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)

    return not self.bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # override manually lu_result here as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = indent_re.sub('      ', output)
            feedback_fn("%s" % output)
            lu_result = 0

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    result = res_nodes, res_instances, res_missing = {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      node_res = node_lvs[node]
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, _, lv_online) in lvs.items():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


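# LURepairDiskSizes reconciles the disk sizes recorded in the
# configuration with the sizes the nodes actually report, adopting the
# node-reported value whenever they disagree (see Exec below).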
class LURepairDiskSizes(NoHooksLU):
  """Verifies the cluster disk sizes.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'",
                                 errors.ECODE_INVAL)

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = _ExpandInstanceName(self.cfg, name)
        self.wanted_names.append(full_name)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsizes(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsizes call to node"
                        " %s, ignoring", node)
        continue
      if len(result.data) != len(dskl):
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.data):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
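        # the node reports sizes in bytes while disk.size is kept in
        # MiB, hence the shift by 20 bits (this unit convention is an
        # assumption; it is not spelled out in this function)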
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    all_nodes = self.cfg.GetNodeList()
    return env, [mn], all_nodes

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.GetHostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
          self.proc.LogWarning(msg)

    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)


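# For example (hypothetical objects), a DRBD8 disk whose children are
# logical volumes makes this function return True; this is what stops
# LUSetClusterParams from disabling LVM storage while such disks exist.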
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether an LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV


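# A note on the parameter handling below: op-supplied dicts are merged
# over the current cluster defaults via objects.FillDict, so omitted
# keys keep their existing values and only the merged result is
# validated.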
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err), errors.ECODE_INVAL)
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed",
                                   errors.ECODE_INVAL)
    _CheckBooleanOpField(self.op, "maintain_node_health")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist",
                                       errors.ECODE_INVAL)

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = objects.FillDict(
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = objects.FillDict(
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip" %
                              (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

    # hypervisor list/parameters
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
                                   errors.ECODE_INVAL)
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      if not isinstance(self.op.os_hvp, dict):
        raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
                                   errors.ECODE_INVAL)
      for os_name, hvs in self.op.os_hvp.items():
        if not isinstance(hvs, dict):
          raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
                                      " input"), errors.ECODE_INVAL)
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      if not self.hv_list:
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                                   " least one member",
                                   errors.ECODE_INVAL)
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
      if invalid_hvs:
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                                   " entries: %s" %
                                   utils.CommaJoin(invalid_hvs),
                                   errors.ECODE_INVAL)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          # we need to fill in the new os_hvp on top of the actual hv_p
          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          _CheckHVParams(self, node_list, hv_name, new_osp)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    self.cfg.Update(self.cluster, feedback_fn)


def _RedistributeAncillaryFiles(lu, additional_nodes=None):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to

  """
  # 1. Gather target nodes
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetOnlineNodeList()
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)

  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                    constants.CONFD_HMAC_KEY,
                   ])

  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    dist_files.update(hv_class.GetAncillaryFiles())

  # 3. Perform the files upload
  for fname in dist_files:
    if os.path.exists(fname):
      result = lu.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.items():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (fname, to_node, msg))
          lu.proc.LogWarning(msg)


class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


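# Polling strategy used below: failed status RPCs are retried every six
# seconds, up to ten times; when syncing looks finished but the disks
# are still degraded, up to ten one-second re-checks guard against
# transient states; otherwise the loop sleeps min(60, estimated_time)
# between polls.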
def _WaitForSync(lu, instance, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, mstat.sync_percent,
                         rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


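# Note that _CheckDiskConsistency recurses into child devices, so for a
# DRBD8 disk both the mirror itself and its backing devices on the given
# node are checked.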
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
  # Fields that need calculation of global os validity
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported",
                                 errors.ECODE_INVAL)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and tuples of (path, status, diagnose, variants) as
        values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "")]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for name, path, status, diagnose, variants in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        all_os[name][node_name].append((path, status, diagnose, variants))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(node_data)
    output = []
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
    calc_variants = "variants" in self.op.output_fields

    for os_name, os_data in pol.items():
      row = []
      if calc_valid:
        valid = True
        variants = None
        for osl in os_data.values():
          valid = valid and osl and osl[0][1]
          if not valid:
            variants = None
            break
          if calc_variants:
            node_variants = osl[0][3]
            if variants is None:
              variants = node_variants
            else:
              variants = [v for v in variants if v in node_variants]

      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = valid
        elif field == "node_status":
          # this is just a copy of the dict
          val = {}
          for node_name, nos_list in os_data.items():
            val[node_name] = nos_list
        elif field == "variants":
          val = variants
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit): |
2740 |
"""Logical unit for removing a node.
|
2741 |
|
2742 |
"""
|
2743 |
HPATH = "node-remove"
|
2744 |
HTYPE = constants.HTYPE_NODE |
2745 |
_OP_REQP = ["node_name"]
|
2746 |
|
2747 |
def BuildHooksEnv(self): |
2748 |
"""Build hooks env.
|
2749 |
|
2750 |
This doesn't run on the target node in the pre phase as a failed
|
2751 |
node would then be impossible to remove.
|
2752 |
|
2753 |
"""
|
2754 |
env = { |
2755 |
"OP_TARGET": self.op.node_name, |
2756 |
"NODE_NAME": self.op.node_name, |
2757 |
} |
2758 |
all_nodes = self.cfg.GetNodeList()
|
2759 |
try:
|
2760 |
all_nodes.remove(self.op.node_name)
|
2761 |
except ValueError: |
2762 |
logging.warning("Node %s which is about to be removed not found"
|
2763 |
" in the all nodes list", self.op.node_name) |
2764 |
return env, all_nodes, all_nodes
|
2765 |
|
2766 |
def CheckPrereq(self): |
2767 |
"""Check prerequisites.
|
2768 |
|
2769 |
This checks:
|
2770 |
- the node exists in the configuration
|
2771 |
- it does not have primary or secondary instances
|
2772 |
- it's not the master
|
2773 |
|
2774 |
Any errors are signaled by raising errors.OpPrereqError.
|
2775 |
|
2776 |
"""
|
2777 |
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) |
2778 |
node = self.cfg.GetNodeInfo(self.op.node_name) |
2779 |
assert node is not None |
2780 |
|
2781 |
instance_list = self.cfg.GetInstanceList()
|
2782 |
|
2783 |
masternode = self.cfg.GetMasterNode()
|
2784 |
if node.name == masternode:
|
2785 |
raise errors.OpPrereqError("Node is the master node," |
2786 |
" you need to failover first.",
|
2787 |
errors.ECODE_INVAL) |
2788 |
|
2789 |
for instance_name in instance_list: |
2790 |
instance = self.cfg.GetInstanceInfo(instance_name)
|
2791 |
if node.name in instance.all_nodes: |
2792 |
raise errors.OpPrereqError("Instance %s is still running on the node," |
2793 |
" please remove first." % instance_name,
|
2794 |
errors.ECODE_INVAL) |
2795 |
self.op.node_name = node.name
|
2796 |
self.node = node
|
2797 |
|
2798 |
def Exec(self, feedback_fn): |
2799 |
"""Removes the node from the cluster.
|
2800 |
|
2801 |
"""
|
2802 |
node = self.node
|
2803 |
logging.info("Stopping the node daemon and removing configs from node %s",
|
2804 |
node.name) |
2805 |
|
2806 |
modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
|
2807 |
|
2808 |
# Promote nodes to master candidate as needed
|
2809 |
_AdjustCandidatePool(self, exceptions=[node.name])
|
2810 |
self.context.RemoveNode(node.name)
|
2811 |
|
2812 |
# Run post hooks on the node before it's removed
|
2813 |
hm = self.proc.hmclass(self.rpc.call_hooks_runner, self) |
2814 |
try:
|
2815 |
hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name]) |
2816 |
except:
|
2817 |
# pylint: disable-msg=W0702
|
2818 |
self.LogWarning("Errors occurred running hooks on %s" % node.name) |
2819 |
|
2820 |
result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
|
2821 |
msg = result.fail_msg |
2822 |
if msg:
|
2823 |
self.LogWarning("Errors encountered on the remote node while leaving" |
2824 |
" the cluster: %s", msg)
|
2825 |
|
2826 |
|
2827 |
class LUQueryNodes(NoHooksLU): |
2828 |
"""Logical unit for querying nodes.
|
2829 |
|
2830 |
"""
|
2831 |
# pylint: disable-msg=W0142
|
2832 |
_OP_REQP = ["output_fields", "names", "use_locking"] |
2833 |
REQ_BGL = False
|
2834 |
|
2835 |
_SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid", |
2836 |
"master_candidate", "offline", "drained"] |
2837 |
|
2838 |
_FIELDS_DYNAMIC = utils.FieldSet( |
2839 |
"dtotal", "dfree", |
2840 |
"mtotal", "mnode", "mfree", |
2841 |
"bootid",
|
2842 |
"ctotal", "cnodes", "csockets", |
2843 |
) |
2844 |
|
2845 |
_FIELDS_STATIC = utils.FieldSet(*[ |
2846 |
"pinst_cnt", "sinst_cnt", |
2847 |
"pinst_list", "sinst_list", |
2848 |
"pip", "sip", "tags", |
2849 |
"master",
|
2850 |
"role"] + _SIMPLE_FIELDS
|
2851 |
) |
2852 |
|
2853 |
def ExpandNames(self): |
2854 |
_CheckOutputFields(static=self._FIELDS_STATIC,
|
2855 |
dynamic=self._FIELDS_DYNAMIC,
|
2856 |
selected=self.op.output_fields)
|
2857 |
|
2858 |
self.needed_locks = {}
|
2859 |
self.share_locks[locking.LEVEL_NODE] = 1 |
2860 |
|
2861 |
if self.op.names: |
2862 |
self.wanted = _GetWantedNodes(self, self.op.names) |
2863 |
else:
|
2864 |
self.wanted = locking.ALL_SET
|
2865 |
|
2866 |
self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields) |
2867 |
self.do_locking = self.do_node_query and self.op.use_locking |
2868 |
if self.do_locking: |
2869 |
# if we don't request only static fields, we need to lock the nodes
|
2870 |
self.needed_locks[locking.LEVEL_NODE] = self.wanted |
2871 |
|
2872 |
def CheckPrereq(self): |
2873 |
"""Check prerequisites.
|
2874 |
|
2875 |
"""
|
2876 |
# The validation of the node list is done in the _GetWantedNodes,
|
2877 |
# if non empty, and if empty, there's no validation to do
|
2878 |
pass
|
2879 |
|
2880 |
def Exec(self, feedback_fn): |
2881 |
"""Computes the list of nodes and their attributes.
|
2882 |
|
2883 |
"""
|
2884 |
all_info = self.cfg.GetAllNodesInfo()
|
2885 |
if self.do_locking: |
2886 |
nodenames = self.acquired_locks[locking.LEVEL_NODE]
|
2887 |
elif self.wanted != locking.ALL_SET: |
2888 |
nodenames = self.wanted
|
2889 |
missing = set(nodenames).difference(all_info.keys())
|
2890 |
if missing:
|
2891 |
raise errors.OpExecError(
|
2892 |
"Some nodes were removed before retrieving their data: %s" % missing)
|
2893 |
else:
|
2894 |
nodenames = all_info.keys() |
2895 |
|
2896 |
nodenames = utils.NiceSort(nodenames) |
2897 |
nodelist = [all_info[name] for name in nodenames] |
2898 |
|
2899 |
# begin data gathering
|
2900 |
|
2901 |
if self.do_node_query: |
2902 |
live_data = {} |
2903 |
node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(), |
2904 |
self.cfg.GetHypervisorType())
|
2905 |
for name in nodenames: |
2906 |
nodeinfo = node_data[name] |
2907 |
if not nodeinfo.fail_msg and nodeinfo.payload: |
2908 |
nodeinfo = nodeinfo.payload |
2909 |
fn = utils.TryConvert |
2910 |
live_data[name] = { |
2911 |
"mtotal": fn(int, nodeinfo.get('memory_total', None)), |
2912 |
"mnode": fn(int, nodeinfo.get('memory_dom0', None)), |
2913 |
"mfree": fn(int, nodeinfo.get('memory_free', None)), |
2914 |
"dtotal": fn(int, nodeinfo.get('vg_size', None)), |
2915 |
"dfree": fn(int, nodeinfo.get('vg_free', None)), |
2916 |
"ctotal": fn(int, nodeinfo.get('cpu_total', None)), |
2917 |
"bootid": nodeinfo.get('bootid', None), |
2918 |
"cnodes": fn(int, nodeinfo.get('cpu_nodes', None)), |
2919 |
"csockets": fn(int, nodeinfo.get('cpu_sockets', None)), |
2920 |
} |
2921 |
else:
|
2922 |
live_data[name] = {} |
2923 |
else:
|
2924 |
live_data = dict.fromkeys(nodenames, {})
|
2925 |
|
2926 |
node_to_primary = dict([(name, set()) for name in nodenames]) |
2927 |
node_to_secondary = dict([(name, set()) for name in nodenames]) |
2928 |
|
2929 |
inst_fields = frozenset(("pinst_cnt", "pinst_list", |
2930 |
"sinst_cnt", "sinst_list")) |
2931 |
if inst_fields & frozenset(self.op.output_fields): |
2932 |
inst_data = self.cfg.GetAllInstancesInfo()
|
2933 |
|
2934 |
for inst in inst_data.values(): |
2935 |
if inst.primary_node in node_to_primary: |
2936 |
node_to_primary[inst.primary_node].add(inst.name) |
2937 |
for secnode in inst.secondary_nodes: |
2938 |
if secnode in node_to_secondary: |
2939 |
node_to_secondary[secnode].add(inst.name) |
2940 |
|
2941 |
master_node = self.cfg.GetMasterNode()
|
2942 |
|
2943 |
# end data gathering
|
2944 |
|
2945 |
output = [] |
2946 |
for node in nodelist: |
2947 |
node_output = [] |
2948 |
for field in self.op.output_fields: |
2949 |
if field in self._SIMPLE_FIELDS: |
2950 |
val = getattr(node, field)
|
2951 |
elif field == "pinst_list": |
2952 |
val = list(node_to_primary[node.name])
|
2953 |
elif field == "sinst_list": |
2954 |
val = list(node_to_secondary[node.name])
|
2955 |
elif field == "pinst_cnt": |
2956 |
val = len(node_to_primary[node.name])
|
2957 |
elif field == "sinst_cnt": |
2958 |
val = len(node_to_secondary[node.name])
|
2959 |
elif field == "pip": |
2960 |
val = node.primary_ip |
2961 |
elif field == "sip": |
2962 |
val = node.secondary_ip |
2963 |
elif field == "tags": |
2964 |
val = list(node.GetTags())
|
2965 |
elif field == "master": |
2966 |
val = node.name == master_node |
2967 |
elif self._FIELDS_DYNAMIC.Matches(field): |
2968 |
val = live_data[node.name].get(field, None)
|
2969 |
elif field == "role": |
2970 |
if node.name == master_node:
|
2971 |
val = "M"
|
2972 |
elif node.master_candidate:
|
2973 |
val = "C"
|
2974 |
elif node.drained:
|
2975 |
val = "D"
|
2976 |
elif node.offline:
|
2977 |
val = "O"
|
2978 |
else:
|
2979 |
val = "R"
|
2980 |
else:
|
2981 |
raise errors.ParameterError(field)
|
2982 |
node_output.append(val) |
2983 |
output.append(node_output) |
2984 |
|
2985 |
return output
|
2986 |
|
2987 |
|
2988 |
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output

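# Note on the "instance" field above: it relies on Python's for/else, where
# the else branch runs only if the loop was not left via "break", so val
# falls back to '-' when no instance owns the volume. Standalone
# illustration (owns_volume is a made-up predicate):
#
#   for inst in ilist:
#     if owns_volume(inst):
#       val = inst.name
#       break
#   else:
#     val = '-'
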
class LUQueryNodeStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)

  def ExpandNames(self):
    storage_type = self.op.storage_type

    if storage_type not in constants.VALID_STORAGE_TYPES:
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                                 errors.ECODE_INVAL)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.op.name = getattr(self.op, "name", None)

    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of storage units and their attributes.

    """
    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result

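# Illustrative sketch (made-up values): querying storage_type "lvm-vg" with
# output_fields=["name", "size"] makes the row lookup above behave like:
#
#   fields = ["name", "size"]          # SF_NAME is already requested
#   field_idx = {"name": 0, "size": 1}
#   row = rows["xenvg"]                # e.g. ["xenvg", 102400.0]
#   out = [row[0], row[1]]
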
class LUModifyNodeStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type
    if storage_type not in constants.VALID_STORAGE_TYPES:
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """
    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Modifies a storage volume on the node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))

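# Illustrative note: the CheckPrereq validation above is plain set
# arithmetic. If MODIFIABLE_STORAGE_FIELDS allowed only "allocatable" for
# the given type (the exact contents are version-dependent), a request of
# changes={"allocatable": False, "size": 10} would be rejected with
# set(["size"]) reported as non-modifiable.
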
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def CheckArguments(self):
    # validate/normalize the node name
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.GetHostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given",
                                 errors.ECODE_INVAL)
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)

    if self.op.readd:
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # setup ssh on node
    if self.cfg.GetClusterInfo().modify_ssh_setup:
      logging.info("Copy ssh key to node %s", node)
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      keyarray = []
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                  priv_key, pub_key]

      for i in keyfiles:
        keyarray.append(utils.ReadFile(i))

      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                      keyarray[2], keyarray[3], keyarray[4],
                                      keyarray[5])
      result.Raise("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
                   prereq=True, ecode=errors.ECODE_ENVIRON)
      if not result.payload:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
      }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
      self.context.AddNode(new_node, self.proc.GetECId())

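# Illustrative sketch (assumed opcode spelling, see the opcodes module): a
# client drives LUAddNode through something like
#
#   op = opcodes.OpAddNode(node_name="node3.example.com", readd=False)
#
# after which CheckPrereq resolves the name, checks IP uniqueness and
# single/dual-homing consistency with the master, and Exec distributes the
# SSH keys and runs a verification pass before the node joins the config.
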
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    _CheckBooleanOpField(self.op, 'auto_promote')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we're offlining or draining the node
    self.offline_or_drain = (self.op.offline == True or
                             self.op.drained == True)
    self.deoffline_or_drain = (self.op.offline == False or
                               self.op.drained == False)
    self.might_demote = (self.op.master_candidate == False or
                         self.offline_or_drain)

    self.lock_all = self.op.auto_promote and self.might_demote

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the node's current state against the requested flag
    changes.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via masterfailover",
                                   errors.ECODE_INVAL)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto_promote to allow promotion",
                                   errors.ECODE_INVAL)

    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name,
                                 errors.ECODE_INVAL)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.deoffline_or_drain and not self.offline_or_drain and not
        self.op.master_candidate == True and not node.master_candidate):
      self.op.master_candidate = _DecideSelfPromotion(self)
      if self.op.master_candidate:
        self.LogInfo("Autopromoting node to master candidate")

    return

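  # Note on the flags computed in CheckArguments: offline_or_drain is true
  # when the request sets either flag, deoffline_or_drain when it clears
  # either, and might_demote when the change may cost the node its master
  # candidate role -- the case in which auto_promote forces lock_all, since
  # promoting a replacement candidate needs all node locks.
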
  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
          rrc = self.rpc.call_node_demote_from_mc(node.name)
          msg = rrc.fail_msg
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # we locked all nodes, we adjust the CP before updating this node
    if self.lock_all:
      _AdjustCandidatePool(self, [node.name])

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result

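# Illustrative note: LUSetNodeParams.Exec returns a list of (parameter,
# new-value) pairs describing what was changed, e.g. (made-up run):
#
#   [("offline", "True"),
#    ("master_candidate", "auto-demotion due to offline")]
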
class LUPowercycleNode(NoHooksLU):
  """Powercycles a node.

  """
  _OP_REQP = ["node_name", "force"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prereqs.

    """
    pass

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload

class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "nicparams": cluster.nicparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      }

    return result

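# Illustrative sketch (made-up values): the dict returned above is what
# "gnt-cluster info" style clients render, e.g. result["architecture"]
# could be ("64bit", "x86_64"); note that os_hvp and hvparams only ever
# contain entries for hypervisors listed in enabled_hypervisors.
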
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values

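# Illustrative sketch (made-up values): for
# output_fields=["cluster_name", "drain_flag"] the LU above returns
# something like ["cluster.example.com", False].
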
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)
    if not hasattr(self.op, "ignore_size"):
      self.op.ignore_size = False

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
      _AssembleInstanceDisks(self, self.instance,
                             ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info

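# Illustrative sketch (made-up values): on success the LU returns the
# device_info list built by _AssembleInstanceDisks below, i.e. tuples of
# (primary node, instance-visible name, node-visible path), for example:
#
#   [("node1.example.com", "disk/0", "/dev/drbd0")]
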
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info

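# Sketch of the "proper fix" mentioned in the comments above, assuming a
# helper that does not exist in this module (_WaitForDrbdConnect is
# hypothetical): between the two passes one would poll the device status
# with a bounded timeout before promoting to primary, roughly:
#
#   def _WaitForDrbdConnect(lu, node, disk, timeout):
#     # poll lu.rpc.call_blockdev_find(node, disk) until the reported
#     # status has left WFConnection (i.e. is network-connected), and
#     # give up after `timeout` seconds instead of blocking forever
#     ...
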
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")

class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)

def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance)

def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result

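# Note on ignore_primary above: when it is true, a failed shutdown on the
# instance's primary node does not flip the return value to False, while a
# failure on any other node always does; callers that shut down disks of an
# instance whose primary node may already be dead rely on this.
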
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)

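# Illustrative sketch of a typical call (parameter values are made up; the
# real call sites live elsewhere in this module):
#
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
#
# which raises OpPrereqError with ECODE_NORES when the node reports less
# 'memory_free' than requested.
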
def _CheckNodesFreeDisk(lu, nodenames, requested):
  """Checks if nodes have enough free disk space in the default VG.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
                                   lu.cfg.GetHypervisorType())
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    vg_free = info.payload.get("vg_free", None)
    if not isinstance(vg_free, int):
      raise errors.OpPrereqError("Can't compute free disk space on node %s,"
                                 " result was '%s'" % (node, vg_free),
                                 errors.ECODE_ENVIRON)