lib/cmdlib.py @ 417eabe2
#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name, errors.ECODE_INVAL)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused argument and the
    # 'could be a function' warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


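# Illustrative sketch (hypothetical names, not part of the original module):
# how a concurrent LU would typically combine the helpers above, locking one
# instance first and then its nodes.
#
#   class LUExampleInstanceOp(LogicalUnit):
#     _OP_REQP = ["instance_name"]
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       # expands self.op.instance_name and declares the instance-level lock
#       self._ExpandAndLockInstance()
#       self.needed_locks[locking.LEVEL_NODE] = []
#       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
#
#     def DeclareLocks(self, level):
#       if level == locking.LEVEL_NODE:
#         # now that the instance lock is held, lock its nodes
#         self._LockInstancesNodes()

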
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLu.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


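# Illustrative sketch (hypothetical names, not part of the original module):
# an LU built from tasklets. When self.tasklets is set in ExpandNames, the
# base-class CheckPrereq and Exec above iterate over the tasklets instead of
# requiring LU-level implementations.
#
#   class _ExampleTasklet(Tasklet):
#     def CheckPrereq(self):
#       pass  # validate here, raising errors.OpPrereqError on problems
#
#     def Exec(self, feedback_fn):
#       feedback_fn("doing the actual work")
#
#   class LUExampleOp(NoHooksLU):
#     _OP_REQP = []
#
#     def ExpandNames(self):
#       self.needed_locks = {}
#       self.tasklets = [_ExampleTasklet(self)]

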
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
                               errors.ECODE_INVAL)

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'",
                               errors.ECODE_INVAL)

  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)), errors.ECODE_INVAL)
  setattr(op, name, val)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckDiskTemplate(template):
  """Ensure a given disk template is valid.

  """
  if template not in constants.DISK_TEMPLATES:
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


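# Illustrative sketch (made-up values, not part of the original module): the
# kind of environment _BuildInstanceHookEnv produces for a single-NIC,
# single-disk instance that is marked up.
#
#   _BuildInstanceHookEnv("inst1.example.com", "node1.example.com", [],
#                         "debian-image", True, 512, 1,
#                         [("192.0.2.10", "aa:00:00:00:00:01",
#                           constants.NIC_MODE_BRIDGED, "br0")],
#                         "plain", [(10240, "rw")], {}, {}, "xen-pvm")
#   # returns, among other keys:
#   #   INSTANCE_NAME=inst1.example.com, INSTANCE_PRIMARY=node1.example.com,
#   #   INSTANCE_STATUS=up, INSTANCE_NIC_COUNT=1, INSTANCE_NIC0_BRIDGE=br0,
#   #   INSTANCE_DISK_COUNT=1, INSTANCE_DISK0_SIZE=10240

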
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


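# Illustrative worked example (made-up numbers, not part of the original
# module): with candidate_pool_size = 10, mc_now = 3 current master
# candidates and mc_should = 3, _DecideSelfPromotion computes
# min(3 + 1, 10) = 4 and returns 3 < 4, i.e. True, so the node being added
# would promote itself to master candidate.

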
def _CheckNicsBridgesExist(lu, target_nics, target_node,
                           profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _FormatTimestamp(secs):
  """Formats a Unix timestamp with the local timezone.

  """
  return time.strftime("%F %T %Z", time.gmtime(secs))


class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    return True

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    if modify_ssh_setup:
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      utils.CreateBackup(priv_key)
      utils.CreateBackup(pub_key)

    return master


def _VerifyCertificateInner(filename, expired, not_before, not_after, now,
                            warn_days=constants.SSL_CERT_EXPIRATION_WARN,
                            error_days=constants.SSL_CERT_EXPIRATION_ERROR):
  """Verifies certificate details for LUVerifyCluster.

  """
  if expired:
    msg = "Certificate %s is expired" % filename

    if not_before is not None and not_after is not None:
      msg += (" (valid from %s to %s)" %
              (_FormatTimestamp(not_before),
               _FormatTimestamp(not_after)))
    elif not_before is not None:
      msg += " (valid from %s)" % _FormatTimestamp(not_before)
    elif not_after is not None:
      msg += " (valid until %s)" % _FormatTimestamp(not_after)

    return (LUVerifyCluster.ETYPE_ERROR, msg)

  elif not_before is not None and not_before > now:
    return (LUVerifyCluster.ETYPE_WARNING,
            "Certificate %s not yet valid (valid from %s)" %
            (filename, _FormatTimestamp(not_before)))

  elif not_after is not None:
    remaining_days = int((not_after - now) / (24 * 3600))

    msg = ("Certificate %s expires in %d days" % (filename, remaining_days))

    if remaining_days <= error_days:
      return (LUVerifyCluster.ETYPE_ERROR, msg)

    if remaining_days <= warn_days:
      return (LUVerifyCluster.ETYPE_WARNING, msg)

  return (None, None)


def _VerifyCertificate(filename):
  """Verifies a certificate for LUVerifyCluster.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUVerifyCluster.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = utils.GetX509CertValidity(cert)

  return _VerifyCertificateInner(filename, cert.has_expired(),
                                 not_before, not_after, time.time())


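# Illustrative sketch (not part of the original module): how the helper above
# classifies a certificate, assuming warn/error thresholds of 30 and 7 days
# (the values behind SSL_CERT_EXPIRATION_WARN/_ERROR are an assumption here).
#
#   _VerifyCertificateInner("server.pem", False, None, now + 40 * 86400, now)
#   # -> (None, None), expiry is further away than warn_days
#   _VerifyCertificateInner("server.pem", False, None, now + 10 * 86400, now)
#   # -> (LUVerifyCluster.ETYPE_WARNING, "Certificate server.pem expires ...")
#   _VerifyCertificateInner("server.pem", True, None, None, now)
#   # -> (LUVerifyCluster.ETYPE_ERROR, "Certificate server.pem is expired")

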
class LUVerifyCluster(LogicalUnit): |
1055 |
"""Verifies the cluster status.
|
1056 |
|
1057 |
"""
|
1058 |
HPATH = "cluster-verify"
|
1059 |
HTYPE = constants.HTYPE_CLUSTER |
1060 |
_OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"] |
1061 |
REQ_BGL = False
|
1062 |
|
1063 |
TCLUSTER = "cluster"
|
1064 |
TNODE = "node"
|
1065 |
TINSTANCE = "instance"
|
1066 |
|
1067 |
ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
|
1068 |
ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
|
1069 |
EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
|
1070 |
EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
|
1071 |
EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
|
1072 |
EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
|
1073 |
EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
|
1074 |
EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
|
1075 |
ENODEDRBD = (TNODE, "ENODEDRBD")
|
1076 |
ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
|
1077 |
ENODEHOOKS = (TNODE, "ENODEHOOKS")
|
1078 |
ENODEHV = (TNODE, "ENODEHV")
|
1079 |
ENODELVM = (TNODE, "ENODELVM")
|
1080 |
ENODEN1 = (TNODE, "ENODEN1")
|
1081 |
ENODENET = (TNODE, "ENODENET")
|
1082 |
ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
|
1083 |
ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
|
1084 |
ENODERPC = (TNODE, "ENODERPC")
|
1085 |
ENODESSH = (TNODE, "ENODESSH")
|
1086 |
ENODEVERSION = (TNODE, "ENODEVERSION")
|
1087 |
ENODESETUP = (TNODE, "ENODESETUP")
|
1088 |
ENODETIME = (TNODE, "ENODETIME")
|
1089 |
|
1090 |
ETYPE_FIELD = "code"
|
1091 |
ETYPE_ERROR = "ERROR"
|
1092 |
ETYPE_WARNING = "WARNING"
|
1093 |
|
1094 |
class NodeImage(object): |
1095 |
"""A class representing the logical and physical status of a node.
|
1096 |
|
1097 |
@ivar volumes: a structure as returned from
|
1098 |
L{ganeti.backend.GetVolumeList} (runtime)
|
1099 |
@ivar instances: a list of running instances (runtime)
|
1100 |
@ivar pinst: list of configured primary instances (config)
|
1101 |
@ivar sinst: list of configured secondary instances (config)
|
1102 |
@ivar sbp: diction of {secondary-node: list of instances} of all peers
|
1103 |
of this node (config)
|
1104 |
@ivar mfree: free memory, as reported by hypervisor (runtime)
|
1105 |
@ivar dfree: free disk, as reported by the node (runtime)
|
1106 |
@ivar offline: the offline status (config)
|
1107 |
@type rpc_fail: boolean
|
1108 |
@ivar rpc_fail: whether the RPC verify call was successfull (overall,
|
1109 |
not whether the individual keys were correct) (runtime)
|
1110 |
@type lvm_fail: boolean
|
1111 |
@ivar lvm_fail: whether the RPC call didn't return valid LVM data
|
1112 |
@type hyp_fail: boolean
|
1113 |
@ivar hyp_fail: whether the RPC call didn't return the instance list
|
1114 |
@type ghost: boolean
|
1115 |
@ivar ghost: whether this is a known node or not (config)
|
1116 |
|
1117 |
"""
|
1118 |
def __init__(self, offline=False): |
1119 |
self.volumes = {}
|
1120 |
self.instances = []
|
1121 |
self.pinst = []
|
1122 |
self.sinst = []
|
1123 |
self.sbp = {}
|
1124 |
self.mfree = 0 |
1125 |
self.dfree = 0 |
1126 |
self.offline = offline
|
1127 |
self.rpc_fail = False |
1128 |
self.lvm_fail = False |
1129 |
self.hyp_fail = False |
1130 |
self.ghost = False |
1131 |
|
1132 |
def ExpandNames(self): |
1133 |
self.needed_locks = {
|
1134 |
locking.LEVEL_NODE: locking.ALL_SET, |
1135 |
locking.LEVEL_INSTANCE: locking.ALL_SET, |
1136 |
} |
1137 |
self.share_locks = dict.fromkeys(locking.LEVELS, 1) |
1138 |
|
1139 |
def _Error(self, ecode, item, msg, *args, **kwargs): |
1140 |
"""Format an error message.
|
1141 |
|
1142 |
Based on the opcode's error_codes parameter, either format a
|
1143 |
parseable error code, or a simpler error string.
|
1144 |
|
1145 |
This must be called only from Exec and functions called from Exec.
|
1146 |
|
1147 |
"""
|
1148 |
ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) |
1149 |
itype, etxt = ecode |
1150 |
# first complete the msg
|
1151 |
if args:
|
1152 |
msg = msg % args |
1153 |
# then format the whole message
|
1154 |
if self.op.error_codes: |
1155 |
msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
|
1156 |
else:
|
1157 |
if item:
|
1158 |
item = " " + item
|
1159 |
else:
|
1160 |
item = ""
|
1161 |
msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
|
1162 |
# and finally report it via the feedback_fn
|
1163 |
self._feedback_fn(" - %s" % msg) |
1164 |
|
1165 |
def _ErrorIf(self, cond, *args, **kwargs): |
1166 |
"""Log an error message if the passed condition is True.
|
1167 |
|
1168 |
"""
|
1169 |
cond = bool(cond) or self.op.debug_simulate_errors |
1170 |
if cond:
|
1171 |
self._Error(*args, **kwargs)
|
1172 |
# do not mark the operation as failed for WARN cases only
|
1173 |
if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR: |
1174 |
self.bad = self.bad or cond |
1175 |
|
1176 |
def _VerifyNode(self, ninfo, nresult): |
1177 |
"""Run multiple tests against a node.
|
1178 |
|
1179 |
Test list:
|
1180 |
|
1181 |
- compares ganeti version
|
1182 |
- checks vg existence and size > 20G
|
1183 |
- checks config file checksum
|
1184 |
- checks ssh to other nodes
|
1185 |
|
1186 |
@type ninfo: L{objects.Node}
|
1187 |
@param ninfo: the node to check
|
1188 |
@param nresult: the results from the node
|
1189 |
@rtype: boolean
|
1190 |
@return: whether overall this call was successful (and we can expect
|
1191 |
reasonable values in the respose)
|
1192 |
|
1193 |
"""
|
1194 |
node = ninfo.name |
1195 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1196 |
|
1197 |
# main result, nresult should be a non-empty dict
|
1198 |
test = not nresult or not isinstance(nresult, dict) |
1199 |
_ErrorIf(test, self.ENODERPC, node,
|
1200 |
"unable to verify node: no data returned")
|
1201 |
if test:
|
1202 |
return False |
1203 |
|
1204 |
# compares ganeti version
|
1205 |
local_version = constants.PROTOCOL_VERSION |
1206 |
remote_version = nresult.get("version", None) |
1207 |
test = not (remote_version and |
1208 |
isinstance(remote_version, (list, tuple)) and |
1209 |
len(remote_version) == 2) |
1210 |
_ErrorIf(test, self.ENODERPC, node,
|
1211 |
"connection to node returned invalid data")
|
1212 |
if test:
|
1213 |
return False |
1214 |
|
1215 |
test = local_version != remote_version[0]
|
1216 |
_ErrorIf(test, self.ENODEVERSION, node,
|
1217 |
"incompatible protocol versions: master %s,"
|
1218 |
" node %s", local_version, remote_version[0]) |
1219 |
if test:
|
1220 |
return False |
1221 |
|
1222 |
# node seems compatible, we can actually try to look into its results
|
1223 |
|
1224 |
# full package version
|
1225 |
self._ErrorIf(constants.RELEASE_VERSION != remote_version[1], |
1226 |
self.ENODEVERSION, node,
|
1227 |
"software version mismatch: master %s, node %s",
|
1228 |
constants.RELEASE_VERSION, remote_version[1],
|
1229 |
code=self.ETYPE_WARNING)
|
1230 |
|
1231 |
hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
|
1232 |
if isinstance(hyp_result, dict): |
1233 |
for hv_name, hv_result in hyp_result.iteritems(): |
1234 |
test = hv_result is not None |
1235 |
_ErrorIf(test, self.ENODEHV, node,
|
1236 |
"hypervisor %s verify failure: '%s'", hv_name, hv_result)
|
1237 |
|
1238 |
|
1239 |
test = nresult.get(constants.NV_NODESETUP, |
1240 |
["Missing NODESETUP results"])
|
1241 |
_ErrorIf(test, self.ENODESETUP, node, "node setup error: %s", |
1242 |
"; ".join(test))
|
1243 |
|
1244 |
return True |
1245 |
|
1246 |
def _VerifyNodeTime(self, ninfo, nresult, |
1247 |
nvinfo_starttime, nvinfo_endtime): |
1248 |
"""Check the node time.
|
1249 |
|
1250 |
@type ninfo: L{objects.Node}
|
1251 |
@param ninfo: the node to check
|
1252 |
@param nresult: the remote results for the node
|
1253 |
@param nvinfo_starttime: the start time of the RPC call
|
1254 |
@param nvinfo_endtime: the end time of the RPC call
|
1255 |
|
1256 |
"""
|
1257 |
node = ninfo.name |
1258 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1259 |
|
1260 |
ntime = nresult.get(constants.NV_TIME, None)
|
1261 |
try:
|
1262 |
ntime_merged = utils.MergeTime(ntime) |
1263 |
except (ValueError, TypeError): |
1264 |
_ErrorIf(True, self.ENODETIME, node, "Node returned invalid time") |
1265 |
return
|
1266 |
|
1267 |
if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
|
1268 |
ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged) |
1269 |
elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
|
1270 |
ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime) |
1271 |
else:
|
1272 |
ntime_diff = None
|
1273 |
|
1274 |
_ErrorIf(ntime_diff is not None, self.ENODETIME, node, |
1275 |
"Node time diverges by at least %s from master node time",
|
1276 |
ntime_diff) |
1277 |
|
1278 |
def _VerifyNodeLVM(self, ninfo, nresult, vg_name): |
1279 |
"""Check the node time.
|
1280 |
|
1281 |
@type ninfo: L{objects.Node}
|
1282 |
@param ninfo: the node to check
|
1283 |
@param nresult: the remote results for the node
|
1284 |
@param vg_name: the configured VG name
|
1285 |
|
1286 |
"""
|
1287 |
if vg_name is None: |
1288 |
return
|
1289 |
|
1290 |
node = ninfo.name |
1291 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1292 |
|
1293 |
# checks vg existence and size > 20G
|
1294 |
vglist = nresult.get(constants.NV_VGLIST, None)
|
1295 |
test = not vglist
|
1296 |
_ErrorIf(test, self.ENODELVM, node, "unable to check volume groups") |
1297 |
if not test: |
1298 |
vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name, |
1299 |
constants.MIN_VG_SIZE) |
1300 |
_ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
|
1301 |
|
1302 |
# check pv names
|
1303 |
pvlist = nresult.get(constants.NV_PVLIST, None)
|
1304 |
test = pvlist is None |
1305 |
_ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node") |
1306 |
if not test: |
1307 |
# check that ':' is not present in PV names, since it's a
|
1308 |
# special character for lvcreate (denotes the range of PEs to
|
1309 |
# use on the PV)
|
1310 |
for _, pvname, owner_vg in pvlist: |
1311 |
test = ":" in pvname |
1312 |
_ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV" |
1313 |
" '%s' of VG '%s'", pvname, owner_vg)
|
1314 |
|
1315 |
def _VerifyNodeNetwork(self, ninfo, nresult): |
1316 |
"""Check the node time.
|
1317 |
|
1318 |
@type ninfo: L{objects.Node}
|
1319 |
@param ninfo: the node to check
|
1320 |
@param nresult: the remote results for the node
|
1321 |
|
1322 |
"""
|
1323 |
node = ninfo.name |
1324 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1325 |
|
1326 |
test = constants.NV_NODELIST not in nresult |
1327 |
_ErrorIf(test, self.ENODESSH, node,
|
1328 |
"node hasn't returned node ssh connectivity data")
|
1329 |
if not test: |
1330 |
if nresult[constants.NV_NODELIST]:
|
1331 |
for a_node, a_msg in nresult[constants.NV_NODELIST].items(): |
1332 |
_ErrorIf(True, self.ENODESSH, node, |
1333 |
"ssh communication with node '%s': %s", a_node, a_msg)
|
1334 |
|
1335 |
test = constants.NV_NODENETTEST not in nresult |
1336 |
_ErrorIf(test, self.ENODENET, node,
|
1337 |
"node hasn't returned node tcp connectivity data")
|
1338 |
if not test: |
1339 |
if nresult[constants.NV_NODENETTEST]:
|
1340 |
nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys()) |
1341 |
for anode in nlist: |
1342 |
_ErrorIf(True, self.ENODENET, node, |
1343 |
"tcp communication with node '%s': %s",
|
1344 |
anode, nresult[constants.NV_NODENETTEST][anode]) |
1345 |
|
1346 |
def _VerifyInstance(self, instance, instanceconfig, node_image): |
1347 |
"""Verify an instance.
|
1348 |
|
1349 |
This function checks to see if the required block devices are
|
1350 |
available on the instance's node.
|
1351 |
|
1352 |
"""
|
1353 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1354 |
node_current = instanceconfig.primary_node |
1355 |
|
1356 |
node_vol_should = {} |
1357 |
instanceconfig.MapLVsByNode(node_vol_should) |
1358 |
|
1359 |
for node in node_vol_should: |
1360 |
n_img = node_image[node] |
1361 |
if n_img.offline or n_img.rpc_fail or n_img.lvm_fail: |
1362 |
# ignore missing volumes on offline or broken nodes
|
1363 |
continue
|
1364 |
for volume in node_vol_should[node]: |
1365 |
test = volume not in n_img.volumes |
1366 |
_ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
|
1367 |
"volume %s missing on node %s", volume, node)
|
1368 |
|
1369 |
if instanceconfig.admin_up:
|
1370 |
pri_img = node_image[node_current] |
1371 |
test = instance not in pri_img.instances and not pri_img.offline |
1372 |
_ErrorIf(test, self.EINSTANCEDOWN, instance,
|
1373 |
"instance not running on its primary node %s",
|
1374 |
node_current) |
1375 |
|
1376 |
for node, n_img in node_image.items(): |
1377 |
if (not node == node_current): |
1378 |
test = instance in n_img.instances
|
1379 |
_ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
|
1380 |
"instance should not run on node %s", node)
|
1381 |
|
1382 |
def _VerifyOrphanVolumes(self, node_vol_should, node_image): |
1383 |
"""Verify if there are any unknown volumes in the cluster.
|
1384 |
|
1385 |
The .os, .swap and backup volumes are ignored. All other volumes are
|
1386 |
reported as unknown.
|
1387 |
|
1388 |
"""
|
1389 |
for node, n_img in node_image.items(): |
1390 |
if n_img.offline or n_img.rpc_fail or n_img.lvm_fail: |
1391 |
# skip non-healthy nodes
|
1392 |
continue
|
1393 |
for volume in n_img.volumes: |
1394 |
test = (node not in node_vol_should or |
1395 |
volume not in node_vol_should[node]) |
1396 |
self._ErrorIf(test, self.ENODEORPHANLV, node, |
1397 |
"volume %s is unknown", volume)
|
1398 |
|
1399 |
def _VerifyOrphanInstances(self, instancelist, node_image): |
1400 |
"""Verify the list of running instances.
|
1401 |
|
1402 |
This checks what instances are running but unknown to the cluster.
|
1403 |
|
1404 |
"""
|
1405 |
for node, n_img in node_image.items(): |
1406 |
for o_inst in n_img.instances: |
1407 |
test = o_inst not in instancelist |
1408 |
self._ErrorIf(test, self.ENODEORPHANINSTANCE, node, |
1409 |
"instance %s on node %s should not exist", o_inst, node)
|
1410 |
|
1411 |
def _VerifyNPlusOneMemory(self, node_image, instance_cfg): |
1412 |
"""Verify N+1 Memory Resilience.
|
1413 |
|
1414 |
Check that if one single node dies we can still start all the
|
1415 |
instances it was primary for.
|
1416 |
|
1417 |
"""
|
1418 |
for node, n_img in node_image.items(): |
1419 |
# This code checks that every node which is now listed as
|
1420 |
# secondary has enough memory to host all instances it is
|
1421 |
# supposed to should a single other node in the cluster fail.
|
1422 |
# FIXME: not ready for failover to an arbitrary node
|
1423 |
# FIXME: does not support file-backed instances
|
1424 |
# WARNING: we currently take into account down instances as well
|
1425 |
# as up ones, considering that even if they're down someone
|
1426 |
# might want to start them even in the event of a node failure.
|
1427 |
for prinode, instances in n_img.sbp.items(): |
1428 |
needed_mem = 0
|
1429 |
for instance in instances: |
1430 |
bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
|
1431 |
if bep[constants.BE_AUTO_BALANCE]:
|
1432 |
needed_mem += bep[constants.BE_MEMORY] |
1433 |
test = n_img.mfree < needed_mem |
1434 |
self._ErrorIf(test, self.ENODEN1, node, |
1435 |
"not enough memory on to accommodate"
|
1436 |
" failovers should peer node %s fail", prinode)
|
1437 |
|
1438 |
def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum, |
1439 |
master_files): |
1440 |
"""Verifies and computes the node required file checksums.
|
1441 |
|
1442 |
@type ninfo: L{objects.Node}
|
1443 |
@param ninfo: the node to check
|
1444 |
@param nresult: the remote results for the node
|
1445 |
@param file_list: required list of files
|
1446 |
@param local_cksum: dictionary of local files and their checksums
|
1447 |
@param master_files: list of files that only masters should have
|
1448 |
|
1449 |
"""
|
1450 |
node = ninfo.name |
1451 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1452 |
|
1453 |
remote_cksum = nresult.get(constants.NV_FILELIST, None)
|
1454 |
test = not isinstance(remote_cksum, dict) |
1455 |
_ErrorIf(test, self.ENODEFILECHECK, node,
|
1456 |
"node hasn't returned file checksum data")
|
1457 |
if test:
|
1458 |
return
|
1459 |
|
1460 |
for file_name in file_list: |
1461 |
node_is_mc = ninfo.master_candidate |
1462 |
must_have = (file_name not in master_files) or node_is_mc |
1463 |
# missing
|
1464 |
test1 = file_name not in remote_cksum |
1465 |
# invalid checksum
|
1466 |
test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name] |
1467 |
# existing and good
|
1468 |
test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name] |
1469 |
_ErrorIf(test1 and must_have, self.ENODEFILECHECK, node, |
1470 |
"file '%s' missing", file_name)
|
1471 |
_ErrorIf(test2 and must_have, self.ENODEFILECHECK, node, |
1472 |
"file '%s' has wrong checksum", file_name)
|
1473 |
# not candidate and this is not a must-have file
|
1474 |
_ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node, |
1475 |
"file '%s' should not exist on non master"
|
1476 |
" candidates (and the file is outdated)", file_name)
|
1477 |
# all good, except non-master/non-must have combination
|
1478 |
_ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node, |
1479 |
"file '%s' should not exist"
|
1480 |
" on non master candidates", file_name)
|
1481 |
|
1482 |
def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_map): |
1483 |
"""Verifies and the node DRBD status.
|
1484 |
|
1485 |
@type ninfo: L{objects.Node}
|
1486 |
@param ninfo: the node to check
|
1487 |
@param nresult: the remote results for the node
|
1488 |
@param instanceinfo: the dict of instances
|
1489 |
@param drbd_map: the DRBD map as returned by
|
1490 |
L{ganeti.config.ConfigWriter.ComputeDRBDMap}
|
1491 |
|
1492 |
"""
|
1493 |
node = ninfo.name |
1494 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1495 |
|
1496 |
# compute the DRBD minors
|
1497 |
node_drbd = {} |
1498 |
for minor, instance in drbd_map[node].items(): |
1499 |
test = instance not in instanceinfo |
1500 |
_ErrorIf(test, self.ECLUSTERCFG, None, |
1501 |
"ghost instance '%s' in temporary DRBD map", instance)
|
1502 |
# ghost instance should not be running, but otherwise we
|
1503 |
# don't give double warnings (both ghost instance and
|
1504 |
# unallocated minor in use)
|
1505 |
if test:
|
1506 |
node_drbd[minor] = (instance, False)
|
1507 |
else:
|
1508 |
instance = instanceinfo[instance] |
1509 |
node_drbd[minor] = (instance.name, instance.admin_up) |
1510 |
|
1511 |
# and now check them
|
1512 |
used_minors = nresult.get(constants.NV_DRBDLIST, []) |
1513 |
test = not isinstance(used_minors, (tuple, list)) |
1514 |
_ErrorIf(test, self.ENODEDRBD, node,
|
1515 |
"cannot parse drbd status file: %s", str(used_minors)) |
1516 |
if test:
|
1517 |
# we cannot check drbd status
|
1518 |
return
|
1519 |
|
1520 |
for minor, (iname, must_exist) in node_drbd.items(): |
1521 |
test = minor not in used_minors and must_exist |
1522 |
_ErrorIf(test, self.ENODEDRBD, node,
|
1523 |
"drbd minor %d of instance %s is not active", minor, iname)
|
1524 |
for minor in used_minors: |
1525 |
test = minor not in node_drbd |
1526 |
_ErrorIf(test, self.ENODEDRBD, node,
|
1527 |
"unallocated drbd minor %d is in use", minor)
|
1528 |
|
1529 |
def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name): |
1530 |
"""Verifies and updates the node volume data.
|
1531 |
|
1532 |
This function will update a L{NodeImage}'s internal structures
|
1533 |
with data from the remote call.
|
1534 |
|
1535 |
@type ninfo: L{objects.Node}
|
1536 |
@param ninfo: the node to check
|
1537 |
@param nresult: the remote results for the node
|
1538 |
@param nimg: the node image object
|
1539 |
@param vg_name: the configured VG name
|
1540 |
|
1541 |
"""
|
1542 |
node = ninfo.name |
1543 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1544 |
|
1545 |
nimg.lvm_fail = True
|
1546 |
lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
|
1547 |
if vg_name is None: |
1548 |
pass
|
1549 |
elif isinstance(lvdata, basestring): |
1550 |
_ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s", |
1551 |
utils.SafeEncode(lvdata)) |
1552 |
elif not isinstance(lvdata, dict): |
1553 |
_ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)") |
1554 |
else:
|
1555 |
nimg.volumes = lvdata |
1556 |
nimg.lvm_fail = False
|
1557 |
|
1558 |
def _UpdateNodeInstances(self, ninfo, nresult, nimg): |
1559 |
"""Verifies and updates the node instance list.
|
1560 |
|
1561 |
If the listing was successful, then updates this node's instance
|
1562 |
list. Otherwise, it marks the RPC call as failed for the instance
|
1563 |
list key.
|
1564 |
|
1565 |
@type ninfo: L{objects.Node}
|
1566 |
@param ninfo: the node to check
|
1567 |
@param nresult: the remote results for the node
|
1568 |
@param nimg: the node image object
|
1569 |
|
1570 |
"""
|
1571 |
idata = nresult.get(constants.NV_INSTANCELIST, None)
|
1572 |
test = not isinstance(idata, list) |
1573 |
self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed" |
1574 |
" (instancelist): %s", utils.SafeEncode(str(idata))) |
1575 |
if test:
|
1576 |
nimg.hyp_fail = True
|
1577 |
else:
|
1578 |
nimg.instances = idata |
1579 |
|
1580 |
def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name): |
"""Verifies and computes a node information map.
|
1582 |
|
1583 |
@type ninfo: L{objects.Node}
|
1584 |
@param ninfo: the node to check
|
1585 |
@param nresult: the remote results for the node
|
1586 |
@param nimg: the node image object
|
1587 |
@param vg_name: the configured VG name
|
1588 |
|
1589 |
"""
|
1590 |
node = ninfo.name |
1591 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1592 |
|
1593 |
# try to read free memory (from the hypervisor)
|
1594 |
hv_info = nresult.get(constants.NV_HVINFO, None)
|
1595 |
test = not isinstance(hv_info, dict) or "memory_free" not in hv_info |
1596 |
_ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)") |
1597 |
if not test: |
1598 |
try:
|
1599 |
nimg.mfree = int(hv_info["memory_free"]) |
1600 |
except (ValueError, TypeError): |
1601 |
_ErrorIf(True, self.ENODERPC, node, |
1602 |
"node returned invalid nodeinfo, check hypervisor")
|
1603 |
|
1604 |
# FIXME: devise a free space model for file based instances as well
|
1605 |
if vg_name is not None: |
1606 |
test = (constants.NV_VGLIST not in nresult or |
1607 |
vg_name not in nresult[constants.NV_VGLIST]) |
1608 |
_ErrorIf(test, self.ENODELVM, node,
|
1609 |
"node didn't return data for the volume group '%s'"
|
1610 |
" - it is either missing or broken", vg_name)
|
1611 |
if not test: |
1612 |
try:
|
1613 |
nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
|
1614 |
except (ValueError, TypeError): |
1615 |
_ErrorIf(True, self.ENODERPC, node, |
1616 |
"node returned invalid LVM info, check LVM status")
|
1617 |
|
1618 |
def CheckPrereq(self): |
1619 |
"""Check prerequisites.
|
1620 |
|
1621 |
Transform the list of checks we're going to skip into a set and check that
|
1622 |
all its members are valid.
|
1623 |
|
1624 |
"""
|
1625 |
self.skip_set = frozenset(self.op.skip_checks) |
1626 |
if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set): |
1627 |
raise errors.OpPrereqError("Invalid checks to be skipped specified", |
1628 |
errors.ECODE_INVAL) |
1629 |
|
1630 |
def BuildHooksEnv(self): |
1631 |
"""Build hooks env.
|
1632 |
|
1633 |
Cluster-Verify hooks are run only in the post phase; if they fail, their
output is logged in the verify output and the verification fails.
|
1635 |
|
1636 |
"""
|
1637 |
all_nodes = self.cfg.GetNodeList()
|
1638 |
env = { |
1639 |
"CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags()) |
1640 |
} |
1641 |
for node in self.cfg.GetAllNodesInfo().values(): |
1642 |
env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags()) |
1643 |
|
1644 |
return env, [], all_nodes
|
1645 |
|
1646 |
def Exec(self, feedback_fn): |
"""Verify integrity of cluster, performing various tests on nodes.
|
1648 |
|
1649 |
"""
|
1650 |
self.bad = False |
1651 |
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103 |
1652 |
verbose = self.op.verbose
|
1653 |
self._feedback_fn = feedback_fn
|
1654 |
feedback_fn("* Verifying global settings")
|
1655 |
for msg in self.cfg.VerifyConfig(): |
1656 |
_ErrorIf(True, self.ECLUSTERCFG, None, msg) |
1657 |
|
1658 |
# Check the cluster certificates
|
1659 |
for cert_filename in constants.ALL_CERT_FILES: |
1660 |
(errcode, msg) = _VerifyCertificate(cert_filename) |
1661 |
_ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode) |
1662 |
|
1663 |
vg_name = self.cfg.GetVGName()
|
1664 |
hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
|
1665 |
nodelist = utils.NiceSort(self.cfg.GetNodeList())
|
1666 |
nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist] |
1667 |
instancelist = utils.NiceSort(self.cfg.GetInstanceList())
|
1668 |
instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname)) |
1669 |
for iname in instancelist) |
1670 |
i_non_redundant = [] # Non redundant instances
|
1671 |
i_non_a_balanced = [] # Non auto-balanced instances
|
1672 |
n_offline = 0 # Count of offline nodes |
1673 |
n_drained = 0 # Count of nodes being drained |
1674 |
node_vol_should = {} |
1675 |
|
1676 |
# FIXME: verify OS list
|
1677 |
# do local checksums
|
1678 |
master_files = [constants.CLUSTER_CONF_FILE] |
1679 |
|
1680 |
file_names = ssconf.SimpleStore().GetFileList() |
1681 |
file_names.extend(constants.ALL_CERT_FILES) |
1682 |
file_names.extend(master_files) |
1683 |
|
1684 |
local_checksums = utils.FingerprintFiles(file_names) |
1685 |
|
1686 |
feedback_fn("* Gathering data (%d nodes)" % len(nodelist)) |
1687 |
node_verify_param = { |
1688 |
constants.NV_FILELIST: file_names, |
1689 |
constants.NV_NODELIST: [node.name for node in nodeinfo |
1690 |
if not node.offline], |
1691 |
constants.NV_HYPERVISOR: hypervisors, |
1692 |
constants.NV_NODENETTEST: [(node.name, node.primary_ip, |
1693 |
node.secondary_ip) for node in nodeinfo |
1694 |
if not node.offline], |
1695 |
constants.NV_INSTANCELIST: hypervisors, |
1696 |
constants.NV_VERSION: None,
|
1697 |
constants.NV_HVINFO: self.cfg.GetHypervisorType(),
|
1698 |
constants.NV_NODESETUP: None,
|
1699 |
constants.NV_TIME: None,
|
1700 |
} |
1701 |
|
1702 |
if vg_name is not None: |
1703 |
node_verify_param[constants.NV_VGLIST] = None
|
1704 |
node_verify_param[constants.NV_LVLIST] = vg_name |
1705 |
node_verify_param[constants.NV_PVLIST] = [vg_name] |
1706 |
node_verify_param[constants.NV_DRBDLIST] = None
|
1707 |
|
1708 |
# Build our expected cluster state
|
1709 |
node_image = dict((node.name, self.NodeImage(offline=node.offline)) |
1710 |
for node in nodeinfo) |
1711 |
|
1712 |
for instance in instancelist: |
1713 |
inst_config = instanceinfo[instance] |
1714 |
|
1715 |
for nname in inst_config.all_nodes: |
1716 |
if nname not in node_image: |
1717 |
# ghost node
|
1718 |
gnode = self.NodeImage()
|
1719 |
gnode.ghost = True
|
1720 |
node_image[nname] = gnode |
1721 |
|
1722 |
inst_config.MapLVsByNode(node_vol_should) |
1723 |
|
1724 |
pnode = inst_config.primary_node |
1725 |
node_image[pnode].pinst.append(instance) |
1726 |
|
1727 |
for snode in inst_config.secondary_nodes: |
1728 |
nimg = node_image[snode] |
1729 |
nimg.sinst.append(instance) |
1730 |
if pnode not in nimg.sbp: |
1731 |
nimg.sbp[pnode] = [] |
1732 |
nimg.sbp[pnode].append(instance) |
1733 |
|
1734 |
# At this point, we have the in-memory data structures complete,
|
1735 |
# except for the runtime information, which we'll gather next
|
1736 |
|
1737 |
# Due to the way our RPC system works, exact response times cannot be
|
1738 |
# guaranteed (e.g. a broken node could run into a timeout). By keeping the
|
1739 |
# time before and after executing the request, we can at least have a time
|
1740 |
# window.
|
1741 |
nvinfo_starttime = time.time() |
1742 |
all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
|
1743 |
self.cfg.GetClusterName())
|
1744 |
nvinfo_endtime = time.time() |
1745 |
|
1746 |
cluster = self.cfg.GetClusterInfo()
|
1747 |
master_node = self.cfg.GetMasterNode()
|
1748 |
all_drbd_map = self.cfg.ComputeDRBDMap()
|
1749 |
|
1750 |
feedback_fn("* Verifying node status")
|
1751 |
for node_i in nodeinfo: |
1752 |
node = node_i.name |
1753 |
nimg = node_image[node] |
1754 |
|
1755 |
if node_i.offline:
|
1756 |
if verbose:
|
1757 |
feedback_fn("* Skipping offline node %s" % (node,))
|
1758 |
n_offline += 1
|
1759 |
continue
|
1760 |
|
1761 |
if node == master_node:
|
1762 |
ntype = "master"
|
1763 |
elif node_i.master_candidate:
|
1764 |
ntype = "master candidate"
|
1765 |
elif node_i.drained:
|
1766 |
ntype = "drained"
|
1767 |
n_drained += 1
|
1768 |
else:
|
1769 |
ntype = "regular"
|
1770 |
if verbose:
|
1771 |
feedback_fn("* Verifying node %s (%s)" % (node, ntype))
|
1772 |
|
1773 |
msg = all_nvinfo[node].fail_msg |
1774 |
_ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg) |
1775 |
if msg:
|
1776 |
nimg.rpc_fail = True
|
1777 |
continue
|
1778 |
|
1779 |
nresult = all_nvinfo[node].payload |
1780 |
|
1781 |
nimg.call_ok = self._VerifyNode(node_i, nresult)
|
1782 |
self._VerifyNodeNetwork(node_i, nresult)
|
1783 |
self._VerifyNodeLVM(node_i, nresult, vg_name)
|
1784 |
self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
|
1785 |
master_files) |
1786 |
self._VerifyNodeDrbd(node_i, nresult, instanceinfo, all_drbd_map)
|
1787 |
self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
|
1788 |
|
1789 |
self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
|
1790 |
self._UpdateNodeInstances(node_i, nresult, nimg)
|
1791 |
self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
|
1792 |
|
1793 |
feedback_fn("* Verifying instance status")
|
1794 |
for instance in instancelist: |
1795 |
if verbose:
|
1796 |
feedback_fn("* Verifying instance %s" % instance)
|
1797 |
inst_config = instanceinfo[instance] |
1798 |
self._VerifyInstance(instance, inst_config, node_image)
|
1799 |
inst_nodes_offline = [] |
1800 |
|
1801 |
pnode = inst_config.primary_node |
1802 |
pnode_img = node_image[pnode] |
1803 |
_ErrorIf(pnode_img.rpc_fail and not pnode_img.offline, |
1804 |
self.ENODERPC, pnode, "instance %s, connection to" |
1805 |
" primary node failed", instance)
|
1806 |
|
1807 |
if pnode_img.offline:
|
1808 |
inst_nodes_offline.append(pnode) |
1809 |
|
1810 |
# If the instance is non-redundant we cannot survive losing its primary
|
1811 |
# node, so we are not N+1 compliant. On the other hand we have no disk
|
1812 |
# templates with more than one secondary so that situation is not well
|
1813 |
# supported either.
|
1814 |
# FIXME: does not support file-backed instances
|
1815 |
if not inst_config.secondary_nodes: |
1816 |
i_non_redundant.append(instance) |
1817 |
_ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT, |
1818 |
instance, "instance has multiple secondary nodes: %s",
|
1819 |
utils.CommaJoin(inst_config.secondary_nodes), |
1820 |
code=self.ETYPE_WARNING)
|
1821 |
|
1822 |
if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]: |
1823 |
i_non_a_balanced.append(instance) |
1824 |
|
1825 |
for snode in inst_config.secondary_nodes: |
1826 |
s_img = node_image[snode] |
1827 |
_ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode, |
1828 |
"instance %s, connection to secondary node failed", instance)
|
1829 |
|
1830 |
if s_img.offline:
|
1831 |
inst_nodes_offline.append(snode) |
1832 |
|
1833 |
# warn that the instance lives on offline nodes
|
1834 |
_ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
|
1835 |
"instance lives on offline node(s) %s",
|
1836 |
utils.CommaJoin(inst_nodes_offline)) |
1837 |
# ... or ghost nodes
|
1838 |
for node in inst_config.all_nodes: |
1839 |
_ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
|
1840 |
"instance lives on ghost node %s", node)
|
1841 |
|
1842 |
feedback_fn("* Verifying orphan volumes")
|
1843 |
self._VerifyOrphanVolumes(node_vol_should, node_image)
|
1844 |
|
1845 |
feedback_fn("* Verifying orphan instances")
|
1846 |
self._VerifyOrphanInstances(instancelist, node_image)
|
1847 |
|
1848 |
if constants.VERIFY_NPLUSONE_MEM not in self.skip_set: |
1849 |
feedback_fn("* Verifying N+1 Memory redundancy")
|
1850 |
self._VerifyNPlusOneMemory(node_image, instanceinfo)
|
1851 |
|
1852 |
feedback_fn("* Other Notes")
|
1853 |
if i_non_redundant:
|
1854 |
feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
|
1855 |
% len(i_non_redundant))
|
1856 |
|
1857 |
if i_non_a_balanced:
|
1858 |
feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
|
1859 |
% len(i_non_a_balanced))
|
1860 |
|
1861 |
if n_offline:
|
1862 |
feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
|
1863 |
|
1864 |
if n_drained:
|
1865 |
feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
|
1866 |
|
1867 |
return not self.bad |
1868 |
|
1869 |
def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result): |
1870 |
"""Analyze the post-hooks' result
|
1871 |
|
1872 |
This method analyses the hook result, handles it, and sends some
|
1873 |
nicely-formatted feedback back to the user.
|
1874 |
|
1875 |
@param phase: one of L{constants.HOOKS_PHASE_POST} or
|
1876 |
L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
|
1877 |
@param hooks_results: the results of the multi-node hooks rpc call
|
1878 |
@param feedback_fn: function used to send feedback back to the caller
|
1879 |
@param lu_result: previous Exec result
|
1880 |
@return: the new Exec result, based on the previous result
|
1881 |
and hook results
|
1882 |
|
1883 |
"""
|
1884 |
# We only really run POST phase hooks, and are only interested in
|
1885 |
# their results
|
1886 |
if phase == constants.HOOKS_PHASE_POST:
|
1887 |
# Used to change hooks' output to proper indentation
|
1888 |
indent_re = re.compile('^', re.M)
|
1889 |
feedback_fn("* Hooks Results")
|
1890 |
assert hooks_results, "invalid result from hooks" |
1891 |
|
1892 |
for node_name in hooks_results: |
1893 |
res = hooks_results[node_name] |
1894 |
msg = res.fail_msg |
1895 |
test = msg and not res.offline |
1896 |
self._ErrorIf(test, self.ENODEHOOKS, node_name, |
1897 |
"Communication failure in hooks execution: %s", msg)
|
1898 |
if res.offline or msg: |
1899 |
# No need to investigate payload if node is offline or gave an error.
|
1900 |
# override manually lu_result here as _ErrorIf only
|
1901 |
# overrides self.bad
|
1902 |
lu_result = 1
|
1903 |
continue
|
1904 |
for script, hkr, output in res.payload: |
1905 |
test = hkr == constants.HKR_FAIL |
1906 |
self._ErrorIf(test, self.ENODEHOOKS, node_name, |
1907 |
"Script %s failed, output:", script)
|
1908 |
if test:
|
1909 |
output = indent_re.sub(' ', output)
|
1910 |
feedback_fn("%s" % output)
|
1911 |
lu_result = 0
|
1912 |
|
1913 |
return lu_result
|
1914 |
|
1915 |
|
1916 |
class LUVerifyDisks(NoHooksLU): |
1917 |
"""Verifies the cluster disks status.
|
1918 |
|
1919 |
"""
|
1920 |
_OP_REQP = [] |
1921 |
REQ_BGL = False
|
1922 |
|
1923 |
def ExpandNames(self): |
1924 |
self.needed_locks = {
|
1925 |
locking.LEVEL_NODE: locking.ALL_SET, |
1926 |
locking.LEVEL_INSTANCE: locking.ALL_SET, |
1927 |
} |
1928 |
self.share_locks = dict.fromkeys(locking.LEVELS, 1) |
1929 |
|
1930 |
def CheckPrereq(self): |
1931 |
"""Check prerequisites.
|
1932 |
|
1933 |
This has no prerequisites.
|
1934 |
|
1935 |
"""
|
1936 |
pass
|
1937 |
|
1938 |
def Exec(self, feedback_fn): |
1939 |
"""Verify integrity of cluster disks.
|
1940 |
|
1941 |
@rtype: tuple of three items
|
1942 |
@return: a tuple of (dict of node-to-node_error, list of instances
    which need activate-disks, dict of instance: (node, volume) for
    missing volumes)
|
1945 |
|
1946 |
"""
|
1947 |
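# note that "result" aliases the three containers built here, so the
# in-place updates to res_nodes/res_instances/res_missing below are
# what gets returned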
result = res_nodes, res_instances, res_missing = {}, [], {} |
1948 |
|
1949 |
vg_name = self.cfg.GetVGName()
|
1950 |
nodes = utils.NiceSort(self.cfg.GetNodeList())
|
1951 |
instances = [self.cfg.GetInstanceInfo(name)
|
1952 |
for name in self.cfg.GetInstanceList()] |
1953 |
|
1954 |
nv_dict = {} |
1955 |
for inst in instances: |
1956 |
inst_lvs = {} |
1957 |
if (not inst.admin_up or |
1958 |
inst.disk_template not in constants.DTS_NET_MIRROR): |
1959 |
continue
|
1960 |
inst.MapLVsByNode(inst_lvs) |
1961 |
# transform { iname: {node: [vol,],},} to {(node, vol): iname}
|
1962 |
for node, vol_list in inst_lvs.iteritems(): |
1963 |
for vol in vol_list: |
1964 |
nv_dict[(node, vol)] = inst |
1965 |
|
1966 |
if not nv_dict: |
1967 |
return result
|
1968 |
|
1969 |
node_lvs = self.rpc.call_lv_list(nodes, vg_name)
|
1970 |
|
1971 |
for node in nodes: |
1972 |
# node_volume
|
1973 |
node_res = node_lvs[node] |
1974 |
if node_res.offline:
|
1975 |
continue
|
1976 |
msg = node_res.fail_msg |
1977 |
if msg:
|
1978 |
logging.warning("Error enumerating LVs on node %s: %s", node, msg)
|
1979 |
res_nodes[node] = msg |
1980 |
continue
|
1981 |
|
1982 |
lvs = node_res.payload |
1983 |
for lv_name, (_, _, lv_online) in lvs.items(): |
1984 |
inst = nv_dict.pop((node, lv_name), None)
|
1985 |
if (not lv_online and inst is not None |
1986 |
and inst.name not in res_instances): |
1987 |
res_instances.append(inst.name) |
1988 |
|
1989 |
# any leftover items in nv_dict are missing LVs, let's arrange the
|
1990 |
# data better
|
1991 |
for key, inst in nv_dict.iteritems(): |
1992 |
if inst.name not in res_missing: |
1993 |
res_missing[inst.name] = [] |
1994 |
res_missing[inst.name].append(key) |
1995 |
|
1996 |
return result
|
1997 |
|
1998 |
|
1999 |
class LURepairDiskSizes(NoHooksLU): |
2000 |
"""Verifies the cluster disks sizes.
|
2001 |
|
2002 |
"""
|
2003 |
_OP_REQP = ["instances"]
|
2004 |
REQ_BGL = False
|
2005 |
|
2006 |
def ExpandNames(self): |
2007 |
if not isinstance(self.op.instances, list): |
2008 |
raise errors.OpPrereqError("Invalid argument type 'instances'", |
2009 |
errors.ECODE_INVAL) |
2010 |
|
2011 |
if self.op.instances: |
2012 |
self.wanted_names = []
|
2013 |
for name in self.op.instances: |
2014 |
full_name = _ExpandInstanceName(self.cfg, name)
|
2015 |
self.wanted_names.append(full_name)
|
2016 |
self.needed_locks = {
|
2017 |
locking.LEVEL_NODE: [], |
2018 |
locking.LEVEL_INSTANCE: self.wanted_names,
|
2019 |
} |
2020 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
|
2021 |
else:
|
2022 |
self.wanted_names = None |
2023 |
self.needed_locks = {
|
2024 |
locking.LEVEL_NODE: locking.ALL_SET, |
2025 |
locking.LEVEL_INSTANCE: locking.ALL_SET, |
2026 |
} |
2027 |
self.share_locks = dict(((i, 1) for i in locking.LEVELS)) |
2028 |
|
2029 |
def DeclareLocks(self, level): |
2030 |
if level == locking.LEVEL_NODE and self.wanted_names is not None: |
2031 |
self._LockInstancesNodes(primary_only=True) |
2032 |
|
2033 |
def CheckPrereq(self): |
2034 |
"""Check prerequisites.
|
2035 |
|
2036 |
This only checks the optional instance list against the existing names.
|
2037 |
|
2038 |
"""
|
2039 |
if self.wanted_names is None: |
2040 |
self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE] |
2041 |
|
2042 |
self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name |
2043 |
in self.wanted_names] |
2044 |
|
2045 |
def _EnsureChildSizes(self, disk): |
2046 |
"""Ensure children of the disk have the needed disk size.
|
2047 |
|
2048 |
This is valid mainly for DRBD8 and fixes an issue where the
|
2049 |
children have smaller disk size.
|
2050 |
|
2051 |
@param disk: an L{ganeti.objects.Disk} object
|
2052 |
|
2053 |
"""
|
2054 |
if disk.dev_type == constants.LD_DRBD8:
|
2055 |
assert disk.children, "Empty children for DRBD8?" |
2056 |
fchild = disk.children[0]
|
2057 |
mismatch = fchild.size < disk.size |
2058 |
if mismatch:
|
2059 |
self.LogInfo("Child disk has size %d, parent %d, fixing", |
2060 |
fchild.size, disk.size) |
2061 |
fchild.size = disk.size |
2062 |
|
2063 |
# and we recurse on this child only, not on the metadev
|
2064 |
return self._EnsureChildSizes(fchild) or mismatch |
2065 |
else:
|
2066 |
return False |
2067 |
|
2068 |
def Exec(self, feedback_fn): |
2069 |
"""Verify the size of cluster disks.
|
2070 |
|
2071 |
"""
|
2072 |
# TODO: check child disks too
|
2073 |
# TODO: check differences in size between primary/secondary nodes
|
2074 |
per_node_disks = {} |
2075 |
for instance in self.wanted_instances: |
2076 |
pnode = instance.primary_node |
2077 |
if pnode not in per_node_disks: |
2078 |
per_node_disks[pnode] = [] |
2079 |
for idx, disk in enumerate(instance.disks): |
2080 |
per_node_disks[pnode].append((instance, idx, disk)) |
2081 |
|
2082 |
changed = [] |
2083 |
for node, dskl in per_node_disks.items(): |
2084 |
newl = [v[2].Copy() for v in dskl] |
2085 |
for dsk in newl: |
2086 |
self.cfg.SetDiskID(dsk, node)
|
2087 |
result = self.rpc.call_blockdev_getsizes(node, newl)
|
2088 |
if result.fail_msg:
|
2089 |
self.LogWarning("Failure in blockdev_getsizes call to node" |
2090 |
" %s, ignoring", node)
|
2091 |
continue
|
2092 |
if len(result.data) != len(dskl): |
2093 |
self.LogWarning("Invalid result from node %s, ignoring node results", |
2094 |
node) |
2095 |
continue
|
2096 |
for ((instance, idx, disk), size) in zip(dskl, result.data): |
2097 |
if size is None: |
2098 |
self.LogWarning("Disk %d of instance %s did not return size" |
2099 |
" information, ignoring", idx, instance.name)
|
2100 |
continue
|
2101 |
if not isinstance(size, (int, long)): |
2102 |
self.LogWarning("Disk %d of instance %s did not return valid" |
2103 |
" size information, ignoring", idx, instance.name)
|
2104 |
continue
|
2105 |
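# blockdev_getsizes presumably reports sizes in bytes; shift by 20 bits
# so the comparison below is in MiB, like disk.size in the configuration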
size = size >> 20
|
2106 |
if size != disk.size:
|
2107 |
self.LogInfo("Disk %d of instance %s has mismatched size," |
2108 |
" correcting: recorded %d, actual %d", idx,
|
2109 |
instance.name, disk.size, size) |
2110 |
disk.size = size |
2111 |
self.cfg.Update(instance, feedback_fn)
|
2112 |
changed.append((instance.name, idx, size)) |
2113 |
if self._EnsureChildSizes(disk): |
2114 |
self.cfg.Update(instance, feedback_fn)
|
2115 |
changed.append((instance.name, idx, disk.size)) |
2116 |
return changed
|
2117 |
|
2118 |
|
2119 |
class LURenameCluster(LogicalUnit): |
2120 |
"""Rename the cluster.
|
2121 |
|
2122 |
"""
|
2123 |
HPATH = "cluster-rename"
|
2124 |
HTYPE = constants.HTYPE_CLUSTER |
2125 |
_OP_REQP = ["name"]
|
2126 |
|
2127 |
def BuildHooksEnv(self): |
2128 |
"""Build hooks env.
|
2129 |
|
2130 |
"""
|
2131 |
env = { |
2132 |
"OP_TARGET": self.cfg.GetClusterName(), |
2133 |
"NEW_NAME": self.op.name, |
2134 |
} |
2135 |
mn = self.cfg.GetMasterNode()
|
2136 |
all_nodes = self.cfg.GetNodeList()
|
2137 |
return env, [mn], all_nodes
|
2138 |
|
2139 |
def CheckPrereq(self): |
2140 |
"""Verify that the passed name is a valid one.
|
2141 |
|
2142 |
"""
|
2143 |
hostname = utils.GetHostInfo(self.op.name)
|
2144 |
|
2145 |
new_name = hostname.name |
2146 |
self.ip = new_ip = hostname.ip
|
2147 |
old_name = self.cfg.GetClusterName()
|
2148 |
old_ip = self.cfg.GetMasterIP()
|
2149 |
if new_name == old_name and new_ip == old_ip: |
2150 |
raise errors.OpPrereqError("Neither the name nor the IP address of the" |
2151 |
" cluster has changed",
|
2152 |
errors.ECODE_INVAL) |
2153 |
if new_ip != old_ip:
|
2154 |
if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
|
2155 |
raise errors.OpPrereqError("The given cluster IP address (%s) is" |
2156 |
" reachable on the network. Aborting." %
|
2157 |
new_ip, errors.ECODE_NOTUNIQUE) |
2158 |
|
2159 |
self.op.name = new_name
|
2160 |
|
2161 |
def Exec(self, feedback_fn): |
2162 |
"""Rename the cluster.
|
2163 |
|
2164 |
"""
|
2165 |
clustername = self.op.name
|
2166 |
ip = self.ip
|
2167 |
|
2168 |
# shutdown the master IP
|
2169 |
master = self.cfg.GetMasterNode()
|
2170 |
result = self.rpc.call_node_stop_master(master, False) |
2171 |
result.Raise("Could not disable the master role")
|
2172 |
|
2173 |
try:
|
2174 |
cluster = self.cfg.GetClusterInfo()
|
2175 |
cluster.cluster_name = clustername |
2176 |
cluster.master_ip = ip |
2177 |
self.cfg.Update(cluster, feedback_fn)
|
2178 |
|
2179 |
# update the known hosts file
|
2180 |
ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
|
2181 |
node_list = self.cfg.GetNodeList()
|
2182 |
try:
|
2183 |
node_list.remove(master) |
2184 |
except ValueError: |
2185 |
pass
|
2186 |
result = self.rpc.call_upload_file(node_list,
|
2187 |
constants.SSH_KNOWN_HOSTS_FILE) |
2188 |
for to_node, to_result in result.iteritems(): |
2189 |
msg = to_result.fail_msg |
2190 |
if msg:
|
2191 |
msg = ("Copy of file %s to node %s failed: %s" %
|
2192 |
(constants.SSH_KNOWN_HOSTS_FILE, to_node, msg)) |
2193 |
self.proc.LogWarning(msg)
|
2194 |
|
2195 |
finally:
|
2196 |
result = self.rpc.call_node_start_master(master, False, False) |
2197 |
msg = result.fail_msg |
2198 |
if msg:
|
2199 |
self.LogWarning("Could not re-enable the master role on" |
2200 |
" the master, please restart manually: %s", msg)
|
2201 |
|
2202 |
|
2203 |
def _RecursiveCheckIfLVMBased(disk): |
2204 |
"""Check if the given disk or its children are lvm-based.
|
2205 |
|
2206 |
@type disk: L{objects.Disk}
|
2207 |
@param disk: the disk to check
|
2208 |
@rtype: boolean
|
2209 |
@return: boolean indicating whether a LD_LV dev_type was found or not
|
2210 |
|
2211 |
"""
|
2212 |
if disk.children:
|
2213 |
for chdisk in disk.children: |
2214 |
if _RecursiveCheckIfLVMBased(chdisk):
|
2215 |
return True |
2216 |
return disk.dev_type == constants.LD_LV
|
2217 |
|
2218 |
|
2219 |
class LUSetClusterParams(LogicalUnit): |
2220 |
"""Change the parameters of the cluster.
|
2221 |
|
2222 |
"""
|
2223 |
HPATH = "cluster-modify"
|
2224 |
HTYPE = constants.HTYPE_CLUSTER |
2225 |
_OP_REQP = [] |
2226 |
REQ_BGL = False
|
2227 |
|
2228 |
def CheckArguments(self): |
2229 |
"""Check parameters
|
2230 |
|
2231 |
"""
|
2232 |
if not hasattr(self.op, "candidate_pool_size"): |
2233 |
self.op.candidate_pool_size = None |
2234 |
if self.op.candidate_pool_size is not None: |
2235 |
try:
|
2236 |
self.op.candidate_pool_size = int(self.op.candidate_pool_size) |
2237 |
except (ValueError, TypeError), err: |
2238 |
raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" % |
2239 |
str(err), errors.ECODE_INVAL)
|
2240 |
if self.op.candidate_pool_size < 1: |
2241 |
raise errors.OpPrereqError("At least one master candidate needed", |
2242 |
errors.ECODE_INVAL) |
2243 |
_CheckBooleanOpField(self.op, "maintain_node_health") |
2244 |
|
2245 |
def ExpandNames(self): |
2246 |
# FIXME: in the future maybe other cluster params won't require checking on
|
2247 |
# all nodes to be modified.
|
2248 |
self.needed_locks = {
|
2249 |
locking.LEVEL_NODE: locking.ALL_SET, |
2250 |
} |
2251 |
self.share_locks[locking.LEVEL_NODE] = 1 |
2252 |
|
2253 |
def BuildHooksEnv(self): |
2254 |
"""Build hooks env.
|
2255 |
|
2256 |
"""
|
2257 |
env = { |
2258 |
"OP_TARGET": self.cfg.GetClusterName(), |
2259 |
"NEW_VG_NAME": self.op.vg_name, |
2260 |
} |
2261 |
mn = self.cfg.GetMasterNode()
|
2262 |
return env, [mn], [mn]
|
2263 |
|
2264 |
def CheckPrereq(self): |
2265 |
"""Check prerequisites.
|
2266 |
|
2267 |
This checks whether the given params don't conflict and
|
2268 |
if the given volume group is valid.
|
2269 |
|
2270 |
"""
|
2271 |
if self.op.vg_name is not None and not self.op.vg_name: |
2272 |
instances = self.cfg.GetAllInstancesInfo().values()
|
2273 |
for inst in instances: |
2274 |
for disk in inst.disks: |
2275 |
if _RecursiveCheckIfLVMBased(disk):
|
2276 |
raise errors.OpPrereqError("Cannot disable lvm storage while" |
2277 |
" lvm-based instances exist",
|
2278 |
errors.ECODE_INVAL) |
2279 |
|
2280 |
node_list = self.acquired_locks[locking.LEVEL_NODE]
|
2281 |
|
2282 |
# if vg_name not None, checks given volume group on all nodes
|
2283 |
if self.op.vg_name: |
2284 |
vglist = self.rpc.call_vg_list(node_list)
|
2285 |
for node in node_list: |
2286 |
msg = vglist[node].fail_msg |
2287 |
if msg:
|
2288 |
# ignoring down node
|
2289 |
self.LogWarning("Error while gathering data on node %s" |
2290 |
" (ignoring node): %s", node, msg)
|
2291 |
continue
|
2292 |
vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload, |
2293 |
self.op.vg_name,
|
2294 |
constants.MIN_VG_SIZE) |
2295 |
if vgstatus:
|
2296 |
raise errors.OpPrereqError("Error on node '%s': %s" % |
2297 |
(node, vgstatus), errors.ECODE_ENVIRON) |
2298 |
|
2299 |
self.cluster = cluster = self.cfg.GetClusterInfo() |
2300 |
# validate params changes
|
2301 |
if self.op.beparams: |
2302 |
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
|
2303 |
self.new_beparams = objects.FillDict(
|
2304 |
cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
|
2305 |
|
2306 |
if self.op.nicparams: |
2307 |
utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
|
2308 |
self.new_nicparams = objects.FillDict(
|
2309 |
cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
|
2310 |
objects.NIC.CheckParameterSyntax(self.new_nicparams)
|
2311 |
nic_errors = [] |
2312 |
|
2313 |
# check all instances for consistency
|
2314 |
for instance in self.cfg.GetAllInstancesInfo().values(): |
2315 |
for nic_idx, nic in enumerate(instance.nics): |
2316 |
params_copy = copy.deepcopy(nic.nicparams) |
2317 |
params_filled = objects.FillDict(self.new_nicparams, params_copy)
|
2318 |
|
2319 |
# check parameter syntax
|
2320 |
try:
|
2321 |
objects.NIC.CheckParameterSyntax(params_filled) |
2322 |
except errors.ConfigurationError, err:
|
2323 |
nic_errors.append("Instance %s, nic/%d: %s" %
|
2324 |
(instance.name, nic_idx, err)) |
2325 |
|
2326 |
# if we're moving instances to routed, check that they have an ip
|
2327 |
target_mode = params_filled[constants.NIC_MODE] |
2328 |
if target_mode == constants.NIC_MODE_ROUTED and not nic.ip: |
2329 |
nic_errors.append("Instance %s, nic/%d: routed NIC with no IP" %
|
2330 |
(instance.name, nic_idx)) |
2331 |
if nic_errors:
|
2332 |
raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" % |
2333 |
"\n".join(nic_errors))
|
2334 |
|
2335 |
# hypervisor list/parameters
|
2336 |
self.new_hvparams = objects.FillDict(cluster.hvparams, {})
|
2337 |
if self.op.hvparams: |
2338 |
if not isinstance(self.op.hvparams, dict): |
2339 |
raise errors.OpPrereqError("Invalid 'hvparams' parameter on input", |
2340 |
errors.ECODE_INVAL) |
2341 |
for hv_name, hv_dict in self.op.hvparams.items(): |
2342 |
if hv_name not in self.new_hvparams: |
2343 |
self.new_hvparams[hv_name] = hv_dict
|
2344 |
else:
|
2345 |
self.new_hvparams[hv_name].update(hv_dict)
|
2346 |
|
2347 |
# os hypervisor parameters
|
2348 |
self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
|
2349 |
if self.op.os_hvp: |
2350 |
if not isinstance(self.op.os_hvp, dict): |
2351 |
raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input", |
2352 |
errors.ECODE_INVAL) |
2353 |
for os_name, hvs in self.op.os_hvp.items(): |
2354 |
if not isinstance(hvs, dict): |
2355 |
raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on" |
2356 |
" input"), errors.ECODE_INVAL)
|
2357 |
if os_name not in self.new_os_hvp: |
2358 |
self.new_os_hvp[os_name] = hvs
|
2359 |
else:
|
2360 |
for hv_name, hv_dict in hvs.items(): |
2361 |
if hv_name not in self.new_os_hvp[os_name]: |
2362 |
self.new_os_hvp[os_name][hv_name] = hv_dict
|
2363 |
else:
|
2364 |
self.new_os_hvp[os_name][hv_name].update(hv_dict)
|
2365 |
|
2366 |
if self.op.enabled_hypervisors is not None: |
2367 |
self.hv_list = self.op.enabled_hypervisors |
2368 |
if not self.hv_list: |
2369 |
raise errors.OpPrereqError("Enabled hypervisors list must contain at" |
2370 |
" least one member",
|
2371 |
errors.ECODE_INVAL) |
2372 |
invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES |
2373 |
if invalid_hvs:
|
2374 |
raise errors.OpPrereqError("Enabled hypervisors contains invalid" |
2375 |
" entries: %s" %
|
2376 |
utils.CommaJoin(invalid_hvs), |
2377 |
errors.ECODE_INVAL) |
2378 |
else:
|
2379 |
self.hv_list = cluster.enabled_hypervisors
|
2380 |
|
2381 |
if self.op.hvparams or self.op.enabled_hypervisors is not None: |
2382 |
# either the enabled list has changed, or the parameters have, validate
|
2383 |
for hv_name, hv_params in self.new_hvparams.items(): |
2384 |
if ((self.op.hvparams and hv_name in self.op.hvparams) or |
2385 |
(self.op.enabled_hypervisors and |
2386 |
hv_name in self.op.enabled_hypervisors)): |
2387 |
# either this is a new hypervisor, or its parameters have changed
|
2388 |
hv_class = hypervisor.GetHypervisor(hv_name) |
2389 |
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES) |
2390 |
hv_class.CheckParameterSyntax(hv_params) |
2391 |
_CheckHVParams(self, node_list, hv_name, hv_params)
|
2392 |
|
2393 |
if self.op.os_hvp: |
2394 |
# no need to check any newly-enabled hypervisors, since the
|
2395 |
# defaults have already been checked in the above code-block
|
2396 |
for os_name, os_hvp in self.new_os_hvp.items(): |
2397 |
for hv_name, hv_params in os_hvp.items(): |
2398 |
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES) |
2399 |
# we need to fill in the new os_hvp on top of the actual hv_p
|
2400 |
cluster_defaults = self.new_hvparams.get(hv_name, {})
|
2401 |
new_osp = objects.FillDict(cluster_defaults, hv_params) |
2402 |
hv_class = hypervisor.GetHypervisor(hv_name) |
2403 |
hv_class.CheckParameterSyntax(new_osp) |
2404 |
_CheckHVParams(self, node_list, hv_name, new_osp)
|
2405 |
|
2406 |
|
2407 |
def Exec(self, feedback_fn): |
2408 |
"""Change the parameters of the cluster.
|
2409 |
|
2410 |
"""
|
2411 |
if self.op.vg_name is not None: |
2412 |
new_volume = self.op.vg_name
|
2413 |
if not new_volume: |
2414 |
new_volume = None
|
2415 |
if new_volume != self.cfg.GetVGName(): |
2416 |
self.cfg.SetVGName(new_volume)
|
2417 |
else:
|
2418 |
feedback_fn("Cluster LVM configuration already in desired"
|
2419 |
" state, not changing")
|
2420 |
if self.op.hvparams: |
2421 |
self.cluster.hvparams = self.new_hvparams |
2422 |
if self.op.os_hvp: |
2423 |
self.cluster.os_hvp = self.new_os_hvp |
2424 |
if self.op.enabled_hypervisors is not None: |
2425 |
self.cluster.enabled_hypervisors = self.op.enabled_hypervisors |
2426 |
if self.op.beparams: |
2427 |
self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams |
2428 |
if self.op.nicparams: |
2429 |
self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams |
2430 |
|
2431 |
if self.op.candidate_pool_size is not None: |
2432 |
self.cluster.candidate_pool_size = self.op.candidate_pool_size |
2433 |
# we need to update the pool size here, otherwise the save will fail
|
2434 |
_AdjustCandidatePool(self, [])
|
2435 |
|
2436 |
if self.op.maintain_node_health is not None: |
2437 |
self.cluster.maintain_node_health = self.op.maintain_node_health |
2438 |
|
2439 |
self.cfg.Update(self.cluster, feedback_fn) |
2440 |
|
2441 |
|
2442 |
def _RedistributeAncillaryFiles(lu, additional_nodes=None): |
2443 |
"""Distribute additional files which are part of the cluster configuration.
|
2444 |
|
2445 |
ConfigWriter takes care of distributing the config and ssconf files, but
|
2446 |
there are more files which should be distributed to all nodes. This function
|
2447 |
makes sure those are copied.
|
2448 |
|
2449 |
@param lu: calling logical unit
|
2450 |
@param additional_nodes: list of nodes not in the config to distribute to
|
2451 |
|
2452 |
"""
|
2453 |
# 1. Gather target nodes
|
2454 |
myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode()) |
2455 |
dist_nodes = lu.cfg.GetOnlineNodeList() |
2456 |
if additional_nodes is not None: |
2457 |
dist_nodes.extend(additional_nodes) |
2458 |
if myself.name in dist_nodes: |
2459 |
dist_nodes.remove(myself.name) |
2460 |
|
2461 |
# 2. Gather files to distribute
|
2462 |
dist_files = set([constants.ETC_HOSTS,
|
2463 |
constants.SSH_KNOWN_HOSTS_FILE, |
2464 |
constants.RAPI_CERT_FILE, |
2465 |
constants.RAPI_USERS_FILE, |
2466 |
constants.CONFD_HMAC_KEY, |
2467 |
]) |
2468 |
|
2469 |
enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors |
2470 |
for hv_name in enabled_hypervisors: |
2471 |
hv_class = hypervisor.GetHypervisor(hv_name) |
2472 |
dist_files.update(hv_class.GetAncillaryFiles()) |
2473 |
|
2474 |
# 3. Perform the files upload
|
2475 |
for fname in dist_files: |
2476 |
if os.path.exists(fname):
|
2477 |
result = lu.rpc.call_upload_file(dist_nodes, fname) |
2478 |
for to_node, to_result in result.items(): |
2479 |
msg = to_result.fail_msg |
2480 |
if msg:
|
2481 |
msg = ("Copy of file %s to node %s failed: %s" %
|
2482 |
(fname, to_node, msg)) |
2483 |
lu.proc.LogWarning(msg) |
2484 |
|
2485 |
|
2486 |
class LURedistributeConfig(NoHooksLU): |
2487 |
"""Force the redistribution of cluster configuration.
|
2488 |
|
2489 |
This is a very simple LU.
|
2490 |
|
2491 |
"""
|
2492 |
_OP_REQP = [] |
2493 |
REQ_BGL = False
|
2494 |
|
2495 |
def ExpandNames(self): |
2496 |
self.needed_locks = {
|
2497 |
locking.LEVEL_NODE: locking.ALL_SET, |
2498 |
} |
2499 |
self.share_locks[locking.LEVEL_NODE] = 1 |
2500 |
|
2501 |
def CheckPrereq(self): |
2502 |
"""Check prerequisites.
|
2503 |
|
2504 |
"""
|
2505 |
|
2506 |
def Exec(self, feedback_fn): |
2507 |
"""Redistribute the configuration.
|
2508 |
|
2509 |
"""
|
2510 |
self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn) |
2511 |
_RedistributeAncillaryFiles(self)
|
2512 |
|
2513 |
|
2514 |
def _WaitForSync(lu, instance, oneshot=False): |
2515 |
"""Sleep and poll for an instance's disk to sync.
|
2516 |
|
2517 |
"""
|
2518 |
if not instance.disks: |
2519 |
return True |
2520 |
|
2521 |
if not oneshot: |
2522 |
lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
|
2523 |
|
2524 |
node = instance.primary_node |
2525 |
|
2526 |
for dev in instance.disks: |
2527 |
lu.cfg.SetDiskID(dev, node) |
2528 |
|
2529 |
# TODO: Convert to utils.Retry
|
2530 |
|
2531 |
retries = 0
|
2532 |
degr_retries = 10 # in seconds, as we sleep 1 second each time |
2533 |
while True: |
2534 |
max_time = 0
|
2535 |
done = True
|
2536 |
cumul_degraded = False
|
2537 |
rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks) |
2538 |
msg = rstats.fail_msg |
2539 |
if msg:
|
2540 |
lu.LogWarning("Can't get any data from node %s: %s", node, msg)
|
2541 |
retries += 1
|
2542 |
if retries >= 10: |
2543 |
raise errors.RemoteError("Can't contact node %s for mirror data," |
2544 |
" aborting." % node)
|
2545 |
time.sleep(6)
|
2546 |
continue
|
2547 |
rstats = rstats.payload |
2548 |
retries = 0
|
2549 |
for i, mstat in enumerate(rstats): |
2550 |
if mstat is None: |
2551 |
lu.LogWarning("Can't compute data for node %s/%s",
|
2552 |
node, instance.disks[i].iv_name) |
2553 |
continue
|
2554 |
|
2555 |
cumul_degraded = (cumul_degraded or
|
2556 |
(mstat.is_degraded and mstat.sync_percent is None)) |
2557 |
if mstat.sync_percent is not None: |
2558 |
done = False
|
2559 |
if mstat.estimated_time is not None: |
2560 |
rem_time = "%d estimated seconds remaining" % mstat.estimated_time
|
2561 |
max_time = mstat.estimated_time |
2562 |
else:
|
2563 |
rem_time = "no time estimate"
|
2564 |
lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
|
2565 |
(instance.disks[i].iv_name, mstat.sync_percent, |
2566 |
rem_time)) |
2567 |
|
2568 |
# if we're done but degraded, let's do a few small retries, to
|
2569 |
# make sure we see a stable and not transient situation; therefore
|
2570 |
# we force restart of the loop
|
2571 |
if (done or oneshot) and cumul_degraded and degr_retries > 0: |
2572 |
logging.info("Degraded disks found, %d retries left", degr_retries)
|
2573 |
degr_retries -= 1
|
2574 |
time.sleep(1)
|
2575 |
continue
|
2576 |
|
2577 |
if done or oneshot: |
2578 |
break
|
2579 |
|
2580 |
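# sleep at most one minute between polls, or less if the estimated
# remaining sync time is shorter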
time.sleep(min(60, max_time)) |
2581 |
|
2582 |
if done:
|
2583 |
lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
|
2584 |
return not cumul_degraded |
2585 |
|
2586 |
|
2587 |
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False): |
2588 |
"""Check that mirrors are not degraded.
|
2589 |
|
2590 |
The ldisk parameter, if True, will change the test from the
|
2591 |
is_degraded attribute (which represents overall non-ok status for
|
2592 |
the device(s)) to the ldisk (representing the local storage status).
|
2593 |
|
2594 |
"""
|
2595 |
lu.cfg.SetDiskID(dev, node) |
2596 |
|
2597 |
result = True
|
2598 |
|
2599 |
if on_primary or dev.AssembleOnSecondary(): |
2600 |
rstats = lu.rpc.call_blockdev_find(node, dev) |
2601 |
msg = rstats.fail_msg |
2602 |
if msg:
|
2603 |
lu.LogWarning("Can't find disk on node %s: %s", node, msg)
|
2604 |
result = False
|
2605 |
elif not rstats.payload: |
2606 |
lu.LogWarning("Can't find disk on node %s", node)
|
2607 |
result = False
|
2608 |
else:
|
2609 |
if ldisk:
|
2610 |
result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
|
2611 |
else:
|
2612 |
result = result and not rstats.payload.is_degraded |
2613 |
|
2614 |
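# note: children are checked with the default is_degraded test; the
# ldisk flag is not propagated into the recursion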
if dev.children:
|
2615 |
for child in dev.children: |
2616 |
result = result and _CheckDiskConsistency(lu, child, node, on_primary)
|
2617 |
|
2618 |
return result
|
2619 |
|
2620 |
|
2621 |
class LUDiagnoseOS(NoHooksLU): |
2622 |
"""Logical unit for OS diagnose/query.
|
2623 |
|
2624 |
"""
|
2625 |
_OP_REQP = ["output_fields", "names"] |
2626 |
REQ_BGL = False
|
2627 |
_FIELDS_STATIC = utils.FieldSet() |
2628 |
_FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants") |
2629 |
# Fields that need calculation of global os validity
|
2630 |
_FIELDS_NEEDVALID = frozenset(["valid", "variants"]) |
2631 |
|
2632 |
def ExpandNames(self): |
2633 |
if self.op.names: |
2634 |
raise errors.OpPrereqError("Selective OS query not supported", |
2635 |
errors.ECODE_INVAL) |
2636 |
|
2637 |
_CheckOutputFields(static=self._FIELDS_STATIC,
|
2638 |
dynamic=self._FIELDS_DYNAMIC,
|
2639 |
selected=self.op.output_fields)
|
2640 |
|
2641 |
# Lock all nodes, in shared mode
|
2642 |
# Temporary removal of locks, should be reverted later
|
2643 |
# TODO: reintroduce locks when they are lighter-weight
|
2644 |
self.needed_locks = {}
|
2645 |
#self.share_locks[locking.LEVEL_NODE] = 1
|
2646 |
#self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
2647 |
|
2648 |
def CheckPrereq(self): |
2649 |
"""Check prerequisites.
|
2650 |
|
2651 |
"""
|
2652 |
|
2653 |
@staticmethod
|
2654 |
def _DiagnoseByOS(rlist): |
"""Remaps a per-node return list into a per-OS per-node dictionary.
|
2656 |
|
2657 |
@param rlist: a map with node names as keys and OS objects as values
|
2658 |
|
2659 |
@rtype: dict
|
2660 |
@return: a dictionary with osnames as keys and as value another map, with
|
2661 |
nodes as keys and tuples of (path, status, diagnose, variants) as values, eg::
|
2662 |
|
2663 |
{"debian-etch": {"node1": [(/usr/lib/..., True, ""),
|
2664 |
(/srv/..., False, "invalid api")],
|
2665 |
"node2": [(/srv/..., True, "")]}
|
2666 |
}
|
2667 |
|
2668 |
"""
|
2669 |
all_os = {} |
2670 |
# we build here the list of nodes that didn't fail the RPC (at RPC
|
2671 |
# level), so that nodes with a non-responding node daemon don't
|
2672 |
# make all OSes invalid
|
2673 |
good_nodes = [node_name for node_name in rlist |
2674 |
if not rlist[node_name].fail_msg] |
2675 |
for node_name, nr in rlist.items(): |
2676 |
if nr.fail_msg or not nr.payload: |
2677 |
continue
|
2678 |
for name, path, status, diagnose, variants in nr.payload: |
2679 |
if name not in all_os: |
2680 |
# build a list of nodes for this os containing empty lists
|
2681 |
# for each node in node_list
|
2682 |
all_os[name] = {} |
2683 |
for nname in good_nodes: |
2684 |
all_os[name][nname] = [] |
2685 |
all_os[name][node_name].append((path, status, diagnose, variants)) |
2686 |
return all_os
|
2687 |
|
2688 |
def Exec(self, feedback_fn): |
2689 |
"""Compute the list of OSes.
|
2690 |
|
2691 |
"""
|
2692 |
valid_nodes = [node for node in self.cfg.GetOnlineNodeList()] |
2693 |
node_data = self.rpc.call_os_diagnose(valid_nodes)
|
2694 |
pol = self._DiagnoseByOS(node_data)
|
2695 |
output = [] |
2696 |
calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields) |
2697 |
calc_variants = "variants" in self.op.output_fields |
2698 |
|
2699 |
for os_name, os_data in pol.items(): |
2700 |
row = [] |
2701 |
if calc_valid:
|
2702 |
valid = True
|
2703 |
variants = None
|
2704 |
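# an OS is considered globally valid only if its first entry is valid
# on every node; the variants are the intersection across all nodes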
for osl in os_data.values(): |
2705 |
valid = valid and osl and osl[0][1] |
2706 |
if not valid: |
2707 |
variants = None
|
2708 |
break
|
2709 |
if calc_variants:
|
2710 |
node_variants = osl[0][3] |
2711 |
if variants is None: |
2712 |
variants = node_variants |
2713 |
else:
|
2714 |
variants = [v for v in variants if v in node_variants] |
2715 |
|
2716 |
for field in self.op.output_fields: |
2717 |
if field == "name": |
2718 |
val = os_name |
2719 |
elif field == "valid": |
2720 |
val = valid |
2721 |
elif field == "node_status": |
2722 |
# this is just a copy of the dict
|
2723 |
val = {} |
2724 |
for node_name, nos_list in os_data.items(): |
2725 |
val[node_name] = nos_list |
2726 |
elif field == "variants": |
2727 |
val = variants |
2728 |
else:
|
2729 |
raise errors.ParameterError(field)
|
2730 |
row.append(val) |
2731 |
output.append(row) |
2732 |
|
2733 |
return output
|
2734 |
|
2735 |
|
2736 |
class LURemoveNode(LogicalUnit): |
2737 |
"""Logical unit for removing a node.
|
2738 |
|
2739 |
"""
|
2740 |
HPATH = "node-remove"
|
2741 |
HTYPE = constants.HTYPE_NODE |
2742 |
_OP_REQP = ["node_name"]
|
2743 |
|
2744 |
def BuildHooksEnv(self): |
2745 |
"""Build hooks env.
|
2746 |
|
2747 |
This doesn't run on the target node in the pre phase as a failed
|
2748 |
node would then be impossible to remove.
|
2749 |
|
2750 |
"""
|
2751 |
env = { |
2752 |
"OP_TARGET": self.op.node_name, |
2753 |
"NODE_NAME": self.op.node_name, |
2754 |
} |
2755 |
all_nodes = self.cfg.GetNodeList()
|
2756 |
try:
|
2757 |
all_nodes.remove(self.op.node_name)
|
2758 |
except ValueError: |
2759 |
logging.warning("Node %s which is about to be removed not found"
                " in the list of all nodes", self.op.node_name)
2761 |
return env, all_nodes, all_nodes
|
2762 |
|
2763 |
def CheckPrereq(self): |
2764 |
"""Check prerequisites.
|
2765 |
|
2766 |
This checks:
|
2767 |
- the node exists in the configuration
|
2768 |
- it does not have primary or secondary instances
|
2769 |
- it's not the master
|
2770 |
|
2771 |
Any errors are signaled by raising errors.OpPrereqError.
|
2772 |
|
2773 |
"""
|
2774 |
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) |
2775 |
node = self.cfg.GetNodeInfo(self.op.node_name) |
2776 |
assert node is not None |
2777 |
|
2778 |
instance_list = self.cfg.GetInstanceList()
|
2779 |
|
2780 |
masternode = self.cfg.GetMasterNode()
|
2781 |
if node.name == masternode:
|
2782 |
raise errors.OpPrereqError("Node is the master node," |
2783 |
" you need to failover first.",
|
2784 |
errors.ECODE_INVAL) |
2785 |
|
2786 |
for instance_name in instance_list: |
2787 |
instance = self.cfg.GetInstanceInfo(instance_name)
|
2788 |
if node.name in instance.all_nodes: |
2789 |
raise errors.OpPrereqError("Instance %s is still running on the node,"
                           " please remove it first." % instance_name,
|
2791 |
errors.ECODE_INVAL) |
2792 |
self.op.node_name = node.name
|
2793 |
self.node = node
|
2794 |
|
2795 |
def Exec(self, feedback_fn): |
2796 |
"""Removes the node from the cluster.
|
2797 |
|
2798 |
"""
|
2799 |
node = self.node
|
2800 |
logging.info("Stopping the node daemon and removing configs from node %s",
|
2801 |
node.name) |
2802 |
|
2803 |
modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
|
2804 |
|
2805 |
# Promote nodes to master candidate as needed
|
2806 |
_AdjustCandidatePool(self, exceptions=[node.name])
|
2807 |
self.context.RemoveNode(node.name)
|
2808 |
|
2809 |
# Run post hooks on the node before it's removed
|
2810 |
hm = self.proc.hmclass(self.rpc.call_hooks_runner, self) |
2811 |
try:
|
2812 |
hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name]) |
2813 |
except:
|
2814 |
# pylint: disable-msg=W0702
|
2815 |
self.LogWarning("Errors occurred running hooks on %s" % node.name) |
2816 |
|
2817 |
result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
|
2818 |
msg = result.fail_msg |
2819 |
if msg:
|
2820 |
self.LogWarning("Errors encountered on the remote node while leaving" |
2821 |
" the cluster: %s", msg)
|
2822 |
|
2823 |
|
2824 |
class LUQueryNodes(NoHooksLU): |
2825 |
"""Logical unit for querying nodes.
|
2826 |
|
2827 |
"""
|
2828 |
# pylint: disable-msg=W0142
|
2829 |
_OP_REQP = ["output_fields", "names", "use_locking"] |
2830 |
REQ_BGL = False
|
2831 |
|
2832 |
_SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid", |
2833 |
"master_candidate", "offline", "drained"] |
2834 |
|
2835 |
_FIELDS_DYNAMIC = utils.FieldSet( |
2836 |
"dtotal", "dfree", |
2837 |
"mtotal", "mnode", "mfree", |
2838 |
"bootid",
|
2839 |
"ctotal", "cnodes", "csockets", |
2840 |
) |
2841 |
|
2842 |
_FIELDS_STATIC = utils.FieldSet(*[ |
2843 |
"pinst_cnt", "sinst_cnt", |
2844 |
"pinst_list", "sinst_list", |
2845 |
"pip", "sip", "tags", |
2846 |
"master",
|
2847 |
"role"] + _SIMPLE_FIELDS
|
2848 |
) |
2849 |
|
2850 |
def ExpandNames(self): |
2851 |
_CheckOutputFields(static=self._FIELDS_STATIC,
|
2852 |
dynamic=self._FIELDS_DYNAMIC,
|
2853 |
selected=self.op.output_fields)
|
2854 |
|
2855 |
self.needed_locks = {}
|
2856 |
self.share_locks[locking.LEVEL_NODE] = 1 |
2857 |
|
2858 |
if self.op.names: |
2859 |
self.wanted = _GetWantedNodes(self, self.op.names) |
2860 |
else:
|
2861 |
self.wanted = locking.ALL_SET
|
2862 |
|
2863 |
self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields) |
2864 |
self.do_locking = self.do_node_query and self.op.use_locking |
2865 |
if self.do_locking: |
2866 |
# if we don't request only static fields, we need to lock the nodes
|
2867 |
self.needed_locks[locking.LEVEL_NODE] = self.wanted |
2868 |
|
2869 |
def CheckPrereq(self): |
2870 |
"""Check prerequisites.
|
2871 |
|
2872 |
"""
|
2873 |
# The validation of the node list is done in _GetWantedNodes, if the
# list is not empty; if it is empty, there is no validation to do
|
2875 |
pass
|
2876 |
|
2877 |
def Exec(self, feedback_fn): |
2878 |
"""Computes the list of nodes and their attributes.
|
2879 |
|
2880 |
"""
|
2881 |
all_info = self.cfg.GetAllNodesInfo()
|
2882 |
if self.do_locking: |
2883 |
nodenames = self.acquired_locks[locking.LEVEL_NODE]
|
2884 |
elif self.wanted != locking.ALL_SET: |
2885 |
nodenames = self.wanted
|
2886 |
missing = set(nodenames).difference(all_info.keys())
|
2887 |
if missing:
|
2888 |
raise errors.OpExecError(
|
2889 |
"Some nodes were removed before retrieving their data: %s" % missing)
|
2890 |
else:
|
2891 |
nodenames = all_info.keys() |
2892 |
|
2893 |
nodenames = utils.NiceSort(nodenames) |
2894 |
nodelist = [all_info[name] for name in nodenames] |
2895 |
|
2896 |
# begin data gathering
|
2897 |
|
2898 |
if self.do_node_query: |
2899 |
live_data = {} |
2900 |
node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(), |
2901 |
self.cfg.GetHypervisorType())
|
2902 |
for name in nodenames: |
2903 |
nodeinfo = node_data[name] |
2904 |
if not nodeinfo.fail_msg and nodeinfo.payload: |
2905 |
nodeinfo = nodeinfo.payload |
2906 |
fn = utils.TryConvert |
2907 |
live_data[name] = { |
2908 |
"mtotal": fn(int, nodeinfo.get('memory_total', None)), |
2909 |
"mnode": fn(int, nodeinfo.get('memory_dom0', None)), |
2910 |
"mfree": fn(int, nodeinfo.get('memory_free', None)), |
2911 |
"dtotal": fn(int, nodeinfo.get('vg_size', None)), |
2912 |
"dfree": fn(int, nodeinfo.get('vg_free', None)), |
2913 |
"ctotal": fn(int, nodeinfo.get('cpu_total', None)), |
2914 |
"bootid": nodeinfo.get('bootid', None), |
2915 |
"cnodes": fn(int, nodeinfo.get('cpu_nodes', None)), |
2916 |
"csockets": fn(int, nodeinfo.get('cpu_sockets', None)), |
2917 |
} |
2918 |
else:
|
2919 |
live_data[name] = {} |
2920 |
else:
|
2921 |
live_data = dict.fromkeys(nodenames, {})
|
2922 |
|
2923 |
node_to_primary = dict([(name, set()) for name in nodenames]) |
2924 |
node_to_secondary = dict([(name, set()) for name in nodenames]) |
2925 |
|
2926 |
inst_fields = frozenset(("pinst_cnt", "pinst_list", |
2927 |
"sinst_cnt", "sinst_list")) |
2928 |
if inst_fields & frozenset(self.op.output_fields): |
2929 |
inst_data = self.cfg.GetAllInstancesInfo()
|
2930 |
|
2931 |
for inst in inst_data.values(): |
2932 |
if inst.primary_node in node_to_primary: |
2933 |
node_to_primary[inst.primary_node].add(inst.name) |
2934 |
for secnode in inst.secondary_nodes: |
2935 |
if secnode in node_to_secondary: |
2936 |
node_to_secondary[secnode].add(inst.name) |
2937 |
|
2938 |
master_node = self.cfg.GetMasterNode()
|
2939 |
|
2940 |
# end data gathering
|
2941 |
|
2942 |
output = [] |
2943 |
for node in nodelist: |
2944 |
node_output = [] |
2945 |
for field in self.op.output_fields: |
2946 |
if field in self._SIMPLE_FIELDS: |
2947 |
val = getattr(node, field)
|
2948 |
elif field == "pinst_list": |
2949 |
val = list(node_to_primary[node.name])
|
2950 |
elif field == "sinst_list": |
2951 |
val = list(node_to_secondary[node.name])
|
2952 |
elif field == "pinst_cnt": |
2953 |
val = len(node_to_primary[node.name])
|
2954 |
elif field == "sinst_cnt": |
2955 |
val = len(node_to_secondary[node.name])
|
2956 |
elif field == "pip": |
2957 |
val = node.primary_ip |
2958 |
elif field == "sip": |
2959 |
val = node.secondary_ip |
2960 |
elif field == "tags": |
2961 |
val = list(node.GetTags())
|
2962 |
elif field == "master": |
2963 |
val = node.name == master_node |
2964 |
elif self._FIELDS_DYNAMIC.Matches(field): |
2965 |
val = live_data[node.name].get(field, None)
|
2966 |
elif field == "role": |
2967 |
if node.name == master_node:
|
2968 |
val = "M"
|
2969 |
elif node.master_candidate:
|
2970 |
val = "C"
|
2971 |
elif node.drained:
|
2972 |
val = "D"
|
2973 |
elif node.offline:
|
2974 |
val = "O"
|
2975 |
else:
|
2976 |
val = "R"
|
2977 |
else:
|
2978 |
raise errors.ParameterError(field)
|
2979 |
node_output.append(val) |
2980 |
output.append(node_output) |
2981 |
|
2982 |
return output
|
2983 |
|
2984 |
|
2985 |
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUQueryNodeStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)

  def ExpandNames(self):
    storage_type = self.op.storage_type

    if storage_type not in constants.VALID_STORAGE_TYPES:
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                                 errors.ECODE_INVAL)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.op.name = getattr(self.op, "name", None)

    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


class LUModifyNodeStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
  REQ_BGL = False

  def CheckArguments(self):
    self.opnode_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type
    if storage_type not in constants.VALID_STORAGE_TYPES:
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """
    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


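# Illustrative note (not part of the original file): LUModifyNodeStorage only
# accepts the keys listed in constants.MODIFIABLE_STORAGE_FIELDS for the given
# storage type. A minimal sketch of a hypothetical caller building such an
# opcode; the node name, device name and the "allocatable" flag are assumptions
# made for the example, not values taken from this module:
#
#   op = opcodes.OpModifyNodeStorage(node_name="node1.example.com",
#                                    storage_type=constants.ST_LVM_PV,
#                                    name="/dev/sda3",
#                                    changes={constants.SF_ALLOCATABLE: False})

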
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def CheckArguments(self):
    # validate/normalize the node name
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.GetHostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given",
                                 errors.ECODE_INVAL)
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)

    if self.op.readd:
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # setup ssh on node
    if self.cfg.GetClusterInfo().modify_ssh_setup:
      logging.info("Copy ssh key to node %s", node)
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      keyarray = []
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                  priv_key, pub_key]

      for i in keyfiles:
        keyarray.append(utils.ReadFile(i))

      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                      keyarray[2], keyarray[3], keyarray[4],
                                      keyarray[5])
      result.Raise("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
                   prereq=True, ecode=errors.ECODE_ENVIRON)
      if not result.payload:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
      }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
      self.context.AddNode(new_node, self.proc.GetECId())


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    _CheckBooleanOpField(self.op, 'auto_promote')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we're offlining or draining the node
    self.offline_or_drain = (self.op.offline == True or
                             self.op.drained == True)
    self.deoffline_or_drain = (self.op.offline == False or
                               self.op.drained == False)
    self.might_demote = (self.op.master_candidate == False or
                         self.offline_or_drain)

    self.lock_all = self.op.auto_promote and self.might_demote

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via masterfailover",
                                   errors.ECODE_INVAL)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto_promote to allow promotion",
                                   errors.ECODE_INVAL)

    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name,
                                 errors.ECODE_INVAL)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.deoffline_or_drain and not self.offline_or_drain and not
        self.op.master_candidate == True and not node.master_candidate):
      self.op.master_candidate = _DecideSelfPromotion(self)
      if self.op.master_candidate:
        self.LogInfo("Autopromoting node to master candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
          rrc = self.rpc.call_node_demote_from_mc(node.name)
          msg = rrc.fail_msg
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # we locked all nodes, we adjust the CP before updating this node
    if self.lock_all:
      _AdjustCandidatePool(self, [node.name])

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result


class LUPowercycleNode(NoHooksLU):
  """Powercycles a node.

  """
  _OP_REQP = ["node_name", "force"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prereqs.

    """
    pass

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "nicparams": cluster.nicparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)
    if not hasattr(self.op, "ignore_size"):
      self.op.ignore_size = False

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
      _AssembleInstanceDisks(self, self.instance,
                             ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


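# Illustrative sketch (not in the original module): how callers in this file
# consume the (disks_ok, device_info) pair returned above; "lu" and "instance"
# stand for any logical unit and instance object.
#
#   disks_ok, disks_info = _AssembleInstanceDisks(lu, instance,
#                                                 ignore_size=False)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")
#   # disks_info is a list of (node, iv_name, device_path) tuples

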
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


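# Illustrative note (not part of the original file): with ignore_primary=True
# a shutdown failure on the primary node does not flip the return value, which
# failover-style callers typically rely on; a minimal sketch, with "lu" and
# "instance" as stand-ins:
#
#   if not _ShutdownInstanceDisks(lu, instance, ignore_primary=True):
#     lu.LogWarning("Some disks of instance %s failed to shutdown",
#                   instance.name)

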
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)


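# Illustrative sketch (not in the original module): the typical call pattern,
# as used by LUStartupInstance.CheckPrereq further down in this file; "lu",
# "instance" and "bep" are stand-ins for the caller's objects.
#
#   bep = lu.cfg.GetClusterInfo().FillBE(instance)
#   _CheckNodeFreeMemory(lu, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)

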
def _CheckNodesFreeDisk(lu, nodenames, requested):
  """Checks if nodes have enough free disk space in the default VG.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
                                   lu.cfg.GetHypervisorType())
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    vg_free = info.payload.get("vg_free", None)
    if not isinstance(vg_free, int):
      raise errors.OpPrereqError("Can't compute free disk space on node %s,"
                                 " result was '%s'" % (node, vg_free),
                                 errors.ECODE_ENVIRON)
    if requested > vg_free:
      raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                 " required %d MiB, available %d MiB" %
                                 (node, requested, vg_free),
                                 errors.ECODE_NORES)


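# Illustrative sketch (not in the original module): checking that every node
# that will host an instance's disks has room for the requested space; the
# node list and the 2048 MiB figure are assumptions made for the example.
#
#   _CheckNodesFreeDisk(lu, [pnode.name, snode.name], 2048)

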
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ),
                                   errors.ECODE_INVAL)
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ),
                                   errors.ECODE_INVAL)

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True, ecode=errors.ECODE_ENVIRON)
    if not remote_info.payload: # not running already
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.fail_msg
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.shutdown_timeout)
      result.Raise("Could not reboot instance")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance,
                                               self.shutdown_timeout)
      result.Raise("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.timeout = getattr(self.op, "timeout",
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.timeout
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    timeout = self.timeout
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
    msg = result.fail_msg
    if msg:
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    _CheckInstanceDown(self, instance, "cannot reinstall")

    self.op.os_type = getattr(self.op, "os_type", None)
    self.op.force_variant = getattr(self.op, "force_variant", False)
    if self.op.os_type is not None:
      # OS verification
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst, feedback_fn)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
                                             self.op.debug_level)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURecreateInstanceDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disks"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    if not isinstance(self.op.disks, list):
      raise errors.OpPrereqError("Invalid disks parameter", errors.ECODE_INVAL)
    for item in self.op.disks:
      if (not isinstance(item, int) or
          item < 0):
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
                                   str(item), errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)
    _CheckInstanceDown(self, instance, "cannot recreate disks")

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
        if idx >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
                                     errors.ECODE_INVAL)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    to_skip = []
    for idx, _ in enumerate(self.instance.disks):
      if idx not in self.op.disks: # disk idx has not been passed in
        to_skip.append(idx)
        continue

    _CreateDisks(self, self.instance, to_skip=to_skip)


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    _CheckNodeOnline(self, instance.primary_node)
    _CheckInstanceDown(self, instance, "cannot rename")
    self.instance = instance

    # new name verification
    name_info = utils.GetHostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable-msg=W0142
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
                    "serial_no", "ctime", "mtime", "uuid"]
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "nic_mode", "nic_link",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
                                    r"(nic)\.(bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "hvparams",
                                    ] + _SIMPLE_FIELDS +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS
                                   if name not in constants.HVC_GLOBALS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    # pylint: disable-msg=R0912
    # way too many branches here
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        else:
          if result.payload:
            live_data.update(result.payload)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    cluster = self.cfg.GetClusterInfo()
    for instance in instance_list:
      iout = []
      i_hv = cluster.FillHV(instance, skip_globals=True)
      i_be = cluster.FillBE(instance)
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
                                 nic.nicparams) for nic in instance.nics]
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field in self._SIMPLE_FIELDS:
          val = getattr(instance, field)
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "vcpus":
          val = i_be[constants.BE_VCPUS]
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          if instance.nics:
            val = instance.nics[0].ip
          else:
            val = None
        elif field == "nic_mode":
          if instance.nics:
            val = i_nicp[0][constants.NIC_MODE]
          else:
            val = None
        elif field == "nic_link":
          if instance.nics:
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "bridge":
          if (instance.nics and
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "mac":
          if instance.nics:
            val = instance.nics[0].mac
          else:
            val = None
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "modes":
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
            elif st_groups[1] == "links":
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
            elif st_groups[1] == "bridges":
              val = []
              for nicp in i_nicp:
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
                  val.append(nicp[constants.NIC_LINK])
                else:
                  val.append(None)
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "mode":
                  val = i_nicp[nic_idx][constants.NIC_MODE]
                elif st_groups[1] == "link":
                  val = i_nicp[nic_idx][constants.NIC_LINK]
                elif st_groups[1] == "bridge":
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
                  if nic_mode == constants.NIC_MODE_BRIDGED:
                    val = i_nicp[nic_idx][constants.NIC_LINK]
                  else:
                    val = None
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, ("Declared but unhandled variable parameter '%s'" %
                           field)
        else:
          assert False, "Declared but unhandled parameter '%s'" % field
        iout.append(val)
      output.append(iout)

    return output


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
      "OLD_PRIMARY": source_node,
      "OLD_SECONDARY": target_node,
      "NEW_PRIMARY": target_node,
      "NEW_SECONDARY": source_node,
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.",
                                 errors.ECODE_STATE)

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    if instance.admin_up:
      feedback_fn("* checking disk consistency between source and target")
      for dev in instance.disks:
        # for drbd, these are drbd over lvm
        if not _CheckDiskConsistency(self, dev, target_node, False):
          if not self.op.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover." % dev.iv_name)
    else:
      feedback_fn("* not checking disk consistency as instance is not running")

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, feedback_fn)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


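# Note: LUMigrateInstance only declares locks and builds the hooks
# environment; the actual migration steps are carried out by the
# TLMigrateInstance tasklet that its ExpandNames registers.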
class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       self.op.live, self.op.cleanup)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]
    env = _BuildInstanceHookEnvByObject(self, instance)
    env["MIGRATE_LIVE"] = self.op.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    env.update({
      "OLD_PRIMARY": source_node,
      "OLD_SECONDARY": target_node,
      "NEW_PRIMARY": target_node,
      "NEW_SECONDARY": source_node,
      })
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post


class LUMoveInstance(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
                                       self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    if instance.admin_up:
      # check memory requirements on the target node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateNode(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name", "live"]
  REQ_BGL = False

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    # Create tasklets for migrating instances for all instances on this node
    names = []
    tasklets = []

    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
      logging.debug("Migrating instance %s", inst.name)
      names.append(inst.name)

      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))

    self.tasklets = tasklets

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = names

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      }

    nl = [self.cfg.GetMasterNode()]

    return (env, nl, nl)


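# TLMigrateInstance is the tasklet that performs the actual migration work;
# it is instantiated by LUMigrateInstance (for a single instance) and by
# LUMigrateNode (one tasklet per primary instance of the node).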
class TLMigrateInstance(Tasklet):
  def __init__(self, lu, instance_name, live, cleanup):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.live = live
    self.cleanup = cleanup

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      result.Raise("Can't migrate, please use failover",
                   prereq=True, ecode=errors.ECODE_STATE)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to clean up after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the"
                         " drives: error '%s'\n"
                         "Please look and recover the instance status" %
                         str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Pre-migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    feedback_fn("Migrating instance %s" % self.instance.name)

    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }

    if self.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


def _CreateBlockDev(lu, node, instance, device, force_create, |
5651 |
info, force_open): |
5652 |
"""Create a tree of block devices on a given node.
|
5653 |
|
5654 |
If this device type has to be created on secondaries, create it and
|
5655 |
all its children.
|
5656 |
|
5657 |
If not, just recurse to children keeping the same 'force' value.
|
5658 |
|
5659 |
@param lu: the lu on whose behalf we execute
|
5660 |
@param node: the node on which to create the device
|
5661 |
@type instance: L{objects.Instance}
|
5662 |
@param instance: the instance which owns the device
|
5663 |
@type device: L{objects.Disk}
|
5664 |
@param device: the device to create
|
5665 |
@type force_create: boolean
|
5666 |
@param force_create: whether to force creation of this device; this
|
5667 |
will be change to True whenever we find a device which has
|
5668 |
CreateOnSecondary() attribute
|
5669 |
@param info: the extra 'metadata' we should attach to the device
|
5670 |
(this will be represented as a LVM tag)
|
5671 |
@type force_open: boolean
|
5672 |
@param force_open: this parameter will be passes to the
|
5673 |
L{backend.BlockdevCreate} function where it specifies
|
5674 |
whether we run on primary or not, and it affects both
|
5675 |
the child assembly and the device own Open() execution
|
5676 |
|
5677 |
"""
|
5678 |
if device.CreateOnSecondary():
|
5679 |
force_create = True
|
5680 |
|
5681 |
if device.children:
|
5682 |
for child in device.children: |
5683 |
_CreateBlockDev(lu, node, instance, child, force_create, |
5684 |
info, force_open) |
5685 |
|
5686 |
if not force_create: |
5687 |
return
|
5688 |
|
5689 |
_CreateSingleBlockDev(lu, node, instance, device, info, force_open) |
5690 |
|
5691 |
|
5692 |
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open): |
5693 |
"""Create a single block device on a given node.
|
5694 |
|
5695 |
This will not recurse over children of the device, so they must be
|
5696 |
created in advance.
|
5697 |
|
5698 |
@param lu: the lu on whose behalf we execute
|
5699 |
@param node: the node on which to create the device
|
5700 |
@type instance: L{objects.Instance}
|
5701 |
@param instance: the instance which owns the device
|
5702 |
@type device: L{objects.Disk}
|
5703 |
@param device: the device to create
|
5704 |
@param info: the extra 'metadata' we should attach to the device
|
5705 |
(this will be represented as a LVM tag)
|
5706 |
@type force_open: boolean
|
5707 |
@param force_open: this parameter will be passes to the
|
5708 |
L{backend.BlockdevCreate} function where it specifies
|
5709 |
whether we run on primary or not, and it affects both
|
5710 |
the child assembly and the device own Open() execution
|
5711 |
|
5712 |
"""
|
5713 |
lu.cfg.SetDiskID(device, node) |
5714 |
result = lu.rpc.call_blockdev_create(node, device, device.size, |
5715 |
instance.name, force_open, info) |
5716 |
result.Raise("Can't create block device %s on"
|
5717 |
" node %s for instance %s" % (device, node, instance.name))
|
5718 |
if device.physical_id is None: |
5719 |
device.physical_id = result.payload |
5720 |
|
5721 |
|
5722 |
def _GenerateUniqueNames(lu, exts): |
5723 |
"""Generate a suitable LV name.
|
5724 |
|
5725 |
This will generate a logical volume name for the given instance.
|
5726 |
|
5727 |
"""
|
5728 |
results = [] |
5729 |
for val in exts: |
5730 |
new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId()) |
5731 |
results.append("%s%s" % (new_id, val))
|
5732 |
return results
|
5733 |
|
5734 |
|
5735 |
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name, |
5736 |
p_minor, s_minor): |
5737 |
"""Generate a drbd8 device complete with its children.
|
5738 |
|
5739 |
"""
|
5740 |
port = lu.cfg.AllocatePort() |
5741 |
vgname = lu.cfg.GetVGName() |
5742 |
shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId()) |
5743 |
dev_data = objects.Disk(dev_type=constants.LD_LV, size=size, |
5744 |
logical_id=(vgname, names[0]))
|
5745 |
dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
|
5746 |
logical_id=(vgname, names[1]))
|
5747 |
drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size, |
5748 |
logical_id=(primary, secondary, port, |
5749 |
p_minor, s_minor, |
5750 |
shared_secret), |
5751 |
children=[dev_data, dev_meta], |
5752 |
iv_name=iv_name) |
5753 |
return drbd_dev
|
5754 |
|
5755 |
|
5756 |
def _GenerateDiskTemplate(lu, template_name, |
5757 |
instance_name, primary_node, |
5758 |
secondary_nodes, disk_info, |
5759 |
file_storage_dir, file_driver, |
5760 |
base_index): |
5761 |
"""Generate the entire disk layout for a given template type.
|
5762 |
|
5763 |
"""
|
5764 |
#TODO: compute space requirements
|
5765 |
|
5766 |
vgname = lu.cfg.GetVGName() |
5767 |
disk_count = len(disk_info)
|
5768 |
disks = [] |
5769 |
if template_name == constants.DT_DISKLESS:
|
5770 |
pass
|
5771 |
elif template_name == constants.DT_PLAIN:
|
5772 |
if len(secondary_nodes) != 0: |
5773 |
raise errors.ProgrammerError("Wrong template configuration") |
5774 |
|
5775 |
names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
|
5776 |
for i in range(disk_count)]) |
5777 |
for idx, disk in enumerate(disk_info): |
5778 |
disk_index = idx + base_index |
5779 |
disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
|
5780 |
logical_id=(vgname, names[idx]), |
5781 |
iv_name="disk/%d" % disk_index,
|
5782 |
mode=disk["mode"])
|
5783 |
disks.append(disk_dev) |
5784 |
elif template_name == constants.DT_DRBD8:
|
5785 |
if len(secondary_nodes) != 1: |
5786 |
raise errors.ProgrammerError("Wrong template configuration") |
5787 |
remote_node = secondary_nodes[0]
|
5788 |
minors = lu.cfg.AllocateDRBDMinor( |
5789 |
[primary_node, remote_node] * len(disk_info), instance_name)
|
5790 |
|
5791 |
names = [] |
5792 |
for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i) |
5793 |
for i in range(disk_count)]): |
5794 |
names.append(lv_prefix + "_data")
|
5795 |
names.append(lv_prefix + "_meta")
|
5796 |
for idx, disk in enumerate(disk_info): |
5797 |
disk_index = idx + base_index |
5798 |
disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node, |
5799 |
disk["size"], names[idx*2:idx*2+2], |
5800 |
"disk/%d" % disk_index,
|
5801 |
minors[idx*2], minors[idx*2+1]) |
5802 |
disk_dev.mode = disk["mode"]
|
5803 |
disks.append(disk_dev) |
5804 |
elif template_name == constants.DT_FILE:
|
5805 |
if len(secondary_nodes) != 0: |
5806 |
raise errors.ProgrammerError("Wrong template configuration") |
5807 |
|
5808 |
for idx, disk in enumerate(disk_info): |
5809 |
disk_index = idx + base_index |
5810 |
disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
|
5811 |
iv_name="disk/%d" % disk_index,
|
5812 |
logical_id=(file_driver, |
5813 |
"%s/disk%d" % (file_storage_dir,
|
5814 |
disk_index)), |
5815 |
mode=disk["mode"])
|
5816 |
disks.append(disk_dev) |
5817 |
else:
|
5818 |
raise errors.ProgrammerError("Invalid disk template '%s'" % template_name) |
5819 |
return disks
|
5820 |
|
5821 |
|
5822 |
def _GetInstanceInfoText(instance): |
5823 |
"""Compute that text that should be added to the disk's metadata.
|
5824 |
|
5825 |
"""
|
5826 |
return "originstname+%s" % instance.name |
5827 |
|
5828 |
|
5829 |
def _CreateDisks(lu, instance, to_skip=None, target_node=None): |
5830 |
"""Create all disks for an instance.
|
5831 |
|
5832 |
This abstracts away some work from AddInstance.
|
5833 |
|
5834 |
@type lu: L{LogicalUnit}
|
5835 |
@param lu: the logical unit on whose behalf we execute
|
5836 |
@type instance: L{objects.Instance}
|
5837 |
@param instance: the instance whose disks we should create
|
5838 |
@type to_skip: list
|
5839 |
@param to_skip: list of indices to skip
|
5840 |
@type target_node: string
|
5841 |
@param target_node: if passed, overrides the target node for creation
|
5842 |
@rtype: boolean
|
5843 |
@return: the success of the creation
|
5844 |
|
5845 |
"""
|
5846 |
info = _GetInstanceInfoText(instance) |
5847 |
if target_node is None: |
5848 |
pnode = instance.primary_node |
5849 |
all_nodes = instance.all_nodes |
5850 |
else:
|
5851 |
pnode = target_node |
5852 |
all_nodes = [pnode] |
5853 |
|
5854 |
if instance.disk_template == constants.DT_FILE:
|
5855 |
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) |
5856 |
result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir) |
5857 |
|
5858 |
result.Raise("Failed to create directory '%s' on"
|
5859 |
" node %s" % (file_storage_dir, pnode))
|
5860 |
|
5861 |
# Note: this needs to be kept in sync with adding of disks in
|
5862 |
# LUSetInstanceParams
|
5863 |
for idx, device in enumerate(instance.disks): |
5864 |
if to_skip and idx in to_skip: |
5865 |
continue
|
5866 |
logging.info("Creating volume %s for instance %s",
|
5867 |
device.iv_name, instance.name) |
5868 |
#HARDCODE
|
5869 |
for node in all_nodes: |
5870 |
f_create = node == pnode |
5871 |
_CreateBlockDev(lu, node, instance, device, f_create, info, f_create) |
5872 |
|
5873 |
|
5874 |
def _RemoveDisks(lu, instance, target_node=None): |
5875 |
"""Remove all disks for an instance.
|
5876 |
|
5877 |
This abstracts away some work from `AddInstance()` and
|
5878 |
`RemoveInstance()`. Note that in case some of the devices couldn't
|
5879 |
be removed, the removal will continue with the other ones (compare
|
5880 |
with `_CreateDisks()`).
|
5881 |
|
5882 |
@type lu: L{LogicalUnit}
|
5883 |
@param lu: the logical unit on whose behalf we execute
|
5884 |
@type instance: L{objects.Instance}
|
5885 |
@param instance: the instance whose disks we should remove
|
5886 |
@type target_node: string
|
5887 |
@param target_node: used to override the node on which to remove the disks
|
5888 |
@rtype: boolean
|
5889 |
@return: the success of the removal
|
5890 |
|
5891 |
"""
|
5892 |
logging.info("Removing block devices for instance %s", instance.name)
|
5893 |
|
5894 |
all_result = True
|
5895 |
for device in instance.disks: |
5896 |
if target_node:
|
5897 |
edata = [(target_node, device)] |
5898 |
else:
|
5899 |
edata = device.ComputeNodeTree(instance.primary_node) |
5900 |
for node, disk in edata: |
5901 |
lu.cfg.SetDiskID(disk, node) |
5902 |
msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg |
5903 |
if msg:
|
5904 |
lu.LogWarning("Could not remove block device %s on node %s,"
|
5905 |
" continuing anyway: %s", device.iv_name, node, msg)
|
5906 |
all_result = False
|
5907 |
|
5908 |
if instance.disk_template == constants.DT_FILE:
|
5909 |
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) |
5910 |
if target_node:
|
5911 |
tgt = target_node |
5912 |
else:
|
5913 |
tgt = instance.primary_node |
5914 |
result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir) |
5915 |
if result.fail_msg:
|
5916 |
lu.LogWarning("Could not remove directory '%s' on node %s: %s",
|
5917 |
file_storage_dir, instance.primary_node, result.fail_msg) |
5918 |
all_result = False
|
5919 |
|
5920 |
return all_result
|
5921 |
|
5922 |
|
5923 |
def _ComputeDiskSize(disk_template, disks): |
5924 |
"""Compute disk size requirements in the volume group
|
5925 |
|
5926 |
"""
|
5927 |
# Required free disk space as a function of disk and swap space
|
5928 |
req_size_dict = { |
5929 |
constants.DT_DISKLESS: None,
|
5930 |
constants.DT_PLAIN: sum(d["size"] for d in disks), |
5931 |
# 128 MB are added for drbd metadata for each disk
|
5932 |
constants.DT_DRBD8: sum(d["size"] + 128 for d in disks), |
5933 |
constants.DT_FILE: None,
|
5934 |
} |
5935 |
|
5936 |
if disk_template not in req_size_dict: |
5937 |
raise errors.ProgrammerError("Disk template '%s' size requirement" |
5938 |
" is unknown" % disk_template)
|
5939 |
|
5940 |
return req_size_dict[disk_template]
|
5941 |
|
5942 |
|
5943 |
def _CheckHVParams(lu, nodenames, hvname, hvparams): |
5944 |
"""Hypervisor parameter validation.
|
5945 |
|
5946 |
This function abstract the hypervisor parameter validation to be
|
5947 |
used in both instance create and instance modify.
|
5948 |
|
5949 |
@type lu: L{LogicalUnit}
|
5950 |
@param lu: the logical unit for which we check
|
5951 |
@type nodenames: list
|
5952 |
@param nodenames: the list of nodes on which we should check
|
5953 |
@type hvname: string
|
5954 |
@param hvname: the name of the hypervisor we should use
|
5955 |
@type hvparams: dict
|
5956 |
@param hvparams: the parameters which we need to check
|
5957 |
@raise errors.OpPrereqError: if the parameters are not valid
|
5958 |
|
5959 |
"""
|
5960 |
hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, |
5961 |
hvname, |
5962 |
hvparams) |
5963 |
for node in nodenames: |
5964 |
info = hvinfo[node] |
5965 |
if info.offline:
|
5966 |
continue
|
5967 |
info.Raise("Hypervisor parameter validation failed on node %s" % node)
|
5968 |
|
5969 |
|
5970 |
class LUCreateInstance(LogicalUnit): |
5971 |
"""Create an instance.
|
5972 |
|
5973 |
"""
|
5974 |
HPATH = "instance-add"
|
5975 |
HTYPE = constants.HTYPE_INSTANCE |
5976 |
_OP_REQP = ["instance_name", "disks", "disk_template", |
5977 |
"mode", "start", |
5978 |
"wait_for_sync", "ip_check", "nics", |
5979 |
"hvparams", "beparams"] |
5980 |
REQ_BGL = False
|
5981 |
|
5982 |
def CheckArguments(self): |
5983 |
"""Check arguments.
|
5984 |
|
5985 |
"""
|
5986 |
# set optional parameters to none if they don't exist
|
5987 |
for attr in ["pnode", "snode", "iallocator", "hypervisor"]: |
5988 |
if not hasattr(self.op, attr): |
5989 |
setattr(self.op, attr, None) |
5990 |
|
5991 |
# do not require name_check to ease forward/backward compatibility
|
5992 |
# for tools
|
5993 |
if not hasattr(self.op, "name_check"): |
5994 |
self.op.name_check = True |
5995 |
if not hasattr(self.op, "no_install"): |
5996 |
self.op.no_install = False |
5997 |
if self.op.no_install and self.op.start: |
5998 |
self.LogInfo("No-installation mode selected, disabling startup") |
5999 |
self.op.start = False |
6000 |
# validate/normalize the instance name
|
6001 |
self.op.instance_name = utils.HostInfo.NormalizeName(self.op.instance_name) |
6002 |
if self.op.ip_check and not self.op.name_check: |
6003 |
# TODO: make the ip check more flexible and not depend on the name check
|
6004 |
raise errors.OpPrereqError("Cannot do ip checks without a name check", |
6005 |
errors.ECODE_INVAL) |
6006 |
if (self.op.disk_template == constants.DT_FILE and |
6007 |
not constants.ENABLE_FILE_STORAGE):
|
6008 |
raise errors.OpPrereqError("File storage disabled at configure time", |
6009 |
errors.ECODE_INVAL) |
6010 |
# check disk information: either all adopt, or no adopt
|
6011 |
has_adopt = has_no_adopt = False
|
6012 |
for disk in self.op.disks: |
6013 |
if "adopt" in disk: |
6014 |
has_adopt = True
|
6015 |
else:
|
6016 |
has_no_adopt = True
|
6017 |
if has_adopt and has_no_adopt: |
6018 |
raise errors.OpPrereqError("Either all disks are adopted or none is", |
6019 |
errors.ECODE_INVAL) |
6020 |
if has_adopt:
|
6021 |
if self.op.disk_template != constants.DT_PLAIN: |
6022 |
raise errors.OpPrereqError("Disk adoption is only supported for the" |
6023 |
" 'plain' disk template",
|
6024 |
errors.ECODE_INVAL) |
6025 |
if self.op.iallocator is not None: |
6026 |
raise errors.OpPrereqError("Disk adoption not allowed with an" |
6027 |
" iallocator script", errors.ECODE_INVAL)
|
6028 |
if self.op.mode == constants.INSTANCE_IMPORT: |
6029 |
raise errors.OpPrereqError("Disk adoption not allowed for" |
6030 |
" instance import", errors.ECODE_INVAL)
|
6031 |
|
6032 |
self.adopt_disks = has_adopt
|
6033 |
|
6034 |
# verify creation mode
|
6035 |
if self.op.mode not in (constants.INSTANCE_CREATE, |
6036 |
constants.INSTANCE_IMPORT): |
6037 |
raise errors.OpPrereqError("Invalid instance creation mode '%s'" % |
6038 |
self.op.mode, errors.ECODE_INVAL)
|
6039 |
|
6040 |
# disk template
|
6041 |
_CheckDiskTemplate(self.op.disk_template)
|
6042 |
|
6043 |
# instance name verification
|
6044 |
if self.op.name_check: |
6045 |
self.hostname1 = utils.GetHostInfo(self.op.instance_name) |
6046 |
self.op.instance_name = self.hostname1.name |
6047 |
# used in CheckPrereq for ip ping check
|
6048 |
self.check_ip = self.hostname1.ip |
6049 |
else:
|
6050 |
self.check_ip = None |
6051 |
|
6052 |
# file storage checks
|
6053 |
if (self.op.file_driver and |
6054 |
not self.op.file_driver in constants.FILE_DRIVER): |
6055 |
raise errors.OpPrereqError("Invalid file driver name '%s'" % |
6056 |
self.op.file_driver, errors.ECODE_INVAL)
|
6057 |
|
6058 |
if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir): |
6059 |
raise errors.OpPrereqError("File storage directory path not absolute", |
6060 |
errors.ECODE_INVAL) |
6061 |
|
6062 |
### Node/iallocator related checks
|
6063 |
if [self.op.iallocator, self.op.pnode].count(None) != 1: |
6064 |
raise errors.OpPrereqError("One and only one of iallocator and primary" |
6065 |
" node must be given",
|
6066 |
errors.ECODE_INVAL) |
6067 |
|
6068 |
if self.op.mode == constants.INSTANCE_IMPORT: |
6069 |
# On import force_variant must be True, because if we forced it at
|
6070 |
# initial install, our only chance when importing it back is that it
|
6071 |
# works again!
|
6072 |
self.op.force_variant = True |
6073 |
|
6074 |
if self.op.no_install: |
6075 |
self.LogInfo("No-installation mode has no effect during import") |
6076 |
|
6077 |
else: # INSTANCE_CREATE |
6078 |
if getattr(self.op, "os_type", None) is None: |
6079 |
raise errors.OpPrereqError("No guest OS specified", |
6080 |
errors.ECODE_INVAL) |
6081 |
self.op.force_variant = getattr(self.op, "force_variant", False) |
6082 |
|
6083 |
def ExpandNames(self): |
6084 |
"""ExpandNames for CreateInstance.
|
6085 |
|
6086 |
Figure out the right locks for instance creation.
|
6087 |
|
6088 |
"""
|
6089 |
self.needed_locks = {}
|
6090 |
|
6091 |
# cheap checks, mostly valid constants given
|
6092 |
|
6093 |
if self.op.hypervisor is None: |
6094 |
self.op.hypervisor = self.cfg.GetHypervisorType() |
6095 |
|
6096 |
cluster = self.cfg.GetClusterInfo()
|
6097 |
enabled_hvs = cluster.enabled_hypervisors |
6098 |
if self.op.hypervisor not in enabled_hvs: |
6099 |
raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the" |
6100 |
" cluster (%s)" % (self.op.hypervisor, |
6101 |
",".join(enabled_hvs)),
|
6102 |
errors.ECODE_STATE) |
6103 |
|
6104 |
# check hypervisor parameter syntax (locally)
|
6105 |
utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
|
6106 |
filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
|
6107 |
self.op.hvparams)
|
6108 |
hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
|
6109 |
hv_type.CheckParameterSyntax(filled_hvp) |
6110 |
self.hv_full = filled_hvp
|
6111 |
# check that we don't specify global parameters on an instance
|
6112 |
_CheckGlobalHvParams(self.op.hvparams)
|
6113 |
|
6114 |
# fill and remember the beparams dict
|
6115 |
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
|
6116 |
self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
|
6117 |
self.op.beparams)
|
6118 |
|
6119 |
#### instance parameters check
|
6120 |
|
6121 |
instance_name = self.op.instance_name
|
6122 |
# this is just a preventive check, but someone might still add this
|
6123 |
# instance in the meantime, and creation will fail at lock-add time
|
6124 |
if instance_name in self.cfg.GetInstanceList(): |
6125 |
raise errors.OpPrereqError("Instance '%s' is already in the cluster" % |
6126 |
instance_name, errors.ECODE_EXISTS) |
6127 |
|
6128 |
self.add_locks[locking.LEVEL_INSTANCE] = instance_name
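# Declaring the name in add_locks makes the processor create and acquire a
# brand-new instance lock when locking starts; if another job has added the
# same instance in the meantime, that step fails, backing up the check above.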
|
6129 |
|
6130 |
# NIC buildup
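# Each entry of self.op.nics is a dict; a hedged example of what the loop
# below accepts (key names follow the .get() calls, values are illustrative):
#   {"mode": "bridged", "link": "br0", "ip": "auto", "mac": "auto"}
# Missing keys fall back to the cluster-level nicparams defaults.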
|
6131 |
self.nics = []
|
6132 |
for idx, nic in enumerate(self.op.nics): |
6133 |
nic_mode_req = nic.get("mode", None) |
6134 |
nic_mode = nic_mode_req |
6135 |
if nic_mode is None: |
6136 |
nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE] |
6137 |
|
6138 |
# in routed mode, for the first nic, the default ip is 'auto'
|
6139 |
if nic_mode == constants.NIC_MODE_ROUTED and idx == 0: |
6140 |
default_ip_mode = constants.VALUE_AUTO |
6141 |
else:
|
6142 |
default_ip_mode = constants.VALUE_NONE |
6143 |
|
6144 |
# ip validity checks
|
6145 |
ip = nic.get("ip", default_ip_mode)
|
6146 |
if ip is None or ip.lower() == constants.VALUE_NONE: |
6147 |
nic_ip = None
|
6148 |
elif ip.lower() == constants.VALUE_AUTO:
|
6149 |
if not self.op.name_check: |
6150 |
raise errors.OpPrereqError("IP address set to auto but name checks" |
6151 |
" have been skipped. Aborting.",
|
6152 |
errors.ECODE_INVAL) |
6153 |
nic_ip = self.hostname1.ip
|
6154 |
else:
|
6155 |
if not utils.IsValidIP(ip): |
6156 |
raise errors.OpPrereqError("Given IP address '%s' doesn't look" |
6157 |
" like a valid IP" % ip,
|
6158 |
errors.ECODE_INVAL) |
6159 |
nic_ip = ip |
6160 |
|
6161 |
# TODO: check the ip address for uniqueness
|
6162 |
if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip: |
6163 |
raise errors.OpPrereqError("Routed nic mode requires an ip address", |
6164 |
errors.ECODE_INVAL) |
6165 |
|
6166 |
# MAC address verification
|
6167 |
mac = nic.get("mac", constants.VALUE_AUTO)
|
6168 |
if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): |
6169 |
mac = utils.NormalizeAndValidateMac(mac) |
6170 |
|
6171 |
try:
|
6172 |
self.cfg.ReserveMAC(mac, self.proc.GetECId()) |
6173 |
except errors.ReservationError:
|
6174 |
raise errors.OpPrereqError("MAC address %s already in use" |
6175 |
" in cluster" % mac,
|
6176 |
errors.ECODE_NOTUNIQUE) |
6177 |
|
6178 |
# bridge verification
|
6179 |
bridge = nic.get("bridge", None) |
6180 |
link = nic.get("link", None) |
6181 |
if bridge and link: |
6182 |
raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'" |
6183 |
" at the same time", errors.ECODE_INVAL)
|
6184 |
elif bridge and nic_mode == constants.NIC_MODE_ROUTED: |
6185 |
raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic", |
6186 |
errors.ECODE_INVAL) |
6187 |
elif bridge:
|
6188 |
link = bridge |
6189 |
|
6190 |
nicparams = {} |
6191 |
if nic_mode_req:
|
6192 |
nicparams[constants.NIC_MODE] = nic_mode_req |
6193 |
if link:
|
6194 |
nicparams[constants.NIC_LINK] = link |
6195 |
|
6196 |
check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT], |
6197 |
nicparams) |
6198 |
objects.NIC.CheckParameterSyntax(check_params) |
6199 |
self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
|
6200 |
|
6201 |
# disk checks/pre-build
|
6202 |
self.disks = []
|
6203 |
for disk in self.op.disks: |
6204 |
mode = disk.get("mode", constants.DISK_RDWR)
|
6205 |
if mode not in constants.DISK_ACCESS_SET: |
6206 |
raise errors.OpPrereqError("Invalid disk access mode '%s'" % |
6207 |
mode, errors.ECODE_INVAL) |
6208 |
size = disk.get("size", None) |
6209 |
if size is None: |
6210 |
raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL) |
6211 |
try:
|
6212 |
size = int(size)
|
6213 |
except (TypeError, ValueError): |
6214 |
raise errors.OpPrereqError("Invalid disk size '%s'" % size, |
6215 |
errors.ECODE_INVAL) |
6216 |
new_disk = {"size": size, "mode": mode} |
6217 |
if "adopt" in disk: |
6218 |
new_disk["adopt"] = disk["adopt"] |
6219 |
self.disks.append(new_disk)
|
6220 |
|
6221 |
if self.op.iallocator: |
6222 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
6223 |
else:
|
6224 |
self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode) |
6225 |
nodelist = [self.op.pnode]
|
6226 |
if self.op.snode is not None: |
6227 |
self.op.snode = _ExpandNodeName(self.cfg, self.op.snode) |
6228 |
nodelist.append(self.op.snode)
|
6229 |
self.needed_locks[locking.LEVEL_NODE] = nodelist
|
6230 |
|
6231 |
# in case of import lock the source node too
|
6232 |
if self.op.mode == constants.INSTANCE_IMPORT: |
6233 |
src_node = getattr(self.op, "src_node", None) |
6234 |
src_path = getattr(self.op, "src_path", None) |
6235 |
|
6236 |
if src_path is None: |
6237 |
self.op.src_path = src_path = self.op.instance_name |
6238 |
|
6239 |
if src_node is None: |
6240 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
6241 |
self.op.src_node = None |
6242 |
if os.path.isabs(src_path):
|
6243 |
raise errors.OpPrereqError("Importing an instance from an absolute" |
6244 |
" path requires a source node option.",
|
6245 |
errors.ECODE_INVAL) |
6246 |
else:
|
6247 |
self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node) |
6248 |
if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET: |
6249 |
self.needed_locks[locking.LEVEL_NODE].append(src_node)
|
6250 |
if not os.path.isabs(src_path): |
6251 |
self.op.src_path = src_path = \
|
6252 |
utils.PathJoin(constants.EXPORT_DIR, src_path) |
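# At this point: with an explicit source node, src_path is absolute (a
# relative path has been anchored under EXPORT_DIR); without one it stays
# relative and CheckPrereq searches the export lists of all locked nodes.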
6253 |
|
6254 |
def _RunAllocator(self): |
6255 |
"""Run the allocator based on input opcode.
|
6256 |
|
6257 |
"""
|
6258 |
nics = [n.ToDict() for n in self.nics] |
6259 |
ial = IAllocator(self.cfg, self.rpc, |
6260 |
mode=constants.IALLOCATOR_MODE_ALLOC, |
6261 |
name=self.op.instance_name,
|
6262 |
disk_template=self.op.disk_template,
|
6263 |
tags=[], |
6264 |
os=self.op.os_type,
|
6265 |
vcpus=self.be_full[constants.BE_VCPUS],
|
6266 |
mem_size=self.be_full[constants.BE_MEMORY],
|
6267 |
disks=self.disks,
|
6268 |
nics=nics, |
6269 |
hypervisor=self.op.hypervisor,
|
6270 |
) |
6271 |
|
6272 |
ial.Run(self.op.iallocator)
|
6273 |
|
6274 |
if not ial.success: |
6275 |
raise errors.OpPrereqError("Can't compute nodes using" |
6276 |
" iallocator '%s': %s" %
|
6277 |
(self.op.iallocator, ial.info),
|
6278 |
errors.ECODE_NORES) |
6279 |
if len(ial.result) != ial.required_nodes: |
6280 |
raise errors.OpPrereqError("iallocator '%s' returned invalid number" |
6281 |
" of nodes (%s), required %s" %
|
6282 |
(self.op.iallocator, len(ial.result), |
6283 |
ial.required_nodes), errors.ECODE_FAULT) |
6284 |
self.op.pnode = ial.result[0] |
6285 |
self.LogInfo("Selected nodes for instance %s via iallocator %s: %s", |
6286 |
self.op.instance_name, self.op.iallocator, |
6287 |
utils.CommaJoin(ial.result)) |
6288 |
if ial.required_nodes == 2: |
6289 |
self.op.snode = ial.result[1] |
6290 |
|
6291 |
def BuildHooksEnv(self): |
6292 |
"""Build hooks env.
|
6293 |
|
6294 |
This runs on master, primary and secondary nodes of the instance.
|
6295 |
|
6296 |
"""
|
6297 |
env = { |
6298 |
"ADD_MODE": self.op.mode, |
6299 |
} |
6300 |
if self.op.mode == constants.INSTANCE_IMPORT: |
6301 |
env["SRC_NODE"] = self.op.src_node |
6302 |
env["SRC_PATH"] = self.op.src_path |
6303 |
env["SRC_IMAGES"] = self.src_images |
6304 |
|
6305 |
env.update(_BuildInstanceHookEnv( |
6306 |
name=self.op.instance_name,
|
6307 |
primary_node=self.op.pnode,
|
6308 |
secondary_nodes=self.secondaries,
|
6309 |
status=self.op.start,
|
6310 |
os_type=self.op.os_type,
|
6311 |
memory=self.be_full[constants.BE_MEMORY],
|
6312 |
vcpus=self.be_full[constants.BE_VCPUS],
|
6313 |
nics=_NICListToTuple(self, self.nics), |
6314 |
disk_template=self.op.disk_template,
|
6315 |
disks=[(d["size"], d["mode"]) for d in self.disks], |
6316 |
bep=self.be_full,
|
6317 |
hvp=self.hv_full,
|
6318 |
hypervisor_name=self.op.hypervisor,
|
6319 |
)) |
6320 |
|
6321 |
nl = ([self.cfg.GetMasterNode(), self.op.pnode] + |
6322 |
self.secondaries)
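# The same node list is used for both the pre- and the post-hook phase.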
|
6323 |
return env, nl, nl
|
6324 |
|
6325 |
|
6326 |
def CheckPrereq(self): |
6327 |
"""Check prerequisites.
|
6328 |
|
6329 |
"""
|
6330 |
if (not self.cfg.GetVGName() and |
6331 |
self.op.disk_template not in constants.DTS_NOT_LVM): |
6332 |
raise errors.OpPrereqError("Cluster does not support lvm-based" |
6333 |
" instances", errors.ECODE_STATE)
|
6334 |
|
6335 |
if self.op.mode == constants.INSTANCE_IMPORT: |
6336 |
src_node = self.op.src_node
|
6337 |
src_path = self.op.src_path
|
6338 |
|
6339 |
if src_node is None: |
6340 |
locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
|
6341 |
exp_list = self.rpc.call_export_list(locked_nodes)
|
6342 |
found = False
|
6343 |
for node in exp_list: |
6344 |
if exp_list[node].fail_msg:
|
6345 |
continue
|
6346 |
if src_path in exp_list[node].payload: |
6347 |
found = True
|
6348 |
self.op.src_node = src_node = node
|
6349 |
self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
|
6350 |
src_path) |
6351 |
break
|
6352 |
if not found: |
6353 |
raise errors.OpPrereqError("No export found for relative path %s" % |
6354 |
src_path, errors.ECODE_INVAL) |
6355 |
|
6356 |
_CheckNodeOnline(self, src_node)
|
6357 |
result = self.rpc.call_export_info(src_node, src_path)
|
6358 |
result.Raise("No export or invalid export found in dir %s" % src_path)
|
6359 |
|
6360 |
export_info = objects.SerializableConfigParser.Loads(str(result.payload))
|
6361 |
if not export_info.has_section(constants.INISECT_EXP): |
6362 |
raise errors.ProgrammerError("Corrupted export config", |
6363 |
errors.ECODE_ENVIRON) |
6364 |
|
6365 |
ei_version = export_info.get(constants.INISECT_EXP, 'version')
|
6366 |
if (int(ei_version) != constants.EXPORT_VERSION): |
6367 |
raise errors.OpPrereqError("Wrong export version %s (wanted %d)" % |
6368 |
(ei_version, constants.EXPORT_VERSION), |
6369 |
errors.ECODE_ENVIRON) |
6370 |
|
6371 |
# Check that the new instance doesn't have less disks than the export
|
6372 |
instance_disks = len(self.disks) |
6373 |
export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
|
6374 |
if instance_disks < export_disks:
|
6375 |
raise errors.OpPrereqError("Not enough disks to import." |
6376 |
" (instance: %d, export: %d)" %
|
6377 |
(instance_disks, export_disks), |
6378 |
errors.ECODE_INVAL) |
6379 |
|
6380 |
self.op.os_type = export_info.get(constants.INISECT_EXP, 'os') |
6381 |
disk_images = [] |
6382 |
for idx in range(export_disks): |
6383 |
option = 'disk%d_dump' % idx
|
6384 |
if export_info.has_option(constants.INISECT_INS, option):
|
6385 |
# FIXME: are the old os-es, disk sizes, etc. useful?
|
6386 |
export_name = export_info.get(constants.INISECT_INS, option) |
6387 |
image = utils.PathJoin(src_path, export_name) |
6388 |
disk_images.append(image) |
6389 |
else:
|
6390 |
disk_images.append(False)
|
6391 |
|
6392 |
self.src_images = disk_images
|
6393 |
|
6394 |
old_name = export_info.get(constants.INISECT_INS, 'name')
|
6395 |
try:
|
6396 |
exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
|
6397 |
except (TypeError, ValueError), err: |
6398 |
raise errors.OpPrereqError("Invalid export file, nic_count is not" |
6399 |
" an integer: %s" % str(err), |
6400 |
errors.ECODE_STATE) |
6401 |
if self.op.instance_name == old_name: |
6402 |
for idx, nic in enumerate(self.nics): |
6403 |
if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx: |
6404 |
nic_mac_ini = 'nic%d_mac' % idx
|
6405 |
nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini) |
6406 |
|
6407 |
# ENDIF: self.op.mode == constants.INSTANCE_IMPORT
|
6408 |
|
6409 |
# ip ping checks (we use the same ip that was resolved in ExpandNames)
|
6410 |
if self.op.ip_check: |
6411 |
if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT): |
6412 |
raise errors.OpPrereqError("IP %s of instance %s already in use" % |
6413 |
(self.check_ip, self.op.instance_name), |
6414 |
errors.ECODE_NOTUNIQUE) |
6415 |
|
6416 |
#### mac address generation
|
6417 |
# By generating here the mac address both the allocator and the hooks get
|
6418 |
# the real final mac address rather than the 'auto' or 'generate' value.
|
6419 |
# There is a race condition between the generation and the instance object
|
6420 |
# creation, which means that we know the mac is valid now, but we're not
|
6421 |
# sure it will be when we actually add the instance. If things go bad
|
6422 |
# adding the instance will abort because of a duplicate mac, and the
|
6423 |
# creation job will fail.
|
6424 |
for nic in self.nics: |
6425 |
if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE): |
6426 |
nic.mac = self.cfg.GenerateMAC(self.proc.GetECId()) |
6427 |
|
6428 |
#### allocator run
|
6429 |
|
6430 |
if self.op.iallocator is not None: |
6431 |
self._RunAllocator()
|
6432 |
|
6433 |
#### node related checks
|
6434 |
|
6435 |
# check primary node
|
6436 |
self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode) |
6437 |
assert self.pnode is not None, \ |
6438 |
"Cannot retrieve locked node %s" % self.op.pnode |
6439 |
if pnode.offline:
|
6440 |
raise errors.OpPrereqError("Cannot use offline primary node '%s'" % |
6441 |
pnode.name, errors.ECODE_STATE) |
6442 |
if pnode.drained:
|
6443 |
raise errors.OpPrereqError("Cannot use drained primary node '%s'" % |
6444 |
pnode.name, errors.ECODE_STATE) |
6445 |
|
6446 |
self.secondaries = []
|
6447 |
|
6448 |
# mirror node verification
|
6449 |
if self.op.disk_template in constants.DTS_NET_MIRROR: |
6450 |
if self.op.snode is None: |
6451 |
raise errors.OpPrereqError("The networked disk templates need" |
6452 |
" a mirror node", errors.ECODE_INVAL)
|
6453 |
if self.op.snode == pnode.name: |
6454 |
raise errors.OpPrereqError("The secondary node cannot be the" |
6455 |
" primary node.", errors.ECODE_INVAL)
|
6456 |
_CheckNodeOnline(self, self.op.snode) |
6457 |
_CheckNodeNotDrained(self, self.op.snode) |
6458 |
self.secondaries.append(self.op.snode) |
6459 |
|
6460 |
nodenames = [pnode.name] + self.secondaries
|
6461 |
|
6462 |
req_size = _ComputeDiskSize(self.op.disk_template,
|
6463 |
self.disks)
|
6464 |
|
6465 |
# Check lv size requirements, if not adopting
|
6466 |
if req_size is not None and not self.adopt_disks: |
6467 |
_CheckNodesFreeDisk(self, nodenames, req_size)
|
6468 |
|
6469 |
if self.adopt_disks: # instead, we must check the adoption data |
6470 |
all_lvs = set([i["adopt"] for i in self.disks]) |
6471 |
if len(all_lvs) != len(self.disks): |
6472 |
raise errors.OpPrereqError("Duplicate volume names given for adoption", |
6473 |
errors.ECODE_INVAL) |
6474 |
for lv_name in all_lvs: |
6475 |
try:
|
6476 |
self.cfg.ReserveLV(lv_name, self.proc.GetECId()) |
6477 |
except errors.ReservationError:
|
6478 |
raise errors.OpPrereqError("LV named %s used by another instance" % |
6479 |
lv_name, errors.ECODE_NOTUNIQUE) |
6480 |
|
6481 |
node_lvs = self.rpc.call_lv_list([pnode.name],
|
6482 |
self.cfg.GetVGName())[pnode.name]
|
6483 |
node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
|
6484 |
node_lvs = node_lvs.payload |
6485 |
delta = all_lvs.difference(node_lvs.keys()) |
6486 |
if delta:
|
6487 |
raise errors.OpPrereqError("Missing logical volume(s): %s" % |
6488 |
utils.CommaJoin(delta), |
6489 |
errors.ECODE_INVAL) |
6490 |
online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]] |
6491 |
if online_lvs:
|
6492 |
raise errors.OpPrereqError("Online logical volumes found, cannot" |
6493 |
" adopt: %s" % utils.CommaJoin(online_lvs),
|
6494 |
errors.ECODE_STATE) |
6495 |
# update the size of each disk based on what was found
|
6496 |
for dsk in self.disks: |
6497 |
dsk["size"] = int(float(node_lvs[dsk["adopt"]][0])) |
6498 |
|
6499 |
_CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams) |
6500 |
|
6501 |
_CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant) |
6502 |
|
6503 |
_CheckNicsBridgesExist(self, self.nics, self.pnode.name) |
6504 |
|
6505 |
# memory check on primary node
|
6506 |
if self.op.start: |
6507 |
_CheckNodeFreeMemory(self, self.pnode.name, |
6508 |
"creating instance %s" % self.op.instance_name, |
6509 |
self.be_full[constants.BE_MEMORY],
|
6510 |
self.op.hypervisor)
|
6511 |
|
6512 |
self.dry_run_result = list(nodenames) |
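# In dry-run mode the caller therefore receives the list of nodes that would
# host the instance (primary first, then the secondaries).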
6513 |
|
6514 |
def Exec(self, feedback_fn): |
6515 |
"""Create and add the instance to the cluster.
|
6516 |
|
6517 |
"""
|
6518 |
instance = self.op.instance_name
|
6519 |
pnode_name = self.pnode.name
|
6520 |
|
6521 |
ht_kind = self.op.hypervisor
|
6522 |
if ht_kind in constants.HTS_REQ_PORT: |
6523 |
network_port = self.cfg.AllocatePort()
|
6524 |
else:
|
6525 |
network_port = None
|
6526 |
|
6527 |
##if self.op.vnc_bind_address is None:
|
6528 |
## self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
|
6529 |
|
6530 |
# this is needed because os.path.join does not accept None arguments
|
6531 |
if self.op.file_storage_dir is None: |
6532 |
string_file_storage_dir = ""
|
6533 |
else:
|
6534 |
string_file_storage_dir = self.op.file_storage_dir
|
6535 |
|
6536 |
# build the full file storage dir path
|
6537 |
file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
|
6538 |
string_file_storage_dir, instance) |
6539 |
|
6540 |
|
6541 |
disks = _GenerateDiskTemplate(self,
|
6542 |
self.op.disk_template,
|
6543 |
instance, pnode_name, |
6544 |
self.secondaries,
|
6545 |
self.disks,
|
6546 |
file_storage_dir, |
6547 |
self.op.file_driver,
|
6548 |
0)
|
6549 |
|
6550 |
iobj = objects.Instance(name=instance, os=self.op.os_type,
|
6551 |
primary_node=pnode_name, |
6552 |
nics=self.nics, disks=disks,
|
6553 |
disk_template=self.op.disk_template,
|
6554 |
admin_up=False,
|
6555 |
network_port=network_port, |
6556 |
beparams=self.op.beparams,
|
6557 |
hvparams=self.op.hvparams,
|
6558 |
hypervisor=self.op.hypervisor,
|
6559 |
) |
6560 |
|
6561 |
if self.adopt_disks: |
6562 |
# rename LVs to the newly-generated names; we need to construct
|
6563 |
# 'fake' LV disks with the old data, plus the new unique_id
|
6564 |
tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks] |
6565 |
rename_to = [] |
6566 |
for t_dsk, a_dsk in zip (tmp_disks, self.disks): |
6567 |
rename_to.append(t_dsk.logical_id) |
6568 |
t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"]) |
6569 |
self.cfg.SetDiskID(t_dsk, pnode_name)
|
6570 |
result = self.rpc.call_blockdev_rename(pnode_name,
|
6571 |
zip(tmp_disks, rename_to))
|
6572 |
result.Raise("Failed to rename adoped LVs")
|
6573 |
else:
|
6574 |
feedback_fn("* creating instance disks...")
|
6575 |
try:
|
6576 |
_CreateDisks(self, iobj)
|
6577 |
except errors.OpExecError:
|
6578 |
self.LogWarning("Device creation failed, reverting...") |
6579 |
try:
|
6580 |
_RemoveDisks(self, iobj)
|
6581 |
finally:
|
6582 |
self.cfg.ReleaseDRBDMinors(instance)
|
6583 |
raise
|
6584 |
|
6585 |
feedback_fn("adding instance %s to cluster config" % instance)
|
6586 |
|
6587 |
self.cfg.AddInstance(iobj, self.proc.GetECId()) |
6588 |
|
6589 |
# Declare that we don't want to remove the instance lock anymore, as we've
|
6590 |
# added the instance to the config
|
6591 |
del self.remove_locks[locking.LEVEL_INSTANCE] |
6592 |
# Unlock all the nodes
|
6593 |
if self.op.mode == constants.INSTANCE_IMPORT: |
6594 |
nodes_keep = [self.op.src_node]
|
6595 |
nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE] |
6596 |
if node != self.op.src_node] |
6597 |
self.context.glm.release(locking.LEVEL_NODE, nodes_release)
|
6598 |
self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
|
6599 |
else:
|
6600 |
self.context.glm.release(locking.LEVEL_NODE)
|
6601 |
del self.acquired_locks[locking.LEVEL_NODE] |
6602 |
|
6603 |
if self.op.wait_for_sync: |
6604 |
disk_abort = not _WaitForSync(self, iobj) |
6605 |
elif iobj.disk_template in constants.DTS_NET_MIRROR: |
6606 |
# make sure the disks are not degraded (still sync-ing is ok)
|
6607 |
time.sleep(15)
|
6608 |
feedback_fn("* checking mirrors status")
|
6609 |
disk_abort = not _WaitForSync(self, iobj, oneshot=True) |
6610 |
else:
|
6611 |
disk_abort = False
|
6612 |
|
6613 |
if disk_abort:
|
6614 |
_RemoveDisks(self, iobj)
|
6615 |
self.cfg.RemoveInstance(iobj.name)
|
6616 |
# Make sure the instance lock gets removed
|
6617 |
self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
|
6618 |
raise errors.OpExecError("There are some degraded disks for" |
6619 |
" this instance")
|
6620 |
|
6621 |
if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks: |
6622 |
if self.op.mode == constants.INSTANCE_CREATE: |
6623 |
if not self.op.no_install: |
6624 |
feedback_fn("* running the instance OS create scripts...")
|
6625 |
# FIXME: pass debug option from opcode to backend
|
6626 |
result = self.rpc.call_instance_os_add(pnode_name, iobj, False, |
6627 |
self.op.debug_level)
|
6628 |
result.Raise("Could not add os for instance %s"
|
6629 |
" on node %s" % (instance, pnode_name))
|
6630 |
|
6631 |
elif self.op.mode == constants.INSTANCE_IMPORT: |
6632 |
feedback_fn("* running the instance OS import scripts...")
|
6633 |
src_node = self.op.src_node
|
6634 |
src_images = self.src_images
|
6635 |
cluster_name = self.cfg.GetClusterName()
|
6636 |
# FIXME: pass debug option from opcode to backend
|
6637 |
import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
|
6638 |
src_node, src_images, |
6639 |
cluster_name, |
6640 |
self.op.debug_level)
|
6641 |
msg = import_result.fail_msg |
6642 |
if msg:
|
6643 |
self.LogWarning("Error while importing the disk images for instance" |
6644 |
" %s on node %s: %s" % (instance, pnode_name, msg))
|
6645 |
else:
|
6646 |
# also checked in the prereq part
|
6647 |
raise errors.ProgrammerError("Unknown OS initialization mode '%s'" |
6648 |
% self.op.mode)
|
6649 |
|
6650 |
if self.op.start: |
6651 |
iobj.admin_up = True
|
6652 |
self.cfg.Update(iobj, feedback_fn)
|
6653 |
logging.info("Starting instance %s on node %s", instance, pnode_name)
|
6654 |
feedback_fn("* starting instance...")
|
6655 |
result = self.rpc.call_instance_start(pnode_name, iobj, None, None) |
6656 |
result.Raise("Could not start instance")
|
6657 |
|
6658 |
return list(iobj.all_nodes) |
6659 |
|
6660 |
|
6661 |
class LUConnectConsole(NoHooksLU): |
6662 |
"""Connect to an instance's console.
|
6663 |
|
6664 |
This is somewhat special in that it returns the command line that
|
6665 |
you need to run on the master node in order to connect to the
|
6666 |
console.
|
6667 |
|
6668 |
"""
|
6669 |
_OP_REQP = ["instance_name"]
|
6670 |
REQ_BGL = False
|
6671 |
|
6672 |
def ExpandNames(self): |
6673 |
self._ExpandAndLockInstance()
|
6674 |
|
6675 |
def CheckPrereq(self): |
6676 |
"""Check prerequisites.
|
6677 |
|
6678 |
This checks that the instance is in the cluster.
|
6679 |
|
6680 |
"""
|
6681 |
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
6682 |
assert self.instance is not None, \ |
6683 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
6684 |
_CheckNodeOnline(self, self.instance.primary_node) |
6685 |
|
6686 |
def Exec(self, feedback_fn): |
6687 |
"""Connect to the console of an instance
|
6688 |
|
6689 |
"""
|
6690 |
instance = self.instance
|
6691 |
node = instance.primary_node |
6692 |
|
6693 |
node_insts = self.rpc.call_instance_list([node],
|
6694 |
[instance.hypervisor])[node] |
6695 |
node_insts.Raise("Can't get node information from %s" % node)
|
6696 |
|
6697 |
if instance.name not in node_insts.payload: |
6698 |
raise errors.OpExecError("Instance %s is not running." % instance.name) |
6699 |
|
6700 |
logging.debug("Connecting to console of %s on %s", instance.name, node)
|
6701 |
|
6702 |
hyper = hypervisor.GetHypervisor(instance.hypervisor) |
6703 |
cluster = self.cfg.GetClusterInfo()
|
6704 |
# beparams and hvparams are passed separately, to avoid editing the
|
6705 |
# instance and then saving the defaults in the instance itself.
|
6706 |
hvparams = cluster.FillHV(instance) |
6707 |
beparams = cluster.FillBE(instance) |
6708 |
console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams) |
6709 |
|
6710 |
# build ssh cmdline
|
6711 |
return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True) |
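# Note that this LU only builds the ssh command line; the command-line
# client is expected to execute it on the master node itself.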
6712 |
|
6713 |
|
6714 |
class LUReplaceDisks(LogicalUnit): |
6715 |
"""Replace the disks of an instance.
|
6716 |
|
6717 |
"""
|
6718 |
HPATH = "mirrors-replace"
|
6719 |
HTYPE = constants.HTYPE_INSTANCE |
6720 |
_OP_REQP = ["instance_name", "mode", "disks"] |
6721 |
REQ_BGL = False
|
6722 |
|
6723 |
def CheckArguments(self): |
6724 |
if not hasattr(self.op, "remote_node"): |
6725 |
self.op.remote_node = None |
6726 |
if not hasattr(self.op, "iallocator"): |
6727 |
self.op.iallocator = None |
6728 |
if not hasattr(self.op, "early_release"): |
6729 |
self.op.early_release = False |
6730 |
|
6731 |
TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node, |
6732 |
self.op.iallocator)
|
6733 |
|
6734 |
def ExpandNames(self): |
6735 |
self._ExpandAndLockInstance()
|
6736 |
|
6737 |
if self.op.iallocator is not None: |
6738 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
6739 |
|
6740 |
elif self.op.remote_node is not None: |
6741 |
remote_node = _ExpandNodeName(self.cfg, self.op.remote_node) |
6742 |
self.op.remote_node = remote_node
|
6743 |
|
6744 |
# Warning: do not remove the locking of the new secondary here
|
6745 |
# unless DRBD8.AddChildren is changed to work in parallel;
|
6746 |
# currently it doesn't since parallel invocations of
|
6747 |
# FindUnusedMinor will conflict
|
6748 |
self.needed_locks[locking.LEVEL_NODE] = [remote_node]
|
6749 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
|
6750 |
|
6751 |
else:
|
6752 |
self.needed_locks[locking.LEVEL_NODE] = []
|
6753 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
|
6754 |
|
6755 |
self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode, |
6756 |
self.op.iallocator, self.op.remote_node, |
6757 |
self.op.disks, False, self.op.early_release) |
6758 |
|
6759 |
self.tasklets = [self.replacer] |
6760 |
|
6761 |
def DeclareLocks(self, level): |
6762 |
# If we're not already locking all nodes in the set we have to declare the
|
6763 |
# instance's primary/secondary nodes.
|
6764 |
if (level == locking.LEVEL_NODE and |
6765 |
self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET): |
6766 |
self._LockInstancesNodes()
|
6767 |
|
6768 |
def BuildHooksEnv(self): |
6769 |
"""Build hooks env.
|
6770 |
|
6771 |
This runs on the master, the primary and all the secondaries.
|
6772 |
|
6773 |
"""
|
6774 |
instance = self.replacer.instance
|
6775 |
env = { |
6776 |
"MODE": self.op.mode, |
6777 |
"NEW_SECONDARY": self.op.remote_node, |
6778 |
"OLD_SECONDARY": instance.secondary_nodes[0], |
6779 |
} |
6780 |
env.update(_BuildInstanceHookEnvByObject(self, instance))
|
6781 |
nl = [ |
6782 |
self.cfg.GetMasterNode(),
|
6783 |
instance.primary_node, |
6784 |
] |
6785 |
if self.op.remote_node is not None: |
6786 |
nl.append(self.op.remote_node)
|
6787 |
return env, nl, nl
|
6788 |
|
6789 |
|
6790 |
class LUEvacuateNode(LogicalUnit): |
6791 |
"""Relocate the secondary instances from a node.
|
6792 |
|
6793 |
"""
|
6794 |
HPATH = "node-evacuate"
|
6795 |
HTYPE = constants.HTYPE_NODE |
6796 |
_OP_REQP = ["node_name"]
|
6797 |
REQ_BGL = False
|
6798 |
|
6799 |
def CheckArguments(self): |
6800 |
if not hasattr(self.op, "remote_node"): |
6801 |
self.op.remote_node = None |
6802 |
if not hasattr(self.op, "iallocator"): |
6803 |
self.op.iallocator = None |
6804 |
if not hasattr(self.op, "early_release"): |
6805 |
self.op.early_release = False |
6806 |
|
6807 |
TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG, |
6808 |
self.op.remote_node,
|
6809 |
self.op.iallocator)
|
6810 |
|
6811 |
def ExpandNames(self): |
6812 |
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) |
6813 |
|
6814 |
self.needed_locks = {}
|
6815 |
|
6816 |
# Declare node locks
|
6817 |
if self.op.iallocator is not None: |
6818 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
6819 |
|
6820 |
elif self.op.remote_node is not None: |
6821 |
self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node) |
6822 |
|
6823 |
# Warning: do not remove the locking of the new secondary here
|
6824 |
# unless DRBD8.AddChildren is changed to work in parallel;
|
6825 |
# currently it doesn't since parallel invocations of
|
6826 |
# FindUnusedMinor will conflict
|
6827 |
self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node] |
6828 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
|
6829 |
|
6830 |
else:
|
6831 |
raise errors.OpPrereqError("Invalid parameters", errors.ECODE_INVAL) |
6832 |
|
6833 |
# Create tasklets for replacing disks for all secondary instances on this
|
6834 |
# node
|
6835 |
names = [] |
6836 |
tasklets = [] |
6837 |
|
6838 |
for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name): |
6839 |
logging.debug("Replacing disks for instance %s", inst.name)
|
6840 |
names.append(inst.name) |
6841 |
|
6842 |
replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
|
6843 |
self.op.iallocator, self.op.remote_node, [], |
6844 |
True, self.op.early_release) |
6845 |
tasklets.append(replacer) |
6846 |
|
6847 |
self.tasklets = tasklets
|
6848 |
self.instance_names = names
|
6849 |
|
6850 |
# Declare instance locks
|
6851 |
self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names |
6852 |
|
6853 |
def DeclareLocks(self, level): |
6854 |
# If we're not already locking all nodes in the set we have to declare the
|
6855 |
# instance's primary/secondary nodes.
|
6856 |
if (level == locking.LEVEL_NODE and |
6857 |
self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET): |
6858 |
self._LockInstancesNodes()
|
6859 |
|
6860 |
def BuildHooksEnv(self): |
6861 |
"""Build hooks env.
|
6862 |
|
6863 |
This runs on the master, the primary and all the secondaries.
|
6864 |
|
6865 |
"""
|
6866 |
env = { |
6867 |
"NODE_NAME": self.op.node_name, |
6868 |
} |
6869 |
|
6870 |
nl = [self.cfg.GetMasterNode()]
|
6871 |
|
6872 |
if self.op.remote_node is not None: |
6873 |
env["NEW_SECONDARY"] = self.op.remote_node |
6874 |
nl.append(self.op.remote_node)
|
6875 |
|
6876 |
return (env, nl, nl)
|
6877 |
|
6878 |
|
6879 |
class TLReplaceDisks(Tasklet): |
6880 |
"""Replaces disks for an instance.
|
6881 |
|
6882 |
Note: Locking is not within the scope of this class.
|
6883 |
|
6884 |
"""
|
6885 |
def __init__(self, lu, instance_name, mode, iallocator_name, remote_node, |
6886 |
disks, delay_iallocator, early_release): |
6887 |
"""Initializes this class.
|
6888 |
|
6889 |
"""
|
6890 |
Tasklet.__init__(self, lu)
|
6891 |
|
6892 |
# Parameters
|
6893 |
self.instance_name = instance_name
|
6894 |
self.mode = mode
|
6895 |
self.iallocator_name = iallocator_name
|
6896 |
self.remote_node = remote_node
|
6897 |
self.disks = disks
|
6898 |
self.delay_iallocator = delay_iallocator
|
6899 |
self.early_release = early_release
|
6900 |
|
6901 |
# Runtime data
|
6902 |
self.instance = None |
6903 |
self.new_node = None |
6904 |
self.target_node = None |
6905 |
self.other_node = None |
6906 |
self.remote_node_info = None |
6907 |
self.node_secondary_ip = None |
6908 |
|
6909 |
@staticmethod
|
6910 |
def CheckArguments(mode, remote_node, iallocator): |
6911 |
"""Helper function for users of this class.
|
6912 |
|
6913 |
"""
|
6914 |
# check for valid parameter combination
|
6915 |
if mode == constants.REPLACE_DISK_CHG:
|
6916 |
if remote_node is None and iallocator is None: |
6917 |
raise errors.OpPrereqError("When changing the secondary either an" |
6918 |
" iallocator script must be used or the"
|
6919 |
" new node given", errors.ECODE_INVAL)
|
6920 |
|
6921 |
if remote_node is not None and iallocator is not None: |
6922 |
raise errors.OpPrereqError("Give either the iallocator or the new" |
6923 |
" secondary, not both", errors.ECODE_INVAL)
|
6924 |
|
6925 |
elif remote_node is not None or iallocator is not None: |
6926 |
# Not replacing the secondary
|
6927 |
raise errors.OpPrereqError("The iallocator and new node options can" |
6928 |
" only be used when changing the"
|
6929 |
" secondary node", errors.ECODE_INVAL)
|
6930 |
|
6931 |
@staticmethod
|
6932 |
def _RunAllocator(lu, iallocator_name, instance_name, relocate_from): |
6933 |
"""Compute a new secondary node using an IAllocator.
|
6934 |
|
6935 |
"""
|
6936 |
ial = IAllocator(lu.cfg, lu.rpc, |
6937 |
mode=constants.IALLOCATOR_MODE_RELOC, |
6938 |
name=instance_name, |
6939 |
relocate_from=relocate_from) |
6940 |
|
6941 |
ial.Run(iallocator_name) |
6942 |
|
6943 |
if not ial.success: |
6944 |
raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':" |
6945 |
" %s" % (iallocator_name, ial.info),
|
6946 |
errors.ECODE_NORES) |
6947 |
|
6948 |
if len(ial.result) != ial.required_nodes: |
6949 |
raise errors.OpPrereqError("iallocator '%s' returned invalid number" |
6950 |
" of nodes (%s), required %s" %
|
6951 |
(iallocator_name, |
6952 |
len(ial.result), ial.required_nodes),
|
6953 |
errors.ECODE_FAULT) |
6954 |
|
6955 |
remote_node_name = ial.result[0]
|
6956 |
|
6957 |
lu.LogInfo("Selected new secondary for instance '%s': %s",
|
6958 |
instance_name, remote_node_name) |
6959 |
|
6960 |
return remote_node_name
|
6961 |
|
6962 |
def _FindFaultyDisks(self, node_name): |
6963 |
return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance, |
6964 |
node_name, True)
|
6965 |
|
6966 |
def CheckPrereq(self): |
6967 |
"""Check prerequisites.
|
6968 |
|
6969 |
This checks that the instance is in the cluster.
|
6970 |
|
6971 |
"""
|
6972 |
self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name) |
6973 |
assert instance is not None, \ |
6974 |
"Cannot retrieve locked instance %s" % self.instance_name |
6975 |
|
6976 |
if instance.disk_template != constants.DT_DRBD8:
|
6977 |
raise errors.OpPrereqError("Can only run replace disks for DRBD8-based" |
6978 |
" instances", errors.ECODE_INVAL)
|
6979 |
|
6980 |
if len(instance.secondary_nodes) != 1: |
6981 |
raise errors.OpPrereqError("The instance has a strange layout," |
6982 |
" expected one secondary but found %d" %
|
6983 |
len(instance.secondary_nodes),
|
6984 |
errors.ECODE_FAULT) |
6985 |
|
6986 |
if not self.delay_iallocator: |
6987 |
self._CheckPrereq2()
|
6988 |
|
6989 |
def _CheckPrereq2(self): |
6990 |
"""Check prerequisites, second part.
|
6991 |
|
6992 |
This function should always be part of CheckPrereq. It was separated and is
|
6993 |
now called from Exec because during node evacuation iallocator was only
|
6994 |
called with an unmodified cluster model, not taking planned changes into
|
6995 |
account.
|
6996 |
|
6997 |
"""
|
6998 |
instance = self.instance
|
6999 |
secondary_node = instance.secondary_nodes[0]
|
7000 |
|
7001 |
if self.iallocator_name is None: |
7002 |
remote_node = self.remote_node
|
7003 |
else:
|
7004 |
remote_node = self._RunAllocator(self.lu, self.iallocator_name, |
7005 |
instance.name, instance.secondary_nodes) |
7006 |
|
7007 |
if remote_node is not None: |
7008 |
self.remote_node_info = self.cfg.GetNodeInfo(remote_node) |
7009 |
assert self.remote_node_info is not None, \ |
7010 |
"Cannot retrieve locked node %s" % remote_node
|
7011 |
else:
|
7012 |
self.remote_node_info = None |
7013 |
|
7014 |
if remote_node == self.instance.primary_node: |
7015 |
raise errors.OpPrereqError("The specified node is the primary node of" |
7016 |
" the instance.", errors.ECODE_INVAL)
|
7017 |
|
7018 |
if remote_node == secondary_node:
|
7019 |
raise errors.OpPrereqError("The specified node is already the" |
7020 |
" secondary node of the instance.",
|
7021 |
errors.ECODE_INVAL) |
7022 |
|
7023 |
if self.disks and self.mode in (constants.REPLACE_DISK_AUTO, |
7024 |
constants.REPLACE_DISK_CHG): |
7025 |
raise errors.OpPrereqError("Cannot specify disks to be replaced", |
7026 |
errors.ECODE_INVAL) |
7027 |
|
7028 |
if self.mode == constants.REPLACE_DISK_AUTO: |
7029 |
faulty_primary = self._FindFaultyDisks(instance.primary_node)
|
7030 |
faulty_secondary = self._FindFaultyDisks(secondary_node)
|
7031 |
|
7032 |
if faulty_primary and faulty_secondary: |
7033 |
raise errors.OpPrereqError("Instance %s has faulty disks on more than" |
7034 |
" one node and can not be repaired"
|
7035 |
" automatically" % self.instance_name, |
7036 |
errors.ECODE_STATE) |
7037 |
|
7038 |
if faulty_primary:
|
7039 |
self.disks = faulty_primary
|
7040 |
self.target_node = instance.primary_node
|
7041 |
self.other_node = secondary_node
|
7042 |
check_nodes = [self.target_node, self.other_node] |
7043 |
elif faulty_secondary:
|
7044 |
self.disks = faulty_secondary
|
7045 |
self.target_node = secondary_node
|
7046 |
self.other_node = instance.primary_node
|
7047 |
check_nodes = [self.target_node, self.other_node] |
7048 |
else:
|
7049 |
self.disks = []
|
7050 |
check_nodes = [] |
7051 |
|
7052 |
else:
|
7053 |
# Non-automatic modes
|
7054 |
if self.mode == constants.REPLACE_DISK_PRI: |
7055 |
self.target_node = instance.primary_node
|
7056 |
self.other_node = secondary_node
|
7057 |
check_nodes = [self.target_node, self.other_node] |
7058 |
|
7059 |
elif self.mode == constants.REPLACE_DISK_SEC: |
7060 |
self.target_node = secondary_node
|
7061 |
self.other_node = instance.primary_node
|
7062 |
check_nodes = [self.target_node, self.other_node] |
7063 |
|
7064 |
elif self.mode == constants.REPLACE_DISK_CHG: |
7065 |
self.new_node = remote_node
|
7066 |
self.other_node = instance.primary_node
|
7067 |
self.target_node = secondary_node
|
7068 |
check_nodes = [self.new_node, self.other_node] |
7069 |
|
7070 |
_CheckNodeNotDrained(self.lu, remote_node)
|
7071 |
|
7072 |
old_node_info = self.cfg.GetNodeInfo(secondary_node)
|
7073 |
assert old_node_info is not None |
7074 |
if old_node_info.offline and not self.early_release: |
7075 |
# doesn't make sense to delay the release
|
7076 |
self.early_release = True |
7077 |
self.lu.LogInfo("Old secondary %s is offline, automatically enabling" |
7078 |
" early-release mode", secondary_node)
|
7079 |
|
7080 |
else:
|
7081 |
raise errors.ProgrammerError("Unhandled disk replace mode (%s)" % |
7082 |
self.mode)
|
7083 |
|
7084 |
# If not specified all disks should be replaced
|
7085 |
if not self.disks: |
7086 |
self.disks = range(len(self.instance.disks)) |
7087 |
|
7088 |
for node in check_nodes: |
7089 |
_CheckNodeOnline(self.lu, node)
|
7090 |
|
7091 |
# Check whether disks are valid
|
7092 |
for disk_idx in self.disks: |
7093 |
instance.FindDisk(disk_idx) |
7094 |
|
7095 |
# Get secondary node IP addresses
|
7096 |
node_2nd_ip = {} |
7097 |
|
7098 |
for node_name in [self.target_node, self.other_node, self.new_node]: |
7099 |
if node_name is not None: |
7100 |
node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
|
7101 |
|
7102 |
self.node_secondary_ip = node_2nd_ip
|
7103 |
|
7104 |
def Exec(self, feedback_fn): |
7105 |
"""Execute disk replacement.
|
7106 |
|
7107 |
This dispatches the disk replacement to the appropriate handler.
|
7108 |
|
7109 |
"""
|
7110 |
if self.delay_iallocator: |
7111 |
self._CheckPrereq2()
|
7112 |
|
7113 |
if not self.disks: |
7114 |
feedback_fn("No disks need replacement")
|
7115 |
return
|
7116 |
|
7117 |
feedback_fn("Replacing disk(s) %s for %s" %
|
7118 |
(utils.CommaJoin(self.disks), self.instance.name)) |
7119 |
|
7120 |
activate_disks = (not self.instance.admin_up) |
7121 |
|
7122 |
# Activate the instance disks if we're replacing them on a down instance
|
7123 |
if activate_disks:
|
7124 |
_StartInstanceDisks(self.lu, self.instance, True) |
7125 |
|
7126 |
try:
|
7127 |
# Should we replace the secondary node?
|
7128 |
if self.new_node is not None: |
7129 |
fn = self._ExecDrbd8Secondary
|
7130 |
else:
|
7131 |
fn = self._ExecDrbd8DiskOnly
|
7132 |
|
7133 |
return fn(feedback_fn)
|
7134 |
|
7135 |
finally:
|
7136 |
# Deactivate the instance disks if we're replacing them on a
|
7137 |
# down instance
|
7138 |
if activate_disks:
|
7139 |
_SafeShutdownInstanceDisks(self.lu, self.instance) |
7140 |
|
7141 |
def _CheckVolumeGroup(self, nodes): |
7142 |
self.lu.LogInfo("Checking volume groups") |
7143 |
|
7144 |
vgname = self.cfg.GetVGName()
|
7145 |
|
7146 |
# Make sure volume group exists on all involved nodes
|
7147 |
results = self.rpc.call_vg_list(nodes)
|
7148 |
if not results: |
7149 |
raise errors.OpExecError("Can't list volume groups on the nodes") |
7150 |
|
7151 |
for node in nodes: |
7152 |
res = results[node] |
7153 |
res.Raise("Error checking node %s" % node)
|
7154 |
if vgname not in res.payload: |
7155 |
raise errors.OpExecError("Volume group '%s' not found on node %s" % |
7156 |
(vgname, node)) |
7157 |
|
7158 |
def _CheckDisksExistence(self, nodes): |
7159 |
# Check disk existence
|
7160 |
for idx, dev in enumerate(self.instance.disks): |
7161 |
if idx not in self.disks: |
7162 |
continue
|
7163 |
|
7164 |
for node in nodes: |
7165 |
self.lu.LogInfo("Checking disk/%d on %s" % (idx, node)) |
7166 |
self.cfg.SetDiskID(dev, node)
|
7167 |
|
7168 |
result = self.rpc.call_blockdev_find(node, dev)
|
7169 |
|
7170 |
msg = result.fail_msg |
7171 |
if msg or not result.payload: |
7172 |
if not msg: |
7173 |
msg = "disk not found"
|
7174 |
raise errors.OpExecError("Can't find disk/%d on node %s: %s" % |
7175 |
(idx, node, msg)) |
7176 |
|
7177 |
def _CheckDisksConsistency(self, node_name, on_primary, ldisk): |
7178 |
for idx, dev in enumerate(self.instance.disks): |
7179 |
if idx not in self.disks: |
7180 |
continue
|
7181 |
|
7182 |
self.lu.LogInfo("Checking disk/%d consistency on node %s" % |
7183 |
(idx, node_name)) |
7184 |
|
7185 |
if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary, |
7186 |
ldisk=ldisk): |
7187 |
raise errors.OpExecError("Node %s has degraded storage, unsafe to" |
7188 |
" replace disks for instance %s" %
|
7189 |
(node_name, self.instance.name))
|
7190 |
|
7191 |
def _CreateNewStorage(self, node_name): |
7192 |
vgname = self.cfg.GetVGName()
|
7193 |
iv_names = {} |
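# iv_names maps each replaced disk's iv_name to the tuple
# (drbd_dev, old_lvs, new_lvs), used later for renaming, re-attaching and
# removing the old storage.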
7194 |
|
7195 |
for idx, dev in enumerate(self.instance.disks): |
7196 |
if idx not in self.disks: |
7197 |
continue
|
7198 |
|
7199 |
self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx)) |
7200 |
|
7201 |
self.cfg.SetDiskID(dev, node_name)
|
7202 |
|
7203 |
lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]] |
7204 |
names = _GenerateUniqueNames(self.lu, lv_names)
|
7205 |
|
7206 |
lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size, |
7207 |
logical_id=(vgname, names[0]))
|
7208 |
lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
|
7209 |
logical_id=(vgname, names[1]))
|
7210 |
|
7211 |
new_lvs = [lv_data, lv_meta] |
7212 |
old_lvs = dev.children |
7213 |
iv_names[dev.iv_name] = (dev, old_lvs, new_lvs) |
7214 |
|
7215 |
# we pass force_create=True to force the LVM creation
|
7216 |
for new_lv in new_lvs: |
7217 |
_CreateBlockDev(self.lu, node_name, self.instance, new_lv, True, |
7218 |
_GetInstanceInfoText(self.instance), False) |
7219 |
|
7220 |
return iv_names
|
7221 |
|
7222 |
def _CheckDevices(self, node_name, iv_names): |
7223 |
for name, (dev, _, _) in iv_names.iteritems(): |
7224 |
self.cfg.SetDiskID(dev, node_name)
|
7225 |
|
7226 |
result = self.rpc.call_blockdev_find(node_name, dev)
|
7227 |
|
7228 |
msg = result.fail_msg |
7229 |
if msg or not result.payload: |
7230 |
if not msg: |
7231 |
msg = "disk not found"
|
7232 |
raise errors.OpExecError("Can't find DRBD device %s: %s" % |
7233 |
(name, msg)) |
7234 |
|
7235 |
if result.payload.is_degraded:
|
7236 |
raise errors.OpExecError("DRBD device %s is degraded!" % name) |
7237 |
|
7238 |
def _RemoveOldStorage(self, node_name, iv_names): |
7239 |
for name, (_, old_lvs, _) in iv_names.iteritems(): |
7240 |
self.lu.LogInfo("Remove logical volumes for %s" % name) |
7241 |
|
7242 |
for lv in old_lvs: |
7243 |
self.cfg.SetDiskID(lv, node_name)
|
7244 |
|
7245 |
msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
|
7246 |
if msg:
|
7247 |
self.lu.LogWarning("Can't remove old LV: %s" % msg, |
7248 |
hint="remove unused LVs manually")
|
7249 |
|
7250 |
def _ReleaseNodeLock(self, node_name): |
7251 |
"""Releases the lock for a given node."""
|
7252 |
self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
|
7253 |
|
7254 |
def _ExecDrbd8DiskOnly(self, feedback_fn): |
7255 |
"""Replace a disk on the primary or secondary for DRBD 8.
|
7256 |
|
7257 |
The algorithm for replace is quite complicated:
|
7258 |
|
7259 |
1. for each disk to be replaced:
|
7260 |
|
7261 |
1. create new LVs on the target node with unique names
|
7262 |
1. detach old LVs from the drbd device
|
7263 |
1. rename old LVs to name_replaced.<time_t>
|
7264 |
1. rename new LVs to old LVs
|
7265 |
1. attach the new LVs (with the old names now) to the drbd device
|
7266 |
|
7267 |
1. wait for sync across all devices
|
7268 |
|
7269 |
1. for each modified disk:
|
7270 |
|
7271 |
1. remove old LVs (which have the name name_replaced.<time_t>)
|
7272 |
|
7273 |
Failures are not very well handled.
|
7274 |
|
7275 |
"""
|
7276 |
steps_total = 6
|
7277 |
|
7278 |
# Step: check device activation
|
7279 |
self.lu.LogStep(1, steps_total, "Check device existence") |
7280 |
self._CheckDisksExistence([self.other_node, self.target_node]) |
7281 |
self._CheckVolumeGroup([self.target_node, self.other_node]) |
7282 |
|
7283 |
# Step: check other node consistency
|
7284 |
self.lu.LogStep(2, steps_total, "Check peer consistency") |
7285 |
self._CheckDisksConsistency(self.other_node, |
7286 |
self.other_node == self.instance.primary_node, |
7287 |
False)
|
7288 |
|
7289 |
# Step: create new storage
|
7290 |
self.lu.LogStep(3, steps_total, "Allocate new storage") |
7291 |
iv_names = self._CreateNewStorage(self.target_node) |
7292 |
|
7293 |
# Step: for each lv, detach+rename*2+attach
|
7294 |
self.lu.LogStep(4, steps_total, "Changing drbd configuration") |
7295 |
for dev, old_lvs, new_lvs in iv_names.itervalues(): |
7296 |
self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name) |
7297 |
|
7298 |
result = self.rpc.call_blockdev_removechildren(self.target_node, dev, |
7299 |
old_lvs) |
7300 |
result.Raise("Can't detach drbd from local storage on node"
|
7301 |
" %s for device %s" % (self.target_node, dev.iv_name)) |
7302 |
#dev.children = []
|
7303 |
#cfg.Update(instance)
|
7304 |
|
7305 |
# ok, we created the new LVs, so now we know we have the needed
|
7306 |
# storage; as such, we proceed on the target node to rename
|
7307 |
# old_lv to _old, and new_lv to old_lv; note that we rename LVs
|
7308 |
# using the assumption that logical_id == physical_id (which in
|
7309 |
# turn is the unique_id on that node)
|
7310 |
|
7311 |
# FIXME(iustin): use a better name for the replaced LVs
|
7312 |
temp_suffix = int(time.time())
|
7313 |
ren_fn = lambda d, suff: (d.physical_id[0], |
7314 |
d.physical_id[1] + "_replaced-%s" % suff) |
7315 |
|
7316 |
# Build the rename list based on what LVs exist on the node
|
7317 |
rename_old_to_new = [] |
7318 |
for to_ren in old_lvs: |
7319 |
result = self.rpc.call_blockdev_find(self.target_node, to_ren) |
7320 |
if not result.fail_msg and result.payload: |
7321 |
# device exists
|
7322 |
rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix))) |
7323 |
|
7324 |
self.lu.LogInfo("Renaming the old LVs on the target node") |
7325 |
result = self.rpc.call_blockdev_rename(self.target_node, |
7326 |
rename_old_to_new) |
7327 |
result.Raise("Can't rename old LVs on node %s" % self.target_node) |
7328 |
|
7329 |
# Now we rename the new LVs to the old LVs
|
7330 |
self.lu.LogInfo("Renaming the new LVs on the target node") |
7331 |
rename_new_to_old = [(new, old.physical_id) |
7332 |
for old, new in zip(old_lvs, new_lvs)] |
7333 |
result = self.rpc.call_blockdev_rename(self.target_node, |
7334 |
rename_new_to_old) |
7335 |
result.Raise("Can't rename new LVs on node %s" % self.target_node) |
7336 |
|
7337 |
for old, new in zip(old_lvs, new_lvs): |
7338 |
new.logical_id = old.logical_id |
7339 |
self.cfg.SetDiskID(new, self.target_node) |
7340 |
|
7341 |
for disk in old_lvs: |
7342 |
disk.logical_id = ren_fn(disk, temp_suffix) |
7343 |
self.cfg.SetDiskID(disk, self.target_node) |
7344 |
|
7345 |
# Now that the new lvs have the old name, we can add them to the device
|
7346 |
self.lu.LogInfo("Adding new mirror component on %s" % self.target_node) |
7347 |
result = self.rpc.call_blockdev_addchildren(self.target_node, dev, |
7348 |
new_lvs) |
7349 |
msg = result.fail_msg |
7350 |
if msg:
|
7351 |
for new_lv in new_lvs: |
7352 |
msg2 = self.rpc.call_blockdev_remove(self.target_node, |
7353 |
new_lv).fail_msg |
7354 |
if msg2:
|
7355 |
self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2, |
7356 |
hint=("cleanup manually the unused logical"
|
7357 |
"volumes"))
|
7358 |
raise errors.OpExecError("Can't add local storage to drbd: %s" % msg) |
7359 |
|
7360 |
dev.children = new_lvs |
7361 |
|
7362 |
self.cfg.Update(self.instance, feedback_fn) |
7363 |
|
7364 |
cstep = 5
|
7365 |
if self.early_release: |
7366 |
self.lu.LogStep(cstep, steps_total, "Removing old storage") |
7367 |
cstep += 1
|
7368 |
self._RemoveOldStorage(self.target_node, iv_names) |
7369 |
# WARNING: we release both node locks here, do not do other RPCs
|
7370 |
# than WaitForSync to the primary node
|
7371 |
self._ReleaseNodeLock([self.target_node, self.other_node]) |
7372 |
|
7373 |
# Wait for sync
|
7374 |
# This can fail as the old devices are degraded and _WaitForSync
|
7375 |
# does a combined result over all disks, so we don't check its return value
|
7376 |
self.lu.LogStep(cstep, steps_total, "Sync devices") |
7377 |
cstep += 1
|
7378 |
_WaitForSync(self.lu, self.instance) |
7379 |
|
7380 |
# Check all devices manually
|
7381 |
self._CheckDevices(self.instance.primary_node, iv_names) |
7382 |
|
7383 |
# Step: remove old storage
|
7384 |
if not self.early_release: |
7385 |
self.lu.LogStep(cstep, steps_total, "Removing old storage") |
7386 |
cstep += 1
|
7387 |
self._RemoveOldStorage(self.target_node, iv_names) |
7388 |
|
7389 |
def _ExecDrbd8Secondary(self, feedback_fn): |
7390 |
"""Replace the secondary node for DRBD 8.
|
7391 |
|
7392 |
The algorithm for replace is quite complicated:
|
7393 |
- for all disks of the instance:
|
7394 |
- create new LVs on the new node with same names
|
7395 |
- shutdown the drbd device on the old secondary
|
7396 |
- disconnect the drbd network on the primary
|
7397 |
- create the drbd device on the new secondary
|
7398 |
- network attach the drbd on the primary, using an artifice:
|
7399 |
the drbd code for Attach() will connect to the network if it
|
7400 |
finds a device which is connected to the good local disks but
|
7401 |
not network enabled
|
7402 |
- wait for sync across all devices
|
7403 |
- remove all disks from the old secondary
|
7404 |
|
7405 |
Failures are not very well handled.
|
7406 |
|
7407 |
"""
|
7408 |
steps_total = 6
|
7409 |
|
7410 |
# Step: check device activation
|
7411 |
self.lu.LogStep(1, steps_total, "Check device existence") |
7412 |
self._CheckDisksExistence([self.instance.primary_node]) |
7413 |
self._CheckVolumeGroup([self.instance.primary_node]) |
7414 |
|
7415 |
# Step: check other node consistency
|
7416 |
self.lu.LogStep(2, steps_total, "Check peer consistency") |
7417 |
self._CheckDisksConsistency(self.instance.primary_node, True, True) |
7418 |
|
7419 |
# Step: create new storage
|
7420 |
self.lu.LogStep(3, steps_total, "Allocate new storage") |
7421 |
for idx, dev in enumerate(self.instance.disks): |
7422 |
self.lu.LogInfo("Adding new local storage on %s for disk/%d" % |
7423 |
(self.new_node, idx))
|
7424 |
# we pass force_create=True to force LVM creation
|
7425 |
for new_lv in dev.children: |
7426 |
_CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True, |
7427 |
_GetInstanceInfoText(self.instance), False) |
7428 |
|
7429 |
# Step 4: drbd minors and drbd setup changes
|
7430 |
# after this, we must manually remove the drbd minors on both the
|
7431 |
# error and the success paths
|
7432 |
self.lu.LogStep(4, steps_total, "Changing drbd configuration") |
7433 |
minors = self.cfg.AllocateDRBDMinor([self.new_node |
7434 |
for dev in self.instance.disks], |
7435 |
self.instance.name)
|
7436 |
logging.debug("Allocated minors %r", minors)
|
7437 |
|
7438 |
iv_names = {} |
7439 |
for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)): |
7440 |
self.lu.LogInfo("activating a new drbd on %s for disk/%d" % |
7441 |
(self.new_node, idx))
|
7442 |
# create new devices on new_node; note that we create two IDs:
|
7443 |
# one without port, so the drbd will be activated without
|
7444 |
# networking information on the new node at this stage, and one
|
7445 |
# with network, for the latter activation in step 4
|
7446 |
(o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id |
7447 |
if self.instance.primary_node == o_node1: |
7448 |
p_minor = o_minor1 |
7449 |
else:
|
7450 |
assert self.instance.primary_node == o_node2, "Three-node instance?" |
7451 |
p_minor = o_minor2 |
7452 |
|
7453 |
new_alone_id = (self.instance.primary_node, self.new_node, None, |
7454 |
p_minor, new_minor, o_secret) |
7455 |
new_net_id = (self.instance.primary_node, self.new_node, o_port, |
7456 |
p_minor, new_minor, o_secret) |
7457 |
|
7458 |
iv_names[idx] = (dev, dev.children, new_net_id) |
7459 |
logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
|
7460 |
new_net_id) |
7461 |
new_drbd = objects.Disk(dev_type=constants.LD_DRBD8, |
7462 |
logical_id=new_alone_id, |
7463 |
children=dev.children, |
7464 |
size=dev.size) |
7465 |
try:
|
7466 |
_CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd, |
7467 |
_GetInstanceInfoText(self.instance), False) |
7468 |
except errors.GenericError:
|
7469 |
self.cfg.ReleaseDRBDMinors(self.instance.name) |
7470 |
raise
|
7471 |
|
7472 |
# We have new devices, shutdown the drbd on the old secondary
|
7473 |
for idx, dev in enumerate(self.instance.disks): |
7474 |
self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx) |
7475 |
self.cfg.SetDiskID(dev, self.target_node) |
7476 |
msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg |
7477 |
if msg:
|
7478 |
self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old" |
7479 |
"node: %s" % (idx, msg),
|
7480 |
hint=("Please cleanup this device manually as"
|
7481 |
" soon as possible"))
|
7482 |
|
7483 |
self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)") |
7484 |
result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node], |
7485 |
self.node_secondary_ip,
|
7486 |
self.instance.disks)\
|
7487 |
[self.instance.primary_node]
|
7488 |
|
7489 |
msg = result.fail_msg |
7490 |
if msg:
|
7491 |
# detaches didn't succeed (unlikely)
|
7492 |
self.cfg.ReleaseDRBDMinors(self.instance.name) |
7493 |
raise errors.OpExecError("Can't detach the disks from the network on" |
7494 |
" old node: %s" % (msg,))
|
7495 |
|
7496 |
# if we managed to detach at least one, we update all the disks of
|
7497 |
# the instance to point to the new secondary
|
7498 |
self.lu.LogInfo("Updating instance configuration") |
7499 |
for dev, _, new_logical_id in iv_names.itervalues(): |
7500 |
dev.logical_id = new_logical_id |
7501 |
self.cfg.SetDiskID(dev, self.instance.primary_node) |
7502 |
|
7503 |
self.cfg.Update(self.instance, feedback_fn) |
7504 |
|
7505 |
# and now perform the drbd attach
|
7506 |
self.lu.LogInfo("Attaching primary drbds to new secondary" |
7507 |
" (standalone => connected)")
|
7508 |
result = self.rpc.call_drbd_attach_net([self.instance.primary_node, |
7509 |
self.new_node],
|
7510 |
self.node_secondary_ip,
|
7511 |
self.instance.disks,
|
7512 |
self.instance.name,
|
7513 |
False)
|
7514 |
for to_node, to_result in result.items(): |
7515 |
msg = to_result.fail_msg |
7516 |
if msg:
|
7517 |
self.lu.LogWarning("Can't attach drbd disks on node %s: %s", |
7518 |
to_node, msg, |
7519 |
hint=("please do a gnt-instance info to see the"
|
7520 |
" status of disks"))
|
7521 |
cstep = 5
|
7522 |
if self.early_release: |
7523 |
self.lu.LogStep(cstep, steps_total, "Removing old storage") |
7524 |
cstep += 1
|
7525 |
self._RemoveOldStorage(self.target_node, iv_names) |
7526 |
# WARNING: we release all node locks here, do not do other RPCs
|
7527 |
# than WaitForSync to the primary node
|
7528 |
self._ReleaseNodeLock([self.instance.primary_node, |
7529 |
self.target_node,
|
7530 |
self.new_node])
|
7531 |
|
7532 |
# Wait for sync
|
7533 |
# This can fail as the old devices are degraded and _WaitForSync
|
7534 |
# does a combined result over all disks, so we don't check its return value
|
7535 |
self.lu.LogStep(cstep, steps_total, "Sync devices") |
7536 |
cstep += 1
|
7537 |
_WaitForSync(self.lu, self.instance) |
7538 |
|
7539 |
# Check all devices manually
|
7540 |
self._CheckDevices(self.instance.primary_node, iv_names) |
7541 |
|
7542 |
# Step: remove old storage
|
7543 |
if not self.early_release: |
7544 |
self.lu.LogStep(cstep, steps_total, "Removing old storage") |
7545 |
self._RemoveOldStorage(self.target_node, iv_names) |
7546 |
|
7547 |
|
7548 |
class LURepairNodeStorage(NoHooksLU): |
7549 |
"""Repairs the volume group on a node.
|
7550 |
|
7551 |
"""
|
7552 |
_OP_REQP = ["node_name"]
|
7553 |
REQ_BGL = False
|
7554 |
|
7555 |
def CheckArguments(self): |
7556 |
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) |
7557 |
|
7558 |
def ExpandNames(self): |
7559 |
self.needed_locks = {
|
7560 |
locking.LEVEL_NODE: [self.op.node_name],
|
7561 |
} |
7562 |
|
7563 |
def _CheckFaultyDisks(self, instance, node_name): |
7564 |
"""Ensure faulty disks abort the opcode or at least warn."""
|
7565 |
try:
|
7566 |
if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance, |
7567 |
node_name, True):
|
7568 |
raise errors.OpPrereqError("Instance '%s' has faulty disks on" |
7569 |
" node '%s'" % (instance.name, node_name),
|
7570 |
errors.ECODE_STATE) |
7571 |
except errors.OpPrereqError, err:
|
7572 |
if self.op.ignore_consistency: |
7573 |
self.proc.LogWarning(str(err.args[0])) |
7574 |
else:
|
7575 |
raise
|
7576 |
|
7577 |
def CheckPrereq(self): |
7578 |
"""Check prerequisites.
|
7579 |
|
7580 |
"""
|
7581 |
storage_type = self.op.storage_type
|
7582 |
|
7583 |
if (constants.SO_FIX_CONSISTENCY not in |
7584 |
constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])): |
7585 |
raise errors.OpPrereqError("Storage units of type '%s' can not be" |
7586 |
" repaired" % storage_type,
|
7587 |
errors.ECODE_INVAL) |
7588 |
|
7589 |
# Check whether any instance on this node has faulty disks
|
7590 |
for inst in _GetNodeInstances(self.cfg, self.op.node_name): |
7591 |
if not inst.admin_up: |
7592 |
continue
|
7593 |
check_nodes = set(inst.all_nodes)
|
7594 |
check_nodes.discard(self.op.node_name)
|
7595 |
for inst_node_name in check_nodes: |
7596 |
self._CheckFaultyDisks(inst, inst_node_name)
|
7597 |
|
7598 |
def Exec(self, feedback_fn): |
7599 |
feedback_fn("Repairing storage unit '%s' on %s ..." %
|
7600 |
(self.op.name, self.op.node_name)) |
7601 |
|
7602 |
st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type) |
7603 |
result = self.rpc.call_storage_execute(self.op.node_name, |
7604 |
self.op.storage_type, st_args,
|
7605 |
self.op.name,
|
7606 |
constants.SO_FIX_CONSISTENCY) |
7607 |
result.Raise("Failed to repair storage unit '%s' on %s" %
|
7608 |
(self.op.name, self.op.node_name)) |
7609 |
|
7610 |
|
7611 |
class LUNodeEvacuationStrategy(NoHooksLU): |
7612 |
"""Computes the node evacuation strategy.
|
7613 |
|
7614 |
"""
|
7615 |
_OP_REQP = ["nodes"]
|
7616 |
REQ_BGL = False
|
7617 |
|
7618 |
def CheckArguments(self): |
7619 |
if not hasattr(self.op, "remote_node"): |
7620 |
self.op.remote_node = None |
7621 |
if not hasattr(self.op, "iallocator"): |
7622 |
self.op.iallocator = None |
7623 |
if self.op.remote_node is not None and self.op.iallocator is not None: |
7624 |
raise errors.OpPrereqError("Give either the iallocator or the new" |
7625 |
" secondary, not both", errors.ECODE_INVAL)
|
7626 |
|
7627 |
def ExpandNames(self): |
7628 |
self.op.nodes = _GetWantedNodes(self, self.op.nodes) |
7629 |
self.needed_locks = locks = {}
|
7630 |
if self.op.remote_node is None: |
7631 |
locks[locking.LEVEL_NODE] = locking.ALL_SET |
7632 |
else:
|
7633 |
self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node) |
7634 |
locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node] |
7635 |
|
7636 |
def CheckPrereq(self): |
7637 |
pass
|
7638 |
|
7639 |
def Exec(self, feedback_fn): |
7640 |
if self.op.remote_node is not None: |
7641 |
instances = [] |
7642 |
for node in self.op.nodes: |
7643 |
instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
|
7644 |
result = [] |
7645 |
for i in instances: |
7646 |
if i.primary_node == self.op.remote_node: |
7647 |
raise errors.OpPrereqError("Node %s is the primary node of" |
7648 |
" instance %s, cannot use it as"
|
7649 |
" secondary" %
|
7650 |
(self.op.remote_node, i.name),
|
7651 |
errors.ECODE_INVAL) |
7652 |
result.append([i.name, self.op.remote_node])
|
7653 |
else:
|
7654 |
ial = IAllocator(self.cfg, self.rpc, |
7655 |
mode=constants.IALLOCATOR_MODE_MEVAC, |
7656 |
evac_nodes=self.op.nodes)
|
7657 |
ial.Run(self.op.iallocator, validate=True) |
7658 |
if not ial.success: |
7659 |
raise errors.OpExecError("No valid evacuation solution: %s" % ial.info, |
7660 |
errors.ECODE_NORES) |
7661 |
result = ial.result |
7662 |
return result
|
7663 |
|
7664 |
|
7665 |
class LUGrowDisk(LogicalUnit): |
7666 |
"""Grow a disk of an instance.
|
7667 |
|
7668 |
"""
|
7669 |
HPATH = "disk-grow"
|
7670 |
HTYPE = constants.HTYPE_INSTANCE |
7671 |
_OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"] |
7672 |
REQ_BGL = False
|
7673 |
|
7674 |
def ExpandNames(self): |
7675 |
self._ExpandAndLockInstance()
|
7676 |
self.needed_locks[locking.LEVEL_NODE] = []
|
7677 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
|
7678 |
|
7679 |
def DeclareLocks(self, level): |
7680 |
if level == locking.LEVEL_NODE:
|
7681 |
self._LockInstancesNodes()
|
7682 |
|
7683 |
def BuildHooksEnv(self): |
7684 |
"""Build hooks env.
|
7685 |
|
7686 |
This runs on the master, the primary and all the secondaries.
|
7687 |
|
7688 |
"""
|
7689 |
env = { |
7690 |
"DISK": self.op.disk, |
7691 |
"AMOUNT": self.op.amount, |
7692 |
} |
7693 |
env.update(_BuildInstanceHookEnvByObject(self, self.instance)) |
7694 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
7695 |
return env, nl, nl
|
7696 |
|
7697 |
def CheckPrereq(self): |
7698 |
"""Check prerequisites.
|
7699 |
|
7700 |
This checks that the instance is in the cluster.
|
7701 |
|
7702 |
"""
|
7703 |
instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
7704 |
assert instance is not None, \ |
7705 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
7706 |
nodenames = list(instance.all_nodes)
|
7707 |
for node in nodenames: |
7708 |
_CheckNodeOnline(self, node)
|
7709 |
|
7710 |
|
7711 |
self.instance = instance
|
7712 |
|
7713 |
if instance.disk_template not in constants.DTS_GROWABLE: |
7714 |
raise errors.OpPrereqError("Instance's disk layout does not support" |
7715 |
" growing.", errors.ECODE_INVAL)
|
7716 |
|
7717 |
self.disk = instance.FindDisk(self.op.disk) |
7718 |
|
7719 |
if instance.disk_template != constants.DT_FILE:
|
7720 |
# TODO: check the free disk space for file, when that feature will be
|
7721 |
# supported
|
7722 |
_CheckNodesFreeDisk(self, nodenames, self.op.amount) |
7723 |
|
7724 |
def Exec(self, feedback_fn): |
7725 |
"""Execute disk grow.
|
7726 |
|
7727 |
"""
|
7728 |
instance = self.instance
|
7729 |
disk = self.disk
|
7730 |
for node in instance.all_nodes: |
7731 |
self.cfg.SetDiskID(disk, node)
|
7732 |
result = self.rpc.call_blockdev_grow(node, disk, self.op.amount) |
7733 |
result.Raise("Grow request failed to node %s" % node)
|
7734 |
|
7735 |
# TODO: Rewrite code to work properly
|
7736 |
# DRBD goes into sync mode for a short amount of time after executing the
|
7737 |
# "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
|
7738 |
# calling "resize" in sync mode fails. Sleeping for a short amount of
|
7739 |
# time is a work-around.
|
7740 |
time.sleep(5)
|
7741 |
|
7742 |
disk.RecordGrow(self.op.amount)
|
7743 |
self.cfg.Update(instance, feedback_fn)
|
7744 |
if self.op.wait_for_sync: |
7745 |
disk_abort = not _WaitForSync(self, instance) |
7746 |
if disk_abort:
|
7747 |
self.proc.LogWarning("Warning: disk sync-ing has not returned a good" |
7748 |
" status.\nPlease check the instance.")
|
7749 |
|
7750 |
|
7751 |
class LUQueryInstanceData(NoHooksLU): |
7752 |
"""Query runtime instance data.
|
7753 |
|
7754 |
"""
|
7755 |
_OP_REQP = ["instances", "static"] |
7756 |
REQ_BGL = False
|
7757 |
|
7758 |
def ExpandNames(self): |
7759 |
self.needed_locks = {}
|
7760 |
self.share_locks = dict.fromkeys(locking.LEVELS, 1) |
7761 |
|
7762 |
if not isinstance(self.op.instances, list): |
7763 |
raise errors.OpPrereqError("Invalid argument type 'instances'", |
7764 |
errors.ECODE_INVAL) |
7765 |
|
7766 |
if self.op.instances: |
7767 |
self.wanted_names = []
|
7768 |
for name in self.op.instances: |
7769 |
full_name = _ExpandInstanceName(self.cfg, name)
|
7770 |
self.wanted_names.append(full_name)
|
7771 |
self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names |
7772 |
else:
|
7773 |
self.wanted_names = None |
7774 |
self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
|
7775 |
|
7776 |
self.needed_locks[locking.LEVEL_NODE] = []
|
7777 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
|
7778 |
|
7779 |
def DeclareLocks(self, level): |
7780 |
if level == locking.LEVEL_NODE:
|
7781 |
self._LockInstancesNodes()
|
7782 |
|
7783 |
def CheckPrereq(self): |
7784 |
"""Check prerequisites.
|
7785 |
|
7786 |
This only checks the optional instance list against the existing names.
|
7787 |
|
7788 |
"""
|
7789 |
if self.wanted_names is None: |
7790 |
self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE] |
7791 |
|
7792 |
self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name |
7793 |
in self.wanted_names] |
7794 |
return
|
7795 |
|
7796 |
def _ComputeBlockdevStatus(self, node, instance_name, dev): |
7797 |
"""Returns the status of a block device
|
7798 |
|
7799 |
"""
|
7800 |
if self.op.static or not node: |
7801 |
return None |
7802 |
|
7803 |
self.cfg.SetDiskID(dev, node)
|
7804 |
|
7805 |
result = self.rpc.call_blockdev_find(node, dev)
|
7806 |
if result.offline:
|
7807 |
return None |
7808 |
|
7809 |
result.Raise("Can't compute disk status for %s" % instance_name)
|
7810 |
|
7811 |
status = result.payload |
7812 |
if status is None: |
7813 |
return None |
7814 |
|
7815 |
return (status.dev_path, status.major, status.minor,
|
7816 |
status.sync_percent, status.estimated_time, |
7817 |
status.is_degraded, status.ldisk_status) |
7818 |
|
7819 |
def _ComputeDiskStatus(self, instance, snode, dev): |
7820 |
"""Compute block device status.
|
7821 |
|
7822 |
"""
|
7823 |
if dev.dev_type in constants.LDS_DRBD: |
7824 |
# we change the snode then (otherwise we use the one passed in)
|
7825 |
if dev.logical_id[0] == instance.primary_node: |
7826 |
snode = dev.logical_id[1]
|
7827 |
else:
|
7828 |
snode = dev.logical_id[0]
|
7829 |
|
7830 |
dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
|
7831 |
instance.name, dev) |
7832 |
dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
|
7833 |
|
7834 |
if dev.children:
|
7835 |
dev_children = [self._ComputeDiskStatus(instance, snode, child)
|
7836 |
for child in dev.children] |
7837 |
else:
|
7838 |
dev_children = [] |
7839 |
|
7840 |
data = { |
7841 |
"iv_name": dev.iv_name,
|
7842 |
"dev_type": dev.dev_type,
|
7843 |
"logical_id": dev.logical_id,
|
7844 |
"physical_id": dev.physical_id,
|
7845 |
"pstatus": dev_pstatus,
|
7846 |
"sstatus": dev_sstatus,
|
7847 |
"children": dev_children,
|
7848 |
"mode": dev.mode,
|
7849 |
"size": dev.size,
|
7850 |
} |
7851 |
|
7852 |
return data
|
7853 |
|
7854 |
def Exec(self, feedback_fn): |
7855 |
"""Gather and return data"""
|
7856 |
result = {} |
7857 |
|
7858 |
cluster = self.cfg.GetClusterInfo()
|
7859 |
|
7860 |
for instance in self.wanted_instances: |
7861 |
if not self.op.static: |
7862 |
remote_info = self.rpc.call_instance_info(instance.primary_node,
|
7863 |
instance.name, |
7864 |
instance.hypervisor) |
7865 |
remote_info.Raise("Error checking node %s" % instance.primary_node)
|
7866 |
remote_info = remote_info.payload |
7867 |
if remote_info and "state" in remote_info: |
7868 |
remote_state = "up"
|
7869 |
else:
|
7870 |
remote_state = "down"
|
7871 |
else:
|
7872 |
remote_state = None
|
7873 |
if instance.admin_up:
|
7874 |
config_state = "up"
|
7875 |
else:
|
7876 |
config_state = "down"
|
7877 |
|
7878 |
disks = [self._ComputeDiskStatus(instance, None, device) |
7879 |
for device in instance.disks] |
7880 |
|
7881 |
idict = { |
7882 |
"name": instance.name,
|
7883 |
"config_state": config_state,
|
7884 |
"run_state": remote_state,
|
7885 |
"pnode": instance.primary_node,
|
7886 |
"snodes": instance.secondary_nodes,
|
7887 |
"os": instance.os,
|
7888 |
# this happens to be the same format used for hooks
|
7889 |
"nics": _NICListToTuple(self, instance.nics), |
7890 |
"disks": disks,
|
7891 |
"hypervisor": instance.hypervisor,
|
7892 |
"network_port": instance.network_port,
|
7893 |
"hv_instance": instance.hvparams,
|
7894 |
"hv_actual": cluster.FillHV(instance, skip_globals=True), |
7895 |
"be_instance": instance.beparams,
|
7896 |
"be_actual": cluster.FillBE(instance),
|
7897 |
"serial_no": instance.serial_no,
|
7898 |
"mtime": instance.mtime,
|
7899 |
"ctime": instance.ctime,
|
7900 |
"uuid": instance.uuid,
|
7901 |
} |
7902 |
|
7903 |
result[instance.name] = idict |
7904 |
|
7905 |
return result
|
7906 |
|
7907 |
|
7908 |
class LUSetInstanceParams(LogicalUnit): |
7909 |
"""Modifies an instances's parameters.
|
7910 |
|
7911 |
"""
|
7912 |
HPATH = "instance-modify"
|
7913 |
HTYPE = constants.HTYPE_INSTANCE |
7914 |
_OP_REQP = ["instance_name"]
|
7915 |
REQ_BGL = False
|
7916 |
|
7917 |
def CheckArguments(self): |
7918 |
if not hasattr(self.op, 'nics'): |
7919 |
self.op.nics = []
|
7920 |
if not hasattr(self.op, 'disks'): |
7921 |
self.op.disks = []
|
7922 |
if not hasattr(self.op, 'beparams'): |
7923 |
self.op.beparams = {}
|
7924 |
if not hasattr(self.op, 'hvparams'): |
7925 |
self.op.hvparams = {}
|
7926 |
if not hasattr(self.op, "disk_template"): |
7927 |
self.op.disk_template = None |
7928 |
if not hasattr(self.op, "remote_node"): |
7929 |
self.op.remote_node = None |
7930 |
if not hasattr(self.op, "os_name"): |
7931 |
self.op.os_name = None |
7932 |
if not hasattr(self.op, "force_variant"): |
7933 |
self.op.force_variant = False |
7934 |
self.op.force = getattr(self.op, "force", False) |
7935 |
if not (self.op.nics or self.op.disks or self.op.disk_template or |
7936 |
self.op.hvparams or self.op.beparams or self.op.os_name): |
7937 |
raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL) |
7938 |
|
7939 |
if self.op.hvparams: |
7940 |
_CheckGlobalHvParams(self.op.hvparams)
|
7941 |
|
7942 |
# Disk validation
|
7943 |
disk_addremove = 0
|
7944 |
for disk_op, disk_dict in self.op.disks: |
7945 |
if disk_op == constants.DDM_REMOVE:
|
7946 |
disk_addremove += 1
|
7947 |
continue
|
7948 |
elif disk_op == constants.DDM_ADD:
|
7949 |
disk_addremove += 1
|
7950 |
else:
|
7951 |
if not isinstance(disk_op, int): |
7952 |
raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL) |
7953 |
if not isinstance(disk_dict, dict): |
7954 |
msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
|
7955 |
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
|
7956 |
|
7957 |
if disk_op == constants.DDM_ADD:
|
7958 |
mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
|
7959 |
if mode not in constants.DISK_ACCESS_SET: |
7960 |
raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode, |
7961 |
errors.ECODE_INVAL) |
7962 |
size = disk_dict.get('size', None) |
7963 |
if size is None: |
7964 |
raise errors.OpPrereqError("Required disk parameter size missing", |
7965 |
errors.ECODE_INVAL) |
7966 |
try:
|
7967 |
size = int(size)
|
7968 |
except (TypeError, ValueError), err: |
7969 |
raise errors.OpPrereqError("Invalid disk size parameter: %s" % |
7970 |
str(err), errors.ECODE_INVAL)
|
7971 |
disk_dict['size'] = size
|
7972 |
else:
|
7973 |
# modification of disk
|
7974 |
if 'size' in disk_dict: |
7975 |
raise errors.OpPrereqError("Disk size change not possible, use" |
7976 |
" grow-disk", errors.ECODE_INVAL)
|
7977 |
|
7978 |
if disk_addremove > 1: |
7979 |
raise errors.OpPrereqError("Only one disk add or remove operation" |
7980 |
" supported at a time", errors.ECODE_INVAL)
|
7981 |
|
7982 |
if self.op.disks and self.op.disk_template is not None: |
7983 |
raise errors.OpPrereqError("Disk template conversion and other disk" |
7984 |
" changes not supported at the same time",
|
7985 |
errors.ECODE_INVAL) |
7986 |
|
7987 |
if self.op.disk_template: |
7988 |
_CheckDiskTemplate(self.op.disk_template)
|
7989 |
if (self.op.disk_template in constants.DTS_NET_MIRROR and |
7990 |
self.op.remote_node is None): |
7991 |
raise errors.OpPrereqError("Changing the disk template to a mirrored" |
7992 |
" one requires specifying a secondary node",
|
7993 |
errors.ECODE_INVAL) |
7994 |
|
7995 |
# NIC validation
|
7996 |
nic_addremove = 0
|
7997 |
for nic_op, nic_dict in self.op.nics: |
7998 |
if nic_op == constants.DDM_REMOVE:
|
7999 |
nic_addremove += 1
|
8000 |
continue
|
8001 |
elif nic_op == constants.DDM_ADD:
|
8002 |
nic_addremove += 1
|
8003 |
else:
|
8004 |
if not isinstance(nic_op, int): |
8005 |
raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL) |
8006 |
if not isinstance(nic_dict, dict): |
8007 |
msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
|
8008 |
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
|
8009 |
|
8010 |
# nic_dict should be a dict
|
8011 |
nic_ip = nic_dict.get('ip', None) |
8012 |
if nic_ip is not None: |
8013 |
if nic_ip.lower() == constants.VALUE_NONE:
|
8014 |
nic_dict['ip'] = None |
8015 |
else:
|
8016 |
if not utils.IsValidIP(nic_ip): |
8017 |
raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip, |
8018 |
errors.ECODE_INVAL) |
8019 |
|
8020 |
nic_bridge = nic_dict.get('bridge', None) |
8021 |
nic_link = nic_dict.get('link', None) |
8022 |
if nic_bridge and nic_link: |
8023 |
raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'" |
8024 |
" at the same time", errors.ECODE_INVAL)
|
8025 |
elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE: |
8026 |
nic_dict['bridge'] = None |
8027 |
elif nic_link and nic_link.lower() == constants.VALUE_NONE: |
8028 |
nic_dict['link'] = None |
8029 |
|
8030 |
if nic_op == constants.DDM_ADD:
|
8031 |
nic_mac = nic_dict.get('mac', None) |
8032 |
if nic_mac is None: |
8033 |
nic_dict['mac'] = constants.VALUE_AUTO
|
8034 |
|
8035 |
if 'mac' in nic_dict: |
8036 |
nic_mac = nic_dict['mac']
|
8037 |
if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): |
8038 |
nic_mac = utils.NormalizeAndValidateMac(nic_mac) |
8039 |
|
8040 |
if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO: |
8041 |
raise errors.OpPrereqError("'auto' is not a valid MAC address when" |
8042 |
" modifying an existing nic",
|
8043 |
errors.ECODE_INVAL) |
8044 |
|
8045 |
if nic_addremove > 1: |
8046 |
raise errors.OpPrereqError("Only one NIC add or remove operation" |
8047 |
" supported at a time", errors.ECODE_INVAL)
|
8048 |
|
8049 |
def ExpandNames(self): |
8050 |
self._ExpandAndLockInstance()
|
8051 |
self.needed_locks[locking.LEVEL_NODE] = []
|
8052 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
|
8053 |
|
8054 |
def DeclareLocks(self, level): |
8055 |
if level == locking.LEVEL_NODE:
|
8056 |
self._LockInstancesNodes()
|
8057 |
if self.op.disk_template and self.op.remote_node: |
8058 |
self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node) |
8059 |
self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node) |
8060 |
|
8061 |
def BuildHooksEnv(self): |
8062 |
"""Build hooks env.
|
8063 |
|
8064 |
This runs on the master, primary and secondaries.
|
8065 |
|
8066 |
"""
|
8067 |
args = dict()
|
8068 |
if constants.BE_MEMORY in self.be_new: |
8069 |
args['memory'] = self.be_new[constants.BE_MEMORY] |
8070 |
if constants.BE_VCPUS in self.be_new: |
8071 |
args['vcpus'] = self.be_new[constants.BE_VCPUS] |
8072 |
# TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
|
8073 |
# information at all.
|
8074 |
if self.op.nics: |
8075 |
args['nics'] = []
|
8076 |
nic_override = dict(self.op.nics) |
8077 |
c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
|
8078 |
for idx, nic in enumerate(self.instance.nics): |
8079 |
if idx in nic_override: |
8080 |
this_nic_override = nic_override[idx] |
8081 |
else:
|
8082 |
this_nic_override = {} |
8083 |
if 'ip' in this_nic_override: |
8084 |
ip = this_nic_override['ip']
|
8085 |
else:
|
8086 |
ip = nic.ip |
8087 |
if 'mac' in this_nic_override: |
8088 |
mac = this_nic_override['mac']
|
8089 |
else:
|
8090 |
mac = nic.mac |
8091 |
if idx in self.nic_pnew: |
8092 |
nicparams = self.nic_pnew[idx]
|
8093 |
else:
|
8094 |
nicparams = objects.FillDict(c_nicparams, nic.nicparams) |
8095 |
mode = nicparams[constants.NIC_MODE] |
8096 |
link = nicparams[constants.NIC_LINK] |
8097 |
args['nics'].append((ip, mac, mode, link))
|
8098 |
if constants.DDM_ADD in nic_override: |
8099 |
ip = nic_override[constants.DDM_ADD].get('ip', None) |
8100 |
mac = nic_override[constants.DDM_ADD]['mac']
|
8101 |
nicparams = self.nic_pnew[constants.DDM_ADD]
|
8102 |
mode = nicparams[constants.NIC_MODE] |
8103 |
link = nicparams[constants.NIC_LINK] |
8104 |
args['nics'].append((ip, mac, mode, link))
|
8105 |
elif constants.DDM_REMOVE in nic_override: |
8106 |
del args['nics'][-1] |
8107 |
|
8108 |
env = _BuildInstanceHookEnvByObject(self, self.instance, override=args) |
8109 |
if self.op.disk_template: |
8110 |
env["NEW_DISK_TEMPLATE"] = self.op.disk_template |
8111 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
8112 |
return env, nl, nl
|
8113 |
|
8114 |
@staticmethod
|
8115 |
def _GetUpdatedParams(old_params, update_dict, |
8116 |
default_values, parameter_types): |
8117 |
"""Return the new params dict for the given params.
|
8118 |
|
8119 |
@type old_params: dict
|
8120 |
@param old_params: old parameters
|
8121 |
@type update_dict: dict
|
8122 |
@param update_dict: dict containing new parameter values,
|
8123 |
or constants.VALUE_DEFAULT to reset the
|
8124 |
parameter to its default value
|
8125 |
@type default_values: dict
|
8126 |
@param default_values: default values for the filled parameters
|
8127 |
@type parameter_types: dict
|
8128 |
@param parameter_types: dict mapping target dict keys to types
|
8129 |
in constants.ENFORCEABLE_TYPES
|
8130 |
@rtype: (dict, dict)
|
8131 |
@return: (new_parameters, filled_parameters)
|
8132 |
|
8133 |
"""
|
8134 |
params_copy = copy.deepcopy(old_params) |
8135 |
for key, val in update_dict.iteritems(): |
8136 |
if val == constants.VALUE_DEFAULT:
|
8137 |
try:
|
8138 |
del params_copy[key]
|
8139 |
except KeyError: |
8140 |
pass
|
8141 |
else:
|
8142 |
params_copy[key] = val |
8143 |
utils.ForceDictType(params_copy, parameter_types) |
8144 |
params_filled = objects.FillDict(default_values, params_copy) |
8145 |
return (params_copy, params_filled)
|
8146 |
|
8147 |
def CheckPrereq(self): |
8148 |
"""Check prerequisites.
|
8149 |
|
8150 |
This only checks the instance list against the existing names.
|
8151 |
|
8152 |
"""
|
8153 |
self.force = self.op.force |
8154 |
|
8155 |
# checking the new params on the primary/secondary nodes
|
8156 |
|
8157 |
instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
8158 |
cluster = self.cluster = self.cfg.GetClusterInfo() |
8159 |
assert self.instance is not None, \ |
8160 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
8161 |
pnode = instance.primary_node |
8162 |
nodelist = list(instance.all_nodes)
|
8163 |
|
8164 |
if self.op.disk_template: |
8165 |
if instance.disk_template == self.op.disk_template: |
8166 |
raise errors.OpPrereqError("Instance already has disk template %s" % |
8167 |
instance.disk_template, errors.ECODE_INVAL) |
8168 |
|
8169 |
if (instance.disk_template,
|
8170 |
self.op.disk_template) not in self._DISK_CONVERSIONS: |
8171 |
raise errors.OpPrereqError("Unsupported disk template conversion from" |
8172 |
" %s to %s" % (instance.disk_template,
|
8173 |
self.op.disk_template),
|
8174 |
errors.ECODE_INVAL) |
8175 |
if self.op.disk_template in constants.DTS_NET_MIRROR: |
8176 |
_CheckNodeOnline(self, self.op.remote_node) |
8177 |
_CheckNodeNotDrained(self, self.op.remote_node) |
8178 |
disks = [{"size": d.size} for d in instance.disks] |
8179 |
required = _ComputeDiskSize(self.op.disk_template, disks)
|
8180 |
_CheckNodesFreeDisk(self, [self.op.remote_node], required) |
8181 |
_CheckInstanceDown(self, instance, "cannot change disk template") |
8182 |
|
8183 |
# hvparams processing
|
8184 |
if self.op.hvparams: |
8185 |
i_hvdict, hv_new = self._GetUpdatedParams(
|
8186 |
instance.hvparams, self.op.hvparams,
|
8187 |
cluster.hvparams[instance.hypervisor], |
8188 |
constants.HVS_PARAMETER_TYPES) |
8189 |
# local check
|
8190 |
hypervisor.GetHypervisor( |
8191 |
instance.hypervisor).CheckParameterSyntax(hv_new) |
8192 |
_CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
|
8193 |
self.hv_new = hv_new # the new actual values |
8194 |
self.hv_inst = i_hvdict # the new dict (without defaults) |
8195 |
else:
|
8196 |
self.hv_new = self.hv_inst = {} |
8197 |
|
8198 |
# beparams processing
|
8199 |
if self.op.beparams: |
8200 |
i_bedict, be_new = self._GetUpdatedParams(
|
8201 |
instance.beparams, self.op.beparams,
|
8202 |
cluster.beparams[constants.PP_DEFAULT], |
8203 |
constants.BES_PARAMETER_TYPES) |
8204 |
self.be_new = be_new # the new actual values |
8205 |
self.be_inst = i_bedict # the new dict (without defaults) |
8206 |
else:
|
8207 |
self.be_new = self.be_inst = {} |
8208 |
|
8209 |
self.warn = []
|
8210 |
|
8211 |
if constants.BE_MEMORY in self.op.beparams and not self.force: |
8212 |
mem_check_list = [pnode] |
8213 |
if be_new[constants.BE_AUTO_BALANCE]:
|
8214 |
# either we changed auto_balance to yes or it was from before
|
8215 |
mem_check_list.extend(instance.secondary_nodes) |
8216 |
instance_info = self.rpc.call_instance_info(pnode, instance.name,
|
8217 |
instance.hypervisor) |
8218 |
nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(), |
8219 |
instance.hypervisor) |
8220 |
pninfo = nodeinfo[pnode] |
8221 |
msg = pninfo.fail_msg |
8222 |
if msg:
|
8223 |
# Assume the primary node is unreachable and go ahead
|
8224 |
self.warn.append("Can't get info from primary node %s: %s" % |
8225 |
(pnode, msg)) |
8226 |
elif not isinstance(pninfo.payload.get('memory_free', None), int): |
8227 |
self.warn.append("Node data from primary node %s doesn't contain" |
8228 |
" free memory information" % pnode)
|
8229 |
elif instance_info.fail_msg:
|
8230 |
self.warn.append("Can't get instance runtime information: %s" % |
8231 |
instance_info.fail_msg) |
8232 |
else:
|
8233 |
if instance_info.payload:
|
8234 |
current_mem = int(instance_info.payload['memory']) |
8235 |
else:
|
8236 |
# Assume instance not running
|
8237 |
# (there is a slight race condition here, but it's not very probable,
|
8238 |
# and we have no other way to check)
|
8239 |
current_mem = 0
|
8240 |
miss_mem = (be_new[constants.BE_MEMORY] - current_mem - |
8241 |
pninfo.payload['memory_free'])
|
8242 |
if miss_mem > 0: |
8243 |
raise errors.OpPrereqError("This change will prevent the instance" |
8244 |
" from starting, due to %d MB of memory"
|
8245 |
" missing on its primary node" % miss_mem,
|
8246 |
errors.ECODE_NORES) |
8247 |
|
8248 |
if be_new[constants.BE_AUTO_BALANCE]:
|
8249 |
for node, nres in nodeinfo.items(): |
8250 |
if node not in instance.secondary_nodes: |
8251 |
continue
|
8252 |
msg = nres.fail_msg |
8253 |
if msg:
|
8254 |
self.warn.append("Can't get info from secondary node %s: %s" % |
8255 |
(node, msg)) |
8256 |
elif not isinstance(nres.payload.get('memory_free', None), int): |
8257 |
self.warn.append("Secondary node %s didn't return free" |
8258 |
" memory information" % node)
|
8259 |
elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']: |
8260 |
self.warn.append("Not enough memory to failover instance to" |
8261 |
" secondary node %s" % node)
|
8262 |
|
8263 |
# NIC processing
|
8264 |
self.nic_pnew = {}
|
8265 |
self.nic_pinst = {}
|
8266 |
for nic_op, nic_dict in self.op.nics: |
8267 |
if nic_op == constants.DDM_REMOVE:
|
8268 |
if not instance.nics: |
8269 |
raise errors.OpPrereqError("Instance has no NICs, cannot remove", |
8270 |
errors.ECODE_INVAL) |
8271 |
continue
|
8272 |
if nic_op != constants.DDM_ADD:
|
8273 |
# an existing nic
|
8274 |
if not instance.nics: |
8275 |
raise errors.OpPrereqError("Invalid NIC index %s, instance has" |
8276 |
" no NICs" % nic_op,
|
8277 |
errors.ECODE_INVAL) |
8278 |
if nic_op < 0 or nic_op >= len(instance.nics): |
8279 |
raise errors.OpPrereqError("Invalid NIC index %s, valid values" |
8280 |
" are 0 to %d" %
|
8281 |
(nic_op, len(instance.nics) - 1), |
8282 |
errors.ECODE_INVAL) |
8283 |
old_nic_params = instance.nics[nic_op].nicparams |
8284 |
old_nic_ip = instance.nics[nic_op].ip |
8285 |
else:
|
8286 |
old_nic_params = {} |
8287 |
old_nic_ip = None
|
8288 |
|
8289 |
update_params_dict = dict([(key, nic_dict[key])
|
8290 |
for key in constants.NICS_PARAMETERS |
8291 |
if key in nic_dict]) |
8292 |
|
8293 |
if 'bridge' in nic_dict: |
8294 |
update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
|
8295 |
|
8296 |
new_nic_params, new_filled_nic_params = \ |
8297 |
self._GetUpdatedParams(old_nic_params, update_params_dict,
|
8298 |
cluster.nicparams[constants.PP_DEFAULT], |
8299 |
constants.NICS_PARAMETER_TYPES) |
8300 |
objects.NIC.CheckParameterSyntax(new_filled_nic_params) |
8301 |
self.nic_pinst[nic_op] = new_nic_params
|
8302 |
self.nic_pnew[nic_op] = new_filled_nic_params
|
8303 |
new_nic_mode = new_filled_nic_params[constants.NIC_MODE] |
8304 |
|
8305 |
if new_nic_mode == constants.NIC_MODE_BRIDGED:
|
8306 |
nic_bridge = new_filled_nic_params[constants.NIC_LINK] |
8307 |
msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
|
8308 |
if msg:
|
8309 |
msg = "Error checking bridges on node %s: %s" % (pnode, msg)
|
8310 |
if self.force: |
8311 |
self.warn.append(msg)
|
8312 |
else:
|
8313 |
raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
|
8314 |
if new_nic_mode == constants.NIC_MODE_ROUTED:
|
8315 |
if 'ip' in nic_dict: |
8316 |
nic_ip = nic_dict['ip']
|
8317 |
else:
|
8318 |
nic_ip = old_nic_ip |
8319 |
if nic_ip is None: |
8320 |
raise errors.OpPrereqError('Cannot set the nic ip to None' |
8321 |
' on a routed nic', errors.ECODE_INVAL)
|
8322 |
if 'mac' in nic_dict: |
8323 |
nic_mac = nic_dict['mac']
|
8324 |
if nic_mac is None: |
8325 |
raise errors.OpPrereqError('Cannot set the nic mac to None', |
8326 |
errors.ECODE_INVAL) |
8327 |
elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE): |
8328 |
# otherwise generate the mac
|
8329 |
nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId()) |
8330 |
else:
|
8331 |
# or validate/reserve the current one
|
8332 |
try:
|
8333 |
self.cfg.ReserveMAC(nic_mac, self.proc.GetECId()) |
8334 |
except errors.ReservationError:
|
8335 |
raise errors.OpPrereqError("MAC address %s already in use" |
8336 |
" in cluster" % nic_mac,
|
8337 |
errors.ECODE_NOTUNIQUE) |
8338 |
|
8339 |
# DISK processing
|
8340 |
if self.op.disks and instance.disk_template == constants.DT_DISKLESS: |
8341 |
raise errors.OpPrereqError("Disk operations not supported for" |
8342 |
" diskless instances",
|
8343 |
errors.ECODE_INVAL) |
8344 |
for disk_op, _ in self.op.disks: |
8345 |
if disk_op == constants.DDM_REMOVE:
|
8346 |
if len(instance.disks) == 1: |
8347 |
raise errors.OpPrereqError("Cannot remove the last disk of" |
8348 |
" an instance", errors.ECODE_INVAL)
|
8349 |
_CheckInstanceDown(self, instance, "cannot remove disks") |
8350 |
|
8351 |
if (disk_op == constants.DDM_ADD and |
8352 |
len(instance.nics) >= constants.MAX_DISKS):
|
8353 |
raise errors.OpPrereqError("Instance has too many disks (%d), cannot" |
8354 |
" add more" % constants.MAX_DISKS,
|
8355 |
errors.ECODE_STATE) |
8356 |
if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE): |
8357 |
# an existing disk
|
8358 |
if disk_op < 0 or disk_op >= len(instance.disks): |
8359 |
raise errors.OpPrereqError("Invalid disk index %s, valid values" |
8360 |
" are 0 to %d" %
|
8361 |
(disk_op, len(instance.disks)),
|
8362 |
errors.ECODE_INVAL) |
8363 |
|
8364 |
# OS change
|
8365 |
if self.op.os_name and not self.op.force: |
8366 |
_CheckNodeHasOS(self, instance.primary_node, self.op.os_name, |
8367 |
self.op.force_variant)
|
8368 |
|
8369 |
return
|
8370 |
|
8371 |
def _ConvertPlainToDrbd(self, feedback_fn): |
8372 |
"""Converts an instance from plain to drbd.
|
8373 |
|
8374 |
"""
|
8375 |
feedback_fn("Converting template to drbd")
|
8376 |
instance = self.instance
|
8377 |
pnode = instance.primary_node |
8378 |
snode = self.op.remote_node
|
8379 |
|
8380 |
# create a fake disk info for _GenerateDiskTemplate
|
8381 |
disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks] |
8382 |
new_disks = _GenerateDiskTemplate(self, self.op.disk_template, |
8383 |
instance.name, pnode, [snode], |
8384 |
disk_info, None, None, 0) |
8385 |
info = _GetInstanceInfoText(instance) |
8386 |
feedback_fn("Creating aditional volumes...")
|
8387 |
# first, create the missing data and meta devices
|
8388 |
for disk in new_disks: |
8389 |
# unfortunately this is... not too nice
|
8390 |
_CreateSingleBlockDev(self, pnode, instance, disk.children[1], |
8391 |
info, True)
|
8392 |
for child in disk.children: |
8393 |
_CreateSingleBlockDev(self, snode, instance, child, info, True) |
8394 |
# at this stage, all new LVs have been created, we can rename the
|
8395 |
# old ones
|
8396 |
feedback_fn("Renaming original volumes...")
|
8397 |
rename_list = [(o, n.children[0].logical_id)
|
8398 |
for (o, n) in zip(instance.disks, new_disks)] |
8399 |
result = self.rpc.call_blockdev_rename(pnode, rename_list)
|
8400 |
result.Raise("Failed to rename original LVs")
|
8401 |
|
8402 |
feedback_fn("Initializing DRBD devices...")
|
8403 |
# all child devices are in place, we can now create the DRBD devices
|
8404 |
for disk in new_disks: |
8405 |
for node in [pnode, snode]: |
8406 |
f_create = node == pnode |
8407 |
_CreateSingleBlockDev(self, node, instance, disk, info, f_create)
|
8408 |
|
8409 |
# at this point, the instance has been modified
|
8410 |
instance.disk_template = constants.DT_DRBD8 |
8411 |
instance.disks = new_disks |
8412 |
self.cfg.Update(instance, feedback_fn)
|
8413 |
|
8414 |
# disks are created, waiting for sync
|
8415 |
disk_abort = not _WaitForSync(self, instance) |
8416 |
if disk_abort:
|
8417 |
raise errors.OpExecError("There are some degraded disks for" |
8418 |
" this instance, please cleanup manually")
|
8419 |
|
8420 |
def _ConvertDrbdToPlain(self, feedback_fn): |
8421 |
"""Converts an instance from drbd to plain.
|
8422 |
|
8423 |
"""
|
8424 |
instance = self.instance
|
8425 |
assert len(instance.secondary_nodes) == 1 |
8426 |
pnode = instance.primary_node |
8427 |
snode = instance.secondary_nodes[0]
|
8428 |
feedback_fn("Converting template to plain")
|
8429 |
|
8430 |
old_disks = instance.disks |
8431 |
new_disks = [d.children[0] for d in old_disks] |
8432 |
|
8433 |
# copy over size and mode
|
8434 |
for parent, child in zip(old_disks, new_disks): |
8435 |
child.size = parent.size |
8436 |
child.mode = parent.mode |
8437 |
|
8438 |
# update instance structure
|
8439 |
instance.disks = new_disks |
8440 |
instance.disk_template = constants.DT_PLAIN |
8441 |
self.cfg.Update(instance, feedback_fn)
|
8442 |
|
8443 |
feedback_fn("Removing volumes on the secondary node...")
|
8444 |
for disk in old_disks: |
8445 |
self.cfg.SetDiskID(disk, snode)
|
8446 |
msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
|
8447 |
if msg:
|
8448 |
self.LogWarning("Could not remove block device %s on node %s," |
8449 |
" continuing anyway: %s", disk.iv_name, snode, msg)
|
8450 |
|
8451 |
feedback_fn("Removing unneeded volumes on the primary node...")
|
8452 |
for idx, disk in enumerate(old_disks): |
8453 |
meta = disk.children[1]
|
8454 |
self.cfg.SetDiskID(meta, pnode)
|
8455 |
msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
|
8456 |
if msg:
|
8457 |
self.LogWarning("Could not remove metadata for disk %d on node %s," |
8458 |
" continuing anyway: %s", idx, pnode, msg)
|
8459 |
|
8460 |
|
8461 |
def Exec(self, feedback_fn): |
8462 |
"""Modifies an instance.
|
8463 |
|
8464 |
All parameters take effect only at the next restart of the instance.
|
8465 |
|
8466 |
"""
|
8467 |
# Process here the warnings from CheckPrereq, as we don't have a
|
8468 |
# feedback_fn there.
|
8469 |
for warn in self.warn: |
8470 |
feedback_fn("WARNING: %s" % warn)
|
8471 |
|
8472 |
result = [] |
8473 |
instance = self.instance
|
8474 |
# disk changes
|
8475 |
for disk_op, disk_dict in self.op.disks: |
8476 |
if disk_op == constants.DDM_REMOVE:
|
8477 |
# remove the last disk
|
8478 |
device = instance.disks.pop() |
8479 |
device_idx = len(instance.disks)
|
8480 |
for node, disk in device.ComputeNodeTree(instance.primary_node): |
8481 |
self.cfg.SetDiskID(disk, node)
|
8482 |
msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
|
8483 |
if msg:
|
8484 |
self.LogWarning("Could not remove disk/%d on node %s: %s," |
8485 |
" continuing anyway", device_idx, node, msg)
|
8486 |
result.append(("disk/%d" % device_idx, "remove")) |
8487 |
elif disk_op == constants.DDM_ADD:
|
8488 |
# add a new disk
|
8489 |
if instance.disk_template == constants.DT_FILE:
|
8490 |
file_driver, file_path = instance.disks[0].logical_id
|
8491 |
file_path = os.path.dirname(file_path) |
8492 |
else:
|
8493 |
file_driver = file_path = None
|
8494 |
disk_idx_base = len(instance.disks)
|
8495 |
new_disk = _GenerateDiskTemplate(self,
|
8496 |
instance.disk_template, |
8497 |
instance.name, instance.primary_node, |
8498 |
instance.secondary_nodes, |
8499 |
[disk_dict], |
8500 |
file_path, |
8501 |
file_driver, |
8502 |
disk_idx_base)[0]
|
8503 |
instance.disks.append(new_disk) |
8504 |
info = _GetInstanceInfoText(instance) |
8505 |
|
8506 |
logging.info("Creating volume %s for instance %s",
|
8507 |
new_disk.iv_name, instance.name) |
8508 |
# Note: this needs to be kept in sync with _CreateDisks
|
8509 |
#HARDCODE
|
8510 |
for node in instance.all_nodes: |
8511 |
f_create = node == instance.primary_node |
8512 |
try:
|
8513 |
_CreateBlockDev(self, node, instance, new_disk,
|
8514 |
f_create, info, f_create) |
8515 |
except errors.OpExecError, err:
|
8516 |
self.LogWarning("Failed to create volume %s (%s) on" |
8517 |
" node %s: %s",
|
8518 |
new_disk.iv_name, new_disk, node, err) |
8519 |
result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" % |
8520 |
(new_disk.size, new_disk.mode))) |
8521 |
else:
|
8522 |
# change a given disk
|
8523 |
instance.disks[disk_op].mode = disk_dict['mode']
|
8524 |
result.append(("disk.mode/%d" % disk_op, disk_dict['mode'])) |
8525 |
|
8526 |
if self.op.disk_template: |
8527 |
r_shut = _ShutdownInstanceDisks(self, instance)
|
8528 |
if not r_shut: |
8529 |
raise errors.OpExecError("Cannot shutdow instance disks, unable to" |
8530 |
" proceed with disk template conversion")
|
8531 |
mode = (instance.disk_template, self.op.disk_template)
|
8532 |
try:
|
8533 |
self._DISK_CONVERSIONS[mode](self, feedback_fn) |
8534 |
except:
|
8535 |
self.cfg.ReleaseDRBDMinors(instance.name)
|
8536 |
raise
|
8537 |
result.append(("disk_template", self.op.disk_template)) |
8538 |
|
8539 |
# NIC changes
|
8540 |
for nic_op, nic_dict in self.op.nics: |
8541 |
if nic_op == constants.DDM_REMOVE:
|
8542 |
# remove the last nic
|
8543 |
del instance.nics[-1] |
8544 |
result.append(("nic.%d" % len(instance.nics), "remove")) |
8545 |
elif nic_op == constants.DDM_ADD:
|
8546 |
# mac and bridge should be set, by now
|
8547 |
mac = nic_dict['mac']
|
8548 |
ip = nic_dict.get('ip', None) |
8549 |
nicparams = self.nic_pinst[constants.DDM_ADD]
|
8550 |
new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams) |
8551 |
instance.nics.append(new_nic) |
8552 |
result.append(("nic.%d" % (len(instance.nics) - 1), |
8553 |
"add:mac=%s,ip=%s,mode=%s,link=%s" %
|
8554 |
(new_nic.mac, new_nic.ip, |
8555 |
self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
|
8556 |
self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
|
8557 |
))) |
8558 |
else:
|
8559 |
for key in 'mac', 'ip': |
8560 |
if key in nic_dict: |
8561 |
setattr(instance.nics[nic_op], key, nic_dict[key])
|
8562 |
if nic_op in self.nic_pinst: |
8563 |
instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
|
8564 |
for key, val in nic_dict.iteritems(): |
8565 |
result.append(("nic.%s/%d" % (key, nic_op), val))
|
8566 |
|
8567 |
# hvparams changes
|
8568 |
if self.op.hvparams: |
8569 |
instance.hvparams = self.hv_inst
|
8570 |
for key, val in self.op.hvparams.iteritems(): |
8571 |
result.append(("hv/%s" % key, val))
|
8572 |
|
8573 |
# beparams changes
|
8574 |
if self.op.beparams: |
8575 |
instance.beparams = self.be_inst
|
8576 |
for key, val in self.op.beparams.iteritems(): |
8577 |
result.append(("be/%s" % key, val))
|
8578 |
|
8579 |
# OS change
|
8580 |
if self.op.os_name: |
8581 |
instance.os = self.op.os_name
|
8582 |
|
8583 |
self.cfg.Update(instance, feedback_fn)
|
8584 |
|
8585 |
return result
|
8586 |
|
8587 |
_DISK_CONVERSIONS = { |
8588 |
(constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd, |
8589 |
(constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain, |
8590 |
} |
8591 |
|
8592 |
class LUQueryExports(NoHooksLU): |
8593 |
"""Query the exports list
|
8594 |
|
8595 |
"""
|
8596 |
_OP_REQP = ['nodes']
|
8597 |
REQ_BGL = False
|
8598 |
|
8599 |
def ExpandNames(self): |
8600 |
self.needed_locks = {}
|
8601 |
self.share_locks[locking.LEVEL_NODE] = 1 |
8602 |
if not self.op.nodes: |
8603 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
8604 |
else:
|
8605 |
self.needed_locks[locking.LEVEL_NODE] = \
|
8606 |
_GetWantedNodes(self, self.op.nodes) |
8607 |
|
8608 |
def CheckPrereq(self): |
8609 |
"""Check prerequisites.
|
8610 |
|
8611 |
"""
|
8612 |
self.nodes = self.acquired_locks[locking.LEVEL_NODE] |
8613 |
|
8614 |
def Exec(self, feedback_fn): |
8615 |
"""Compute the list of all the exported system images.
|
8616 |
|
8617 |
@rtype: dict
|
8618 |
@return: a dictionary with the structure node->(export-list)
|
8619 |
where export-list is a list of the instances exported on
|
8620 |
that node.
|
8621 |
|
8622 |
"""
|
8623 |
rpcresult = self.rpc.call_export_list(self.nodes) |
8624 |
result = {} |
8625 |
for node in rpcresult: |
8626 |
if rpcresult[node].fail_msg:
|
8627 |
result[node] = False
|
8628 |
else:
|
8629 |
result[node] = rpcresult[node].payload |
8630 |
|
8631 |
return result
|
8632 |
|
8633 |
|
8634 |
class LUExportInstance(LogicalUnit): |
8635 |
"""Export an instance to an image in the cluster.
|
8636 |
|
8637 |
"""
|
8638 |
HPATH = "instance-export"
|
8639 |
HTYPE = constants.HTYPE_INSTANCE |
8640 |
_OP_REQP = ["instance_name", "target_node", "shutdown"] |
8641 |
REQ_BGL = False
|
8642 |
|
8643 |
def CheckArguments(self): |
8644 |
"""Check the arguments.
|
8645 |
|
8646 |
"""
|
8647 |
self.shutdown_timeout = getattr(self.op, "shutdown_timeout", |
8648 |
constants.DEFAULT_SHUTDOWN_TIMEOUT) |
8649 |
|
8650 |
def ExpandNames(self): |
8651 |
self._ExpandAndLockInstance()
|
8652 |
# FIXME: lock only instance primary and destination node
|
8653 |
#
|
8654 |
# Sad but true, for now we have do lock all nodes, as we don't know where
|
8655 |
# the previous export might be, and and in this LU we search for it and
|
8656 |
# remove it from its current node. In the future we could fix this by:
|
8657 |
# - making a tasklet to search (share-lock all), then create the new one,
|
8658 |
# then one to remove, after
|
8659 |
# - removing the removal operation altogether
|
8660 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
8661 |
|
8662 |
def DeclareLocks(self, level): |
8663 |
"""Last minute lock declaration."""
|
8664 |
# All nodes are locked anyway, so nothing to do here.
|
8665 |
|
8666 |
def BuildHooksEnv(self): |
8667 |
"""Build hooks env.
|
8668 |
|
8669 |
This will run on the master, primary node and target node.
|
8670 |
|
8671 |
"""
|
8672 |
env = { |
8673 |
"EXPORT_NODE": self.op.target_node, |
8674 |
"EXPORT_DO_SHUTDOWN": self.op.shutdown, |
8675 |
"SHUTDOWN_TIMEOUT": self.shutdown_timeout, |
8676 |
} |
8677 |
env.update(_BuildInstanceHookEnvByObject(self, self.instance)) |
8678 |
nl = [self.cfg.GetMasterNode(), self.instance.primary_node, |
8679 |
self.op.target_node]
|
8680 |
return env, nl, nl
|
8681 |
|
8682 |
def CheckPrereq(self): |
8683 |
"""Check prerequisites.
|
8684 |
|
8685 |
This checks that the instance and node names are valid.
|
8686 |
|
8687 |
"""
|
8688 |
instance_name = self.op.instance_name
|
8689 |
self.instance = self.cfg.GetInstanceInfo(instance_name) |
8690 |
assert self.instance is not None, \ |
8691 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
8692 |
_CheckNodeOnline(self, self.instance.primary_node) |
8693 |
|
8694 |
self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node) |
8695 |
self.dst_node = self.cfg.GetNodeInfo(self.op.target_node) |
8696 |
assert self.dst_node is not None |
8697 |
|
8698 |
_CheckNodeOnline(self, self.dst_node.name) |
8699 |
_CheckNodeNotDrained(self, self.dst_node.name) |
8700 |
|
8701 |
# instance disk type verification
|
8702 |
for disk in self.instance.disks: |
8703 |
if disk.dev_type == constants.LD_FILE:
|
8704 |
raise errors.OpPrereqError("Export not supported for instances with" |
8705 |
" file-based disks", errors.ECODE_INVAL)
|
8706 |
|
8707 |
def Exec(self, feedback_fn): |
8708 |
"""Export an instance to an image in the cluster.
|
8709 |
|
8710 |
"""
|
8711 |
instance = self.instance
|
8712 |
dst_node = self.dst_node
|
8713 |
src_node = instance.primary_node |
8714 |
|
8715 |
if self.op.shutdown: |
8716 |
# shutdown the instance, but not the disks
|
8717 |
feedback_fn("Shutting down instance %s" % instance.name)
|
8718 |
result = self.rpc.call_instance_shutdown(src_node, instance,
|
8719 |
self.shutdown_timeout)
|
8720 |
result.Raise("Could not shutdown instance %s on"
|
8721 |
" node %s" % (instance.name, src_node))
|
8722 |
|
8723 |
vgname = self.cfg.GetVGName()
|
8724 |
|
8725 |
snap_disks = [] |
8726 |
|
8727 |
# set the disks ID correctly since call_instance_start needs the
|
8728 |
# correct drbd minor to create the symlinks
|
8729 |
for disk in instance.disks: |
8730 |
self.cfg.SetDiskID(disk, src_node)
|
8731 |
|
8732 |
activate_disks = (not instance.admin_up)
|
8733 |
|
8734 |
if activate_disks:
|
8735 |
# Activate the instance disks if we'exporting a stopped instance
|
8736 |
feedback_fn("Activating disks for %s" % instance.name)
|
8737 |
_StartInstanceDisks(self, instance, None) |
8738 |
|
8739 |
try:
|
8740 |
# per-disk results
|
8741 |
dresults = [] |
8742 |
try:
|
8743 |
for idx, disk in enumerate(instance.disks): |
8744 |
feedback_fn("Creating a snapshot of disk/%s on node %s" %
|
8745 |
(idx, src_node)) |
8746 |
|
8747 |
# result.payload will be a snapshot of an lvm leaf of the one we
|
8748 |
# passed
|
8749 |
result = self.rpc.call_blockdev_snapshot(src_node, disk)
|
8750 |
msg = result.fail_msg |
8751 |
if msg:
|
8752 |
self.LogWarning("Could not snapshot disk/%s on node %s: %s", |
8753 |
idx, src_node, msg) |
8754 |
snap_disks.append(False)
|
8755 |
else:
|
8756 |
disk_id = (vgname, result.payload) |
8757 |
new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size, |
8758 |
logical_id=disk_id, physical_id=disk_id, |
8759 |
iv_name=disk.iv_name) |
8760 |
snap_disks.append(new_dev) |
8761 |
|
8762 |
finally:
|
8763 |
if self.op.shutdown and instance.admin_up: |
8764 |
feedback_fn("Starting instance %s" % instance.name)
|
8765 |
result = self.rpc.call_instance_start(src_node, instance, None, None) |
8766 |
msg = result.fail_msg |
8767 |
if msg:
|
8768 |
_ShutdownInstanceDisks(self, instance)
|
8769 |
raise errors.OpExecError("Could not start instance: %s" % msg) |
8770 |
|
8771 |
# TODO: check for size
|
8772 |
|
8773 |
cluster_name = self.cfg.GetClusterName()
|
8774 |
for idx, dev in enumerate(snap_disks): |
8775 |
feedback_fn("Exporting snapshot %s from %s to %s" %
|
8776 |
(idx, src_node, dst_node.name)) |
8777 |
if dev:
|
8778 |
# FIXME: pass debug from opcode to backend
|
8779 |
result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
|
8780 |
instance, cluster_name, |
8781 |
idx, self.op.debug_level)
|
8782 |
msg = result.fail_msg |
8783 |
if msg:
|
8784 |
self.LogWarning("Could not export disk/%s from node %s to" |
8785 |
" node %s: %s", idx, src_node, dst_node.name, msg)
|
8786 |
dresults.append(False)
|
8787 |
else:
|
8788 |
dresults.append(True)
|
8789 |
msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
|
8790 |
if msg:
|
8791 |
self.LogWarning("Could not remove snapshot for disk/%d from node" |
8792 |
" %s: %s", idx, src_node, msg)
|
8793 |
else:
|
8794 |
dresults.append(False)
|
8795 |
|
8796 |
feedback_fn("Finalizing export on %s" % dst_node.name)
|
8797 |
result = self.rpc.call_finalize_export(dst_node.name, instance,
|
8798 |
snap_disks) |
8799 |
fin_resu = True
|
8800 |
msg = result.fail_msg |
8801 |
if msg:
|
8802 |
self.LogWarning("Could not finalize export for instance %s" |
8803 |
" on node %s: %s", instance.name, dst_node.name, msg)
|
8804 |
fin_resu = False
|
8805 |
|
8806 |
finally:
|
8807 |
if activate_disks:
|
8808 |
feedback_fn("Deactivating disks for %s" % instance.name)
|
8809 |
_ShutdownInstanceDisks(self, instance)
|
8810 |
|
8811 |
nodelist = self.cfg.GetNodeList()
|
8812 |
nodelist.remove(dst_node.name) |
8813 |
|
8814 |
# on one-node clusters nodelist will be empty after the removal
|
8815 |
# if we proceed the backup would be removed because OpQueryExports
|
8816 |
# substitutes an empty list with the full cluster node list.
|
8817 |
iname = instance.name |
8818 |
if nodelist:
|
8819 |
feedback_fn("Removing old exports for instance %s" % iname)
|
8820 |
exportlist = self.rpc.call_export_list(nodelist)
|
8821 |
for node in exportlist: |
8822 |
if exportlist[node].fail_msg:
|
8823 |
continue
|
8824 |
if iname in exportlist[node].payload: |
8825 |
msg = self.rpc.call_export_remove(node, iname).fail_msg
|
8826 |
if msg:
|
8827 |
self.LogWarning("Could not remove older export for instance %s" |
8828 |
" on node %s: %s", iname, node, msg)
|
8829 |
return fin_resu, dresults
|
8830 |
|
8831 |
|
8832 |
class LURemoveExport(NoHooksLU): |
8833 |
"""Remove exports related to the named instance.
|
8834 |
|
8835 |
"""
|
8836 |
_OP_REQP = ["instance_name"]
|
8837 |
REQ_BGL = False
|
8838 |
|
8839 |
def ExpandNames(self): |
8840 |
self.needed_locks = {}
|
8841 |
# We need all nodes to be locked in order for RemoveExport to work, but we
|
8842 |
# don't need to lock the instance itself, as nothing will happen to it (and
|
8843 |
# we can remove exports also for a removed instance)
|
8844 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
8845 |
|
8846 |
def CheckPrereq(self): |
8847 |
"""Check prerequisites.
|
8848 |
"""
|
8849 |
pass
|
8850 |
|
8851 |
def Exec(self, feedback_fn): |
8852 |
"""Remove any export.
|
8853 |
|
8854 |
"""
|
8855 |
instance_name = self.cfg.ExpandInstanceName(self.op.instance_name) |
8856 |
# If the instance was not found we'll try with the name that was passed in.
|
8857 |
# This will only work if it was an FQDN, though.
|
8858 |
fqdn_warn = False
|
8859 |
if not instance_name: |
8860 |
fqdn_warn = True
|
8861 |
instance_name = self.op.instance_name
|
8862 |
|
8863 |
locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
|
8864 |
exportlist = self.rpc.call_export_list(locked_nodes)
|
8865 |
found = False
|
8866 |
for node in exportlist: |
8867 |
msg = exportlist[node].fail_msg |
8868 |
if msg:
|
8869 |
self.LogWarning("Failed to query node %s (continuing): %s", node, msg) |
8870 |
continue
|
8871 |
if instance_name in exportlist[node].payload: |
8872 |
found = True
|
8873 |
result = self.rpc.call_export_remove(node, instance_name)
|
8874 |
msg = result.fail_msg |
8875 |
if msg:
|
8876 |
logging.error("Could not remove export for instance %s"
|
8877 |
" on node %s: %s", instance_name, node, msg)
|
8878 |
|
8879 |
if fqdn_warn and not found: |
8880 |
feedback_fn("Export not found. If trying to remove an export belonging"
|
8881 |
" to a deleted instance please use its Fully Qualified"
|
8882 |
" Domain Name.")
|
8883 |
|
8884 |
|
8885 |
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223 |
8886 |
"""Generic tags LU.
|
8887 |
|
8888 |
This is an abstract class which is the parent of all the other tags LUs.
|
8889 |
|
8890 |
"""
|
8891 |
|
8892 |
def ExpandNames(self): |
8893 |
self.needed_locks = {}
|
8894 |
if self.op.kind == constants.TAG_NODE: |
8895 |
self.op.name = _ExpandNodeName(self.cfg, self.op.name) |
8896 |
self.needed_locks[locking.LEVEL_NODE] = self.op.name |
8897 |
elif self.op.kind == constants.TAG_INSTANCE: |
8898 |
self.op.name = _ExpandInstanceName(self.cfg, self.op.name) |
8899 |
self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name |
8900 |
|
8901 |
def CheckPrereq(self): |
8902 |
"""Check prerequisites.
|
8903 |
|
8904 |
"""
|
8905 |
if self.op.kind == constants.TAG_CLUSTER: |
8906 |
self.target = self.cfg.GetClusterInfo() |
8907 |
elif self.op.kind == constants.TAG_NODE: |
8908 |
self.target = self.cfg.GetNodeInfo(self.op.name) |
8909 |
elif self.op.kind == constants.TAG_INSTANCE: |
8910 |
self.target = self.cfg.GetInstanceInfo(self.op.name) |
8911 |
else:
|
8912 |
raise errors.OpPrereqError("Wrong tag type requested (%s)" % |
8913 |
str(self.op.kind), errors.ECODE_INVAL) |
8914 |
|
8915 |
|
8916 |
class LUGetTags(TagsLU): |
8917 |
"""Returns the tags of a given object.
|
8918 |
|
8919 |
"""
|
8920 |
_OP_REQP = ["kind", "name"] |
8921 |
REQ_BGL = False
|
8922 |
|
8923 |
def Exec(self, feedback_fn): |
8924 |
"""Returns the tag list.
|
8925 |
|
8926 |
"""
|
8927 |
return list(self.target.GetTags()) |
8928 |
|
8929 |
|
8930 |
class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_REQP = ["pattern"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err), errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results


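# Illustrative shape of the value returned by LUSearchTags.Exec above: a list
# of (path, tag) tuples, one per matching tag.  The names are hypothetical and
# only show the path prefixes built in the code:
#   [("/cluster", "production"),
#    ("/nodes/node1.example.com", "rack:r1"),
#    ("/instances/web1.example.com", "role:frontend")]
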
class LUAddTags(TagsLU):
  """Sets a tag on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    self.cfg.Update(self.target, feedback_fn)


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)), errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    self.cfg.Update(self.target, feedback_fn)


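# Small worked example of the subset check in LUDelTags.CheckPrereq above (tag
# names invented): with op.tags = ["web", "old"] and current tags
# {"web", "db"}, del_tags - cur_tags == {"old"}, so the LU fails with
# "Tag(s) 'old' not found" before any tag is removed.
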
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)


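# A minimal sketch of LUTestDelay's behaviour for one opcode (values are
# illustrative only): with duration=5.0, on_master=True and
# on_nodes=["node2.example.com"], Exec first sleeps about five seconds on the
# master via utils.TestDelay and then issues the test_delay RPC to node2,
# raising OpExecError if either step fails.
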
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
  # pylint: disable-msg=R0902
  # lots of instance attributes
  _ALLO_KEYS = [
    "name", "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "name", "relocate_from",
    ]
  _EVAC_KEYS = [
    "evac_nodes",
    ]

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(fn)

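  # Hedged examples of constructing the allocator in the three supported
  # modes; keyword names must match the keysets above, while the concrete
  # values are invented for illustration only:
  #   IAllocator(cfg, rpc, constants.IALLOCATOR_MODE_ALLOC,
  #              name="inst1.example.com", mem_size=512, vcpus=1,
  #              disks=[{"size": 1024, "mode": "w"}], disk_template="drbd",
  #              os="debian-image", tags=[], nics=[], hypervisor=None)
  #   IAllocator(cfg, rpc, constants.IALLOCATOR_MODE_RELOC,
  #              name="inst1.example.com", relocate_from=["node2.example.com"])
  #   IAllocator(cfg, rpc, constants.IALLOCATOR_MODE_MEVAC,
  #              evac_nodes=["node3.example.com"])
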
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

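  # Hedged sketch of the structure assembled above; the keys come from the
  # code, all values are invented examples:
  #   self.in_data = {
  #     "version": constants.IALLOCATOR_VERSION,
  #     "cluster_name": "cluster.example.com",
  #     "cluster_tags": [],
  #     "enabled_hypervisors": ["xen-pvm"],
  #     "nodes": {
  #       "node1.example.com": {
  #         "tags": [], "primary_ip": "192.0.2.1",
  #         "secondary_ip": "198.51.100.1", "offline": False,
  #         "drained": False, "master_candidate": True,
  #         # dynamic keys, only added for online, non-drained nodes:
  #         "total_memory": 4096, "reserved_memory": 512, "free_memory": 2048,
  #         "total_disk": 102400, "free_disk": 51200, "total_cpus": 4,
  #         "i_pri_memory": 1024, "i_pri_up_memory": 768,
  #       },
  #     },
  #     "instances": {
  #       "inst1.example.com": {
  #         "tags": [], "admin_up": True, "vcpus": 1, "memory": 512,
  #         "os": "debian-image", "nodes": ["node1.example.com"],
  #         "nics": [{"mac": "aa:00:00:00:00:01", "ip": None,
  #                   "mode": "bridged", "link": "xen-br0",
  #                   "bridge": "xen-br0"}],
  #         "disks": [{"size": 1024, "mode": "w"}],
  #         "disk_template": "plain", "hypervisor": "xen-pvm",
  #         "disk_space_total": 1024,
  #       },
  #     },
  #   }
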
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

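  # Illustrative "request" dict produced by _AddNewInstance for a mirrored
  # (DRBD) instance; the keys mirror the code above, the values are examples:
  #   {"name": "inst1.example.com", "disk_template": "drbd", "tags": [],
  #    "os": "debian-image", "vcpus": 1, "memory": 512,
  #    "disks": [{"size": 1024, "mode": "w"}], "disk_space_total": 2176,
  #    "nics": [], "required_nodes": 2}
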
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

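  # Illustrative relocation request built by _AddRelocateInstance (values are
  # made up); "relocate_from" names the node(s) the instance is moving away
  # from:
  #   {"name": "inst1.example.com", "disk_space_total": 2176,
  #    "required_nodes": 1, "relocate_from": ["node2.example.com"]}
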
  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

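  # A minimal, hedged usage sketch of the flow above ("hail" is only an
  # example allocator name; any script known to the iallocator runner works):
  #   ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_RELOC,
  #                    name="inst1.example.com",
  #                    relocate_from=["node2.example.com"])
  #   ial.Run("hail")        # runs the script on the master node
  #   if not ial.success:
  #     raise errors.OpExecError("iallocator failure: %s" % ial.info)
  #   new_nodes = ial.result  # the list returned by the allocator
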
  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict


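# Hedged example of a JSON reply accepted by IAllocator._ValidateResult above
# (the values are invented; older scripts may still return "nodes" instead of
# "result", which is remapped in the code):
#   {"success": true, "info": "allocation successful",
#    "result": ["node1.example.com", "node3.example.com"]}
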
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the test direction and
    mode.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the 'nics'"
                                     " parameter", errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input",
                                   errors.ECODE_INVAL)
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Uncaught mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
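
# Note on the two test directions handled in LUTestAllocator.Exec above: with
# constants.IALLOCATOR_DIR_IN the LU only builds and returns the serialized
# allocator input (ial.in_text); with IALLOCATOR_DIR_OUT it also runs the
# named allocator script and returns its raw output (ial.out_text), passing
# validate=False so the unparsed reply can be inspected.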