lib/cmdlib.py @ d6f8db24
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have waaaay too many lines in this module

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL
import socket
import tempfile
import shutil
import itertools
import operator

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import query
from ganeti import qlang
from ganeti import opcodes
from ganeti import ht

import ganeti.masterd.instance # pylint: disable=W0611


class ResultWithJobs:
  """Data container for LU results with jobs.

  Instances of this class returned from L{LogicalUnit.Exec} will be recognized
  by L{mcpu.Processor._ProcessResult}. The latter will then submit the jobs
  contained in the C{jobs} attribute and include the job IDs in the opcode
  result.

  """
  def __init__(self, jobs, **kwargs):
    """Initializes this class.

    Additional return values can be specified as keyword arguments.

    @type jobs: list of lists of L{opcode.OpCode}
    @param jobs: A list of lists of opcode objects

    """
    self.jobs = jobs
    self.other = kwargs


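# ResultWithJobs usage sketch (editor's illustration, not part of the original
# module): an LU's Exec() can hand follow-up jobs back to the processor by
# returning a ResultWithJobs instance, e.g.
#
#   return ResultWithJobs([[opcodes.OpClusterVerifyConfig()],
#                          [opcodes.OpClusterVerifyGroup(group_name="default")]])
#
# Each inner list becomes one submitted job; the resulting job IDs are added
# to the opcode result by mcpu.Processor._ProcessResult.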
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - implement BuildHooksNodes
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.glm = context.glm
    # readability alias
    self.owned_locks = context.glm.list_owned
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # logging
    self.Log = processor.Log # pylint: disable=C0103
    self.LogWarning = processor.LogWarning # pylint: disable=C0103
    self.LogInfo = processor.LogInfo # pylint: disable=C0103
    self.LogStep = processor.LogStep # pylint: disable=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      pass

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    @rtype: dict
    @return: Dictionary containing the environment that will be used for
      running the hooks for this LU. The keys of the dict must not be prefixed
      with "GANETI_"--that'll be added by the hooks runner. The hooks runner
      will extend the environment with additional variables. If no environment
      should be defined, an empty dictionary should be returned (not C{None}).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def BuildHooksNodes(self):
    """Build list of nodes to run LU's hooks.

    @rtype: tuple; (list, list)
    @return: Tuple containing a list of node names on which the hook
      should run before the execution and a list of node names on which the
      hook should run after the execution. No nodes should be returned as an
      empty list (and not None).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused argument and
    # "could be a function" warnings
    # pylint: disable=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
    for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


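# Illustrative sketch (editor's addition, not in the original file): a
# concurrent LU that needs the node locks of the instances it has locked
# typically combines recalculate_locks with _LockInstancesNodes, roughly:
#
#   def ExpandNames(self):
#     self._ExpandAndLockInstance()
#     self.needed_locks[locking.LEVEL_NODE] = []
#     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
#
#   def DeclareLocks(self, level):
#     if level == locking.LEVEL_NODE:
#       self._LockInstancesNodes()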
class NoHooksLU(LogicalUnit): # pylint: disable=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    raise AssertionError("BuildHooksEnv called for NoHooksLUs")

  def BuildHooksNodes(self):
    """Empty BuildHooksNodes for NoHooksLU.

    """
    raise AssertionError("BuildHooksNodes called for NoHooksLU")


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    pass

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


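# Illustrative sketch (editor's addition): an LU built entirely from tasklets
# only has to create them in ExpandNames; LogicalUnit.CheckPrereq and Exec
# above then iterate over self.tasklets instead of requiring LU-level
# overrides. "SomeTasklet" and "self.op.items" are placeholders:
#
#   def ExpandNames(self):
#     ...
#     self.tasklets = [SomeTasklet(self, item) for item in self.op.items]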
class _QueryBase:
  """Base for query utility classes.

  """
  #: Attribute holding field definitions
  FIELDS = None

  def __init__(self, qfilter, fields, use_locking):
    """Initializes this class.

    """
    self.use_locking = use_locking

    self.query = query.Query(self.FIELDS, fields, qfilter=qfilter,
                             namefield="name")
    self.requested_data = self.query.RequestedData()
    self.names = self.query.RequestedNames()

    # Sort only if no names were requested
    self.sort_by_name = not self.names

    self.do_locking = None
    self.wanted = None

  def _GetNames(self, lu, all_names, lock_level):
    """Helper function to determine names asked for in the query.

    """
    if self.do_locking:
      names = lu.owned_locks(lock_level)
    else:
      names = all_names

    if self.wanted == locking.ALL_SET:
      assert not self.names
      # caller didn't specify names, so ordering is not important
      return utils.NiceSort(names)

    # caller specified names and we must keep the same order
    assert self.names
    assert not self.do_locking or lu.glm.is_owned(lock_level)

    missing = set(self.wanted).difference(names)
    if missing:
      raise errors.OpExecError("Some items were removed before retrieving"
                               " their data: %s" % missing)

    # Return expanded names
    return self.wanted

  def ExpandNames(self, lu):
    """Expand names for this query.

    See L{LogicalUnit.ExpandNames}.

    """
    raise NotImplementedError()

  def DeclareLocks(self, lu, level):
    """Declare locks for this query.

    See L{LogicalUnit.DeclareLocks}.

    """
    raise NotImplementedError()

  def _GetQueryData(self, lu):
    """Collects all data for this query.

    @return: Query data object

    """
    raise NotImplementedError()

  def NewStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
                                  sort_by_name=self.sort_by_name)

  def OldStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return self.query.OldStyleQuery(self._GetQueryData(lu),
                                    sort_by_name=self.sort_by_name)


def _ShareAll():
  """Returns a dict declaring all lock levels shared.

  """
  return dict.fromkeys(locking.LEVELS, 1)


def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type instance_name: string
  @param instance_name: Instance name
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups

  """
  inst_groups = cfg.GetInstanceNodeGroups(instance_name)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups"
                               " are '%s', owning groups '%s'; retry the"
                               " operation" %
                               (instance_name,
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instances: set or frozenset
  @param owned_instances: List of currently owned instances

  """
  wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
  if owned_instances != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instances)),
                               errors.ECODE_STATE)

  return wanted_instances


def _SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if nodes:
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]

  return utils.NiceSort(lu.cfg.GetNodeList())


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy


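# Worked example (editor's illustration, keys are made up): with
#   old_params  = {"mem": 512, "vcpus": 2}
#   update_dict = {"mem": constants.VALUE_DEFAULT, "vcpus": 4}
# _GetUpdatedParams returns {"vcpus": 4}: the VALUE_DEFAULT marker deletes the
# key so the cluster-level default applies again, while other keys are simply
# overwritten. With use_none=True a None value behaves the same way.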
def _ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
    "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  if should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in lu.owned_locks(level):
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"


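# Usage sketch (editor's illustration): once an LU knows which nodes it really
# needs, it can drop the remaining node locks early, e.g.
#
#   _ReleaseLocks(self, locking.LEVEL_NODE,
#                 keep=[self.instance.primary_node])
#
# Passing neither names nor keep releases every lock held at that level.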
def _MapInstanceDisksToNodes(instances):
  """Creates a map from (node, volume) to instance name.

  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node name, volume name) as key, instance name as value

  """
  return dict(((node, vol), inst.name)
              for inst in instances
              for (node, vols) in inst.MapLVsByNode().items()
              for vol in vols)


def _RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.hmclass(lu.rpc.call_hooks_runner, lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
  except:
    # pylint: disable=W0702
    lu.LogWarning("Errors occurred running hooks on %s" % node_name)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instances."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    "name": instance.name,
    "primary_node": instance.primary_node,
    "secondary_nodes": instance.secondary_nodes,
    "os_type": instance.os,
    "status": instance.admin_up,
    "memory": bep[constants.BE_MEMORY],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": _NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.size, disk.mode) for disk in instance.disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable=W0142


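# Illustrative note (editor's addition): for a one-NIC, one-disk instance the
# environment built above contains keys such as INSTANCE_NAME,
# INSTANCE_PRIMARY, INSTANCE_NIC_COUNT, INSTANCE_NIC0_MAC, INSTANCE_DISK0_SIZE
# and the INSTANCE_BE_*/INSTANCE_HV_* parameter entries; the hooks runner
# later prefixes every key with "GANETI_" before exporting it to hook scripts.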
def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator")


def _GetDefaultIAllocator(cfg, iallocator):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type iallocator: string or None
  @param iallocator: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not iallocator:
    # Use default iallocator
    iallocator = cfg.GetDefaultIAllocator()

  if not iallocator:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return iallocator


class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [self.cfg.GetMasterNode()])

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUClusterDestroy(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()

    # Run post hooks on master node before it's removed
    _RunPostHook(self, master)

    result = self.rpc.call_node_deactivate_master_ip(master)
    result.Raise("Could not disable the master role")

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for L{LUClusterVerifyConfig}.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable=W0703
    return (LUClusterVerifyConfig.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)


def _GetAllHypervisorParameters(cluster, instances):
  """Compute the set of all hypervisor parameters.

  @type cluster: L{objects.Cluster}
  @param cluster: the cluster object
  @type instances: list of L{objects.Instance}
  @param instances: additional instances from which to obtain parameters
  @rtype: list of (origin, hypervisor, parameters)
  @return: a list with all parameters found, indicating the hypervisor they
      apply to, and the origin (can be "cluster", "os X", or "instance Y")

  """
  hvp_data = []

  for hv_name in cluster.enabled_hypervisors:
    hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))

  for os_name, os_hvp in cluster.os_hvp.items():
    for hv_name, hv_params in os_hvp.items():
      if hv_params:
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
        hvp_data.append(("os %s" % os_name, hv_name, full_params))

  # TODO: collapse identical parameter values in a single one
  for instance in instances:
    if instance.hvparams:
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
                       cluster.FillHV(instance)))

  return hvp_data


class _VerifyErrors(object):
  """Mix-in for cluster/group verify LUs.

  It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
  self.op and self._feedback_fn to be available.)

  """

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt, _ = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + item
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable=E1101

  def _ErrorIf(self, cond, ecode, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = (bool(cond)
            or self.op.debug_simulate_errors) # pylint: disable=E1101

    # If the error code is in the list of ignored errors, demote the error to a
    # warning
    (_, etxt, _) = ecode
    if etxt in self.op.ignore_errors: # pylint: disable=E1101
      kwargs[self.ETYPE_FIELD] = self.ETYPE_WARNING

    if cond:
      self._Error(ecode, *args, **kwargs)

    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond


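# Illustrative note (editor's addition): with opcode.error_codes set, _Error
# above emits the machine-parseable form "<ltype>:<etxt>:<itype>:<item>:<msg>"
# (starting with "ERROR:" or "WARNING:"); otherwise it emits the plain
# "<ltype>: <itype> <item>: <msg>" form. Error codes listed in
# opcode.ignore_errors are demoted to warnings and do not mark the operation
# as failed.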
class LUClusterVerify(NoHooksLU):
  """Submits all jobs necessary to verify the cluster.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    jobs = []

    if self.op.group_name:
      groups = [self.op.group_name]
      depends_fn = lambda: None
    else:
      groups = self.cfg.GetNodeGroupList()

      # Verify global configuration
      jobs.append([
        opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors)
        ])

      # Always depend on global verification
      depends_fn = lambda: [(-len(jobs), [])]

    jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
                                              ignore_errors=self.op.ignore_errors,
                                              depends=depends_fn())]
                for group in groups)

    # Fix up all parameters
    for op in itertools.chain(*jobs): # pylint: disable=W0142
      op.debug_simulate_errors = self.op.debug_simulate_errors
      op.verbose = self.op.verbose
      op.error_codes = self.op.error_codes
      try:
        op.skip_checks = self.op.skip_checks
      except AttributeError:
        assert not isinstance(op, opcodes.OpClusterVerifyGroup)

    return ResultWithJobs(jobs)


class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors): |
1533 |
"""Verifies the cluster config.
|
1534 |
|
1535 |
"""
|
1536 |
REQ_BGL = True
|
1537 |
|
1538 |
def _VerifyHVP(self, hvp_data): |
1539 |
"""Verifies locally the syntax of the hypervisor parameters.
|
1540 |
|
1541 |
"""
|
1542 |
for item, hv_name, hv_params in hvp_data: |
1543 |
msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
|
1544 |
(item, hv_name)) |
1545 |
try:
|
1546 |
hv_class = hypervisor.GetHypervisor(hv_name) |
1547 |
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES) |
1548 |
hv_class.CheckParameterSyntax(hv_params) |
1549 |
except errors.GenericError, err:
|
1550 |
self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err)) |
1551 |
|
1552 |
def ExpandNames(self): |
1553 |
# Information can be safely retrieved as the BGL is acquired in exclusive
|
1554 |
# mode
|
1555 |
assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER) |
1556 |
self.all_group_info = self.cfg.GetAllNodeGroupsInfo() |
1557 |
self.all_node_info = self.cfg.GetAllNodesInfo() |
1558 |
self.all_inst_info = self.cfg.GetAllInstancesInfo() |
1559 |
self.needed_locks = {}
|
1560 |
|
1561 |
def Exec(self, feedback_fn): |
1562 |
"""Verify integrity of cluster, performing various test on nodes.
|
1563 |
|
1564 |
"""
|
1565 |
self.bad = False |
1566 |
self._feedback_fn = feedback_fn
|
1567 |
|
1568 |
feedback_fn("* Verifying cluster config")
|
1569 |
|
1570 |
for msg in self.cfg.VerifyConfig(): |
1571 |
self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg) |
1572 |
|
1573 |
feedback_fn("* Verifying cluster certificate files")
|
1574 |
|
1575 |
for cert_filename in constants.ALL_CERT_FILES: |
1576 |
(errcode, msg) = _VerifyCertificate(cert_filename) |
1577 |
self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode) |
1578 |
|
1579 |
feedback_fn("* Verifying hypervisor parameters")
|
1580 |
|
1581 |
self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(), |
1582 |
self.all_inst_info.values()))
|
1583 |
|
1584 |
feedback_fn("* Verifying all nodes belong to an existing group")
|
1585 |
|
1586 |
# We do this verification here because, should this bogus circumstance
|
1587 |
# occur, it would never be caught by VerifyGroup, which only acts on
|
1588 |
# nodes/instances reachable from existing node groups.
|
1589 |
|
1590 |
dangling_nodes = set(node.name for node in self.all_node_info.values() |
1591 |
if node.group not in self.all_group_info) |
1592 |
|
1593 |
dangling_instances = {} |
1594 |
no_node_instances = [] |
1595 |
|
1596 |
for inst in self.all_inst_info.values(): |
1597 |
if inst.primary_node in dangling_nodes: |
1598 |
dangling_instances.setdefault(inst.primary_node, []).append(inst.name) |
1599 |
elif inst.primary_node not in self.all_node_info: |
1600 |
no_node_instances.append(inst.name) |
1601 |
|
1602 |
pretty_dangling = [ |
1603 |
"%s (%s)" %
|
1604 |
(node.name, |
1605 |
utils.CommaJoin(dangling_instances.get(node.name, |
1606 |
["no instances"])))
|
1607 |
for node in dangling_nodes] |
1608 |
|
1609 |
self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES, |
1610 |
None,
|
1611 |
"the following nodes (and their instances) belong to a non"
|
1612 |
" existing group: %s", utils.CommaJoin(pretty_dangling))
|
1613 |
|
1614 |
self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST, |
1615 |
None,
|
1616 |
"the following instances have a non-existing primary-node:"
|
1617 |
" %s", utils.CommaJoin(no_node_instances))
|
1618 |
|
1619 |
return not self.bad |
1620 |
|
1621 |
|
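A standalone sketch of the dangling-node and dangling-instance detection above, using plain dictionaries in place of the configuration objects; all names are invented:

all_groups = {"g1": "default"}
all_nodes = {"node1": "g1", "node2": "missing-group"}   # node -> group UUID
all_insts = {"inst1": "node2", "inst2": "node3"}        # instance -> primary node

dangling_nodes = set(n for (n, g) in all_nodes.items() if g not in all_groups)
no_node_instances = [i for (i, p) in all_insts.items() if p not in all_nodes]
dangling_instances = [i for (i, p) in all_insts.items() if p in dangling_nodes]

print(sorted(dangling_nodes))      # ['node2']
print(sorted(no_node_instances))   # ['inst2']
print(sorted(dangling_instances))  # ['inst1']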
1622 |
class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): |
1623 |
"""Verifies the status of a node group.
|
1624 |
|
1625 |
"""
|
1626 |
HPATH = "cluster-verify"
|
1627 |
HTYPE = constants.HTYPE_CLUSTER |
1628 |
REQ_BGL = False
|
1629 |
|
1630 |
_HOOKS_INDENT_RE = re.compile("^", re.M)
|
1631 |
|
1632 |
class NodeImage(object): |
1633 |
"""A class representing the logical and physical status of a node.
|
1634 |
|
1635 |
@type name: string
|
1636 |
@ivar name: the node name to which this object refers
|
1637 |
@ivar volumes: a structure as returned from
|
1638 |
L{ganeti.backend.GetVolumeList} (runtime)
|
1639 |
@ivar instances: a list of running instances (runtime)
|
1640 |
@ivar pinst: list of configured primary instances (config)
|
1641 |
@ivar sinst: list of configured secondary instances (config)
|
1642 |
@ivar sbp: dictionary of {primary-node: list of instances} for all
|
1643 |
instances for which this node is secondary (config)
|
1644 |
@ivar mfree: free memory, as reported by hypervisor (runtime)
|
1645 |
@ivar dfree: free disk, as reported by the node (runtime)
|
1646 |
@ivar offline: the offline status (config)
|
1647 |
@type rpc_fail: boolean
|
1648 |
@ivar rpc_fail: whether the RPC verify call was successful (overall,
|
1649 |
not whether the individual keys were correct) (runtime)
|
1650 |
@type lvm_fail: boolean
|
1651 |
@ivar lvm_fail: whether the RPC call didn't return valid LVM data
|
1652 |
@type hyp_fail: boolean
|
1653 |
@ivar hyp_fail: whether the RPC call didn't return the instance list
|
1654 |
@type ghost: boolean
|
1655 |
@ivar ghost: whether this node is unknown to the configuration (config)
|
1656 |
@type os_fail: boolean
|
1657 |
@ivar os_fail: whether the RPC call didn't return valid OS data
|
1658 |
@type oslist: list
|
1659 |
@ivar oslist: list of OSes as diagnosed by DiagnoseOS
|
1660 |
@type vm_capable: boolean
|
1661 |
@ivar vm_capable: whether the node can host instances
|
1662 |
|
1663 |
"""
|
1664 |
def __init__(self, offline=False, name=None, vm_capable=True): |
1665 |
self.name = name
|
1666 |
self.volumes = {}
|
1667 |
self.instances = []
|
1668 |
self.pinst = []
|
1669 |
self.sinst = []
|
1670 |
self.sbp = {}
|
1671 |
self.mfree = 0 |
1672 |
self.dfree = 0 |
1673 |
self.offline = offline
|
1674 |
self.vm_capable = vm_capable
|
1675 |
self.rpc_fail = False |
1676 |
self.lvm_fail = False |
1677 |
self.hyp_fail = False |
1678 |
self.ghost = False |
1679 |
self.os_fail = False |
1680 |
self.oslist = {}
|
1681 |
|
1682 |
def ExpandNames(self): |
1683 |
# This raises errors.OpPrereqError on its own:
|
1684 |
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name) |
1685 |
|
1686 |
# Get instances in node group; this is unsafe and needs verification later
|
1687 |
inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid) |
1688 |
|
1689 |
self.needed_locks = {
|
1690 |
locking.LEVEL_INSTANCE: inst_names, |
1691 |
locking.LEVEL_NODEGROUP: [self.group_uuid],
|
1692 |
locking.LEVEL_NODE: [], |
1693 |
} |
1694 |
|
1695 |
self.share_locks = _ShareAll()
|
1696 |
|
1697 |
def DeclareLocks(self, level): |
1698 |
if level == locking.LEVEL_NODE:
|
1699 |
# Get members of node group; this is unsafe and needs verification later
|
1700 |
nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members) |
1701 |
|
1702 |
all_inst_info = self.cfg.GetAllInstancesInfo()
|
1703 |
|
1704 |
# In Exec(), we warn about mirrored instances that have primary and
|
1705 |
# secondary living in separate node groups. To fully verify that
|
1706 |
# volumes for these instances are healthy, we will need to do an
|
1707 |
# extra call to their secondaries. We ensure here those nodes will
|
1708 |
# be locked.
|
1709 |
for inst in self.owned_locks(locking.LEVEL_INSTANCE): |
1710 |
# Important: access only the instances whose lock is owned
|
1711 |
if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR: |
1712 |
nodes.update(all_inst_info[inst].secondary_nodes) |
1713 |
|
1714 |
self.needed_locks[locking.LEVEL_NODE] = nodes
|
1715 |
|
1716 |
def CheckPrereq(self): |
1717 |
assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP) |
1718 |
self.group_info = self.cfg.GetNodeGroup(self.group_uuid) |
1719 |
|
1720 |
group_nodes = set(self.group_info.members) |
1721 |
group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid) |
1722 |
|
1723 |
unlocked_nodes = \ |
1724 |
group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
|
1725 |
|
1726 |
unlocked_instances = \ |
1727 |
group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
|
1728 |
|
1729 |
if unlocked_nodes:
|
1730 |
raise errors.OpPrereqError("Missing lock for nodes: %s" % |
1731 |
utils.CommaJoin(unlocked_nodes)) |
1732 |
|
1733 |
if unlocked_instances:
|
1734 |
raise errors.OpPrereqError("Missing lock for instances: %s" % |
1735 |
utils.CommaJoin(unlocked_instances)) |
1736 |
|
1737 |
self.all_node_info = self.cfg.GetAllNodesInfo() |
1738 |
self.all_inst_info = self.cfg.GetAllInstancesInfo() |
1739 |
|
1740 |
self.my_node_names = utils.NiceSort(group_nodes)
|
1741 |
self.my_inst_names = utils.NiceSort(group_instances)
|
1742 |
|
1743 |
self.my_node_info = dict((name, self.all_node_info[name]) |
1744 |
for name in self.my_node_names) |
1745 |
|
1746 |
self.my_inst_info = dict((name, self.all_inst_info[name]) |
1747 |
for name in self.my_inst_names) |
1748 |
|
1749 |
# We detect here the nodes that will need the extra RPC calls for verifying
|
1750 |
# split LV volumes; they should be locked.
|
1751 |
extra_lv_nodes = set()
|
1752 |
|
1753 |
for inst in self.my_inst_info.values(): |
1754 |
if inst.disk_template in constants.DTS_INT_MIRROR: |
1755 |
group = self.my_node_info[inst.primary_node].group
|
1756 |
for nname in inst.secondary_nodes: |
1757 |
if self.all_node_info[nname].group != group: |
1758 |
extra_lv_nodes.add(nname) |
1759 |
|
1760 |
unlocked_lv_nodes = \ |
1761 |
extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
|
1762 |
|
1763 |
if unlocked_lv_nodes:
|
1764 |
raise errors.OpPrereqError("these nodes could be locked: %s" % |
1765 |
utils.CommaJoin(unlocked_lv_nodes)) |
1766 |
self.extra_lv_nodes = list(extra_lv_nodes) |
1767 |
|
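The extra-lock computation above only matters for internally mirrored instances whose secondaries sit in a different group than their primary. A sketch on plain data, leaving out the disk-template filter; node and instance names are invented:

node_group = {"n1": "g1", "n2": "g1", "n3": "g2"}
instances = {
  "inst1": {"primary": "n1", "secondaries": ["n2"]},   # same group, no extra lock
  "inst2": {"primary": "n1", "secondaries": ["n3"]},   # split across groups
}

extra_lv_nodes = set()
for inst in instances.values():
  group = node_group[inst["primary"]]
  for sec in inst["secondaries"]:
    if node_group[sec] != group:
      extra_lv_nodes.add(sec)

print(sorted(extra_lv_nodes))  # ['n3']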
1768 |
def _VerifyNode(self, ninfo, nresult): |
1769 |
"""Perform some basic validation on data returned from a node.
|
1770 |
|
1771 |
- check the result data structure is well formed and has all the
|
1772 |
mandatory fields
|
1773 |
- check ganeti version
|
1774 |
|
1775 |
@type ninfo: L{objects.Node}
|
1776 |
@param ninfo: the node to check
|
1777 |
@param nresult: the results from the node
|
1778 |
@rtype: boolean
|
1779 |
@return: whether overall this call was successful (and we can expect
|
1780 |
reasonable values in the response)
|
1781 |
|
1782 |
"""
|
1783 |
node = ninfo.name |
1784 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
1785 |
|
1786 |
# main result, nresult should be a non-empty dict
|
1787 |
test = not nresult or not isinstance(nresult, dict) |
1788 |
_ErrorIf(test, constants.CV_ENODERPC, node, |
1789 |
"unable to verify node: no data returned")
|
1790 |
if test:
|
1791 |
return False |
1792 |
|
1793 |
# compares ganeti version
|
1794 |
local_version = constants.PROTOCOL_VERSION |
1795 |
remote_version = nresult.get("version", None) |
1796 |
test = not (remote_version and |
1797 |
isinstance(remote_version, (list, tuple)) and |
1798 |
len(remote_version) == 2) |
1799 |
_ErrorIf(test, constants.CV_ENODERPC, node, |
1800 |
"connection to node returned invalid data")
|
1801 |
if test:
|
1802 |
return False |
1803 |
|
1804 |
test = local_version != remote_version[0]
|
1805 |
_ErrorIf(test, constants.CV_ENODEVERSION, node, |
1806 |
"incompatible protocol versions: master %s,"
|
1807 |
" node %s", local_version, remote_version[0]) |
1808 |
if test:
|
1809 |
return False |
1810 |
|
1811 |
# node seems compatible, we can actually try to look into its results
|
1812 |
|
1813 |
# full package version
|
1814 |
self._ErrorIf(constants.RELEASE_VERSION != remote_version[1], |
1815 |
constants.CV_ENODEVERSION, node, |
1816 |
"software version mismatch: master %s, node %s",
|
1817 |
constants.RELEASE_VERSION, remote_version[1],
|
1818 |
code=self.ETYPE_WARNING)
|
1819 |
|
1820 |
hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
|
1821 |
if ninfo.vm_capable and isinstance(hyp_result, dict): |
1822 |
for hv_name, hv_result in hyp_result.iteritems(): |
1823 |
test = hv_result is not None |
1824 |
_ErrorIf(test, constants.CV_ENODEHV, node, |
1825 |
"hypervisor %s verify failure: '%s'", hv_name, hv_result)
|
1826 |
|
1827 |
hvp_result = nresult.get(constants.NV_HVPARAMS, None)
|
1828 |
if ninfo.vm_capable and isinstance(hvp_result, list): |
1829 |
for item, hv_name, hv_result in hvp_result: |
1830 |
_ErrorIf(True, constants.CV_ENODEHV, node,
|
1831 |
"hypervisor %s parameter verify failure (source %s): %s",
|
1832 |
hv_name, item, hv_result) |
1833 |
|
1834 |
test = nresult.get(constants.NV_NODESETUP, |
1835 |
["Missing NODESETUP results"])
|
1836 |
_ErrorIf(test, constants.CV_ENODESETUP, node, "node setup error: %s",
|
1837 |
"; ".join(test))
|
1838 |
|
1839 |
return True |
1840 |
|
1841 |
def _VerifyNodeTime(self, ninfo, nresult, |
1842 |
nvinfo_starttime, nvinfo_endtime): |
1843 |
"""Check the node time.
|
1844 |
|
1845 |
@type ninfo: L{objects.Node}
|
1846 |
@param ninfo: the node to check
|
1847 |
@param nresult: the remote results for the node
|
1848 |
@param nvinfo_starttime: the start time of the RPC call
|
1849 |
@param nvinfo_endtime: the end time of the RPC call
|
1850 |
|
1851 |
"""
|
1852 |
node = ninfo.name |
1853 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
1854 |
|
1855 |
ntime = nresult.get(constants.NV_TIME, None)
|
1856 |
try:
|
1857 |
ntime_merged = utils.MergeTime(ntime) |
1858 |
except (ValueError, TypeError): |
1859 |
_ErrorIf(True, constants.CV_ENODETIME, node, "Node returned invalid time") |
1860 |
return
|
1861 |
|
1862 |
if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
|
1863 |
ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged) |
1864 |
elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
|
1865 |
ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime) |
1866 |
else:
|
1867 |
ntime_diff = None
|
1868 |
|
1869 |
_ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node, |
1870 |
"Node time diverges by at least %s from master node time",
|
1871 |
ntime_diff) |
1872 |
|
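The clock check above accepts any node time inside the window [rpc start - max skew, rpc end + max skew]. A minimal sketch; the 150-second skew below is only an illustrative figure, not necessarily the real NODE_MAX_CLOCK_SKEW value:

def time_divergence(node_time, rpc_start, rpc_end, max_skew=150.0):
  # Returns a human-readable divergence, or None if the node time is
  # within the allowed window around the RPC call.
  if node_time < (rpc_start - max_skew):
    return "%.01fs" % abs(rpc_start - node_time)
  elif node_time > (rpc_end + max_skew):
    return "%.01fs" % abs(node_time - rpc_end)
  return None

print(time_divergence(1000.0, 1300.0, 1301.0))  # 300.0s, node clock too far behind
print(time_divergence(1299.0, 1300.0, 1301.0))  # None, acceptable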
1873 |
def _VerifyNodeLVM(self, ninfo, nresult, vg_name): |
1874 |
"""Check the node LVM results.
|
1875 |
|
1876 |
@type ninfo: L{objects.Node}
|
1877 |
@param ninfo: the node to check
|
1878 |
@param nresult: the remote results for the node
|
1879 |
@param vg_name: the configured VG name
|
1880 |
|
1881 |
"""
|
1882 |
if vg_name is None: |
1883 |
return
|
1884 |
|
1885 |
node = ninfo.name |
1886 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
1887 |
|
1888 |
# checks vg existence and size > 20G
|
1889 |
vglist = nresult.get(constants.NV_VGLIST, None)
|
1890 |
test = not vglist
|
1891 |
_ErrorIf(test, constants.CV_ENODELVM, node, "unable to check volume groups")
|
1892 |
if not test: |
1893 |
vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name, |
1894 |
constants.MIN_VG_SIZE) |
1895 |
_ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus) |
1896 |
|
1897 |
# check pv names
|
1898 |
pvlist = nresult.get(constants.NV_PVLIST, None)
|
1899 |
test = pvlist is None |
1900 |
_ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
|
1901 |
if not test: |
1902 |
# check that ':' is not present in PV names, since it's a
|
1903 |
# special character for lvcreate (denotes the range of PEs to
|
1904 |
# use on the PV)
|
1905 |
for _, pvname, owner_vg in pvlist: |
1906 |
test = ":" in pvname |
1907 |
_ErrorIf(test, constants.CV_ENODELVM, node, |
1908 |
"Invalid character ':' in PV '%s' of VG '%s'",
|
1909 |
pvname, owner_vg) |
1910 |
|
1911 |
def _VerifyNodeBridges(self, ninfo, nresult, bridges): |
1912 |
"""Check the node bridges.
|
1913 |
|
1914 |
@type ninfo: L{objects.Node}
|
1915 |
@param ninfo: the node to check
|
1916 |
@param nresult: the remote results for the node
|
1917 |
@param bridges: the expected list of bridges
|
1918 |
|
1919 |
"""
|
1920 |
if not bridges: |
1921 |
return
|
1922 |
|
1923 |
node = ninfo.name |
1924 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
1925 |
|
1926 |
missing = nresult.get(constants.NV_BRIDGES, None)
|
1927 |
test = not isinstance(missing, list) |
1928 |
_ErrorIf(test, constants.CV_ENODENET, node, |
1929 |
"did not return valid bridge information")
|
1930 |
if not test: |
1931 |
_ErrorIf(bool(missing), constants.CV_ENODENET, node,
|
1932 |
"missing bridges: %s" % utils.CommaJoin(sorted(missing))) |
1933 |
|
1934 |
def _VerifyNodeNetwork(self, ninfo, nresult): |
1935 |
"""Check the node network connectivity results.
|
1936 |
|
1937 |
@type ninfo: L{objects.Node}
|
1938 |
@param ninfo: the node to check
|
1939 |
@param nresult: the remote results for the node
|
1940 |
|
1941 |
"""
|
1942 |
node = ninfo.name |
1943 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
1944 |
|
1945 |
test = constants.NV_NODELIST not in nresult |
1946 |
_ErrorIf(test, constants.CV_ENODESSH, node, |
1947 |
"node hasn't returned node ssh connectivity data")
|
1948 |
if not test: |
1949 |
if nresult[constants.NV_NODELIST]:
|
1950 |
for a_node, a_msg in nresult[constants.NV_NODELIST].items(): |
1951 |
_ErrorIf(True, constants.CV_ENODESSH, node,
|
1952 |
"ssh communication with node '%s': %s", a_node, a_msg)
|
1953 |
|
1954 |
test = constants.NV_NODENETTEST not in nresult |
1955 |
_ErrorIf(test, constants.CV_ENODENET, node, |
1956 |
"node hasn't returned node tcp connectivity data")
|
1957 |
if not test: |
1958 |
if nresult[constants.NV_NODENETTEST]:
|
1959 |
nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys()) |
1960 |
for anode in nlist: |
1961 |
_ErrorIf(True, constants.CV_ENODENET, node,
|
1962 |
"tcp communication with node '%s': %s",
|
1963 |
anode, nresult[constants.NV_NODENETTEST][anode]) |
1964 |
|
1965 |
test = constants.NV_MASTERIP not in nresult |
1966 |
_ErrorIf(test, constants.CV_ENODENET, node, |
1967 |
"node hasn't returned node master IP reachability data")
|
1968 |
if not test: |
1969 |
if not nresult[constants.NV_MASTERIP]: |
1970 |
if node == self.master_node: |
1971 |
msg = "the master node cannot reach the master IP (not configured?)"
|
1972 |
else:
|
1973 |
msg = "cannot reach the master IP"
|
1974 |
_ErrorIf(True, constants.CV_ENODENET, node, msg)
|
1975 |
|
1976 |
def _VerifyInstance(self, instance, instanceconfig, node_image, |
1977 |
diskstatus): |
1978 |
"""Verify an instance.
|
1979 |
|
1980 |
This function checks to see if the required block devices are
|
1981 |
available on the instance's node.
|
1982 |
|
1983 |
"""
|
1984 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
1985 |
node_current = instanceconfig.primary_node |
1986 |
|
1987 |
node_vol_should = {} |
1988 |
instanceconfig.MapLVsByNode(node_vol_should) |
1989 |
|
1990 |
for node in node_vol_should: |
1991 |
n_img = node_image[node] |
1992 |
if n_img.offline or n_img.rpc_fail or n_img.lvm_fail: |
1993 |
# ignore missing volumes on offline or broken nodes
|
1994 |
continue
|
1995 |
for volume in node_vol_should[node]: |
1996 |
test = volume not in n_img.volumes |
1997 |
_ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance, |
1998 |
"volume %s missing on node %s", volume, node)
|
1999 |
|
2000 |
if instanceconfig.admin_up:
|
2001 |
pri_img = node_image[node_current] |
2002 |
test = instance not in pri_img.instances and not pri_img.offline |
2003 |
_ErrorIf(test, constants.CV_EINSTANCEDOWN, instance, |
2004 |
"instance not running on its primary node %s",
|
2005 |
node_current) |
2006 |
|
2007 |
diskdata = [(nname, success, status, idx) |
2008 |
for (nname, disks) in diskstatus.items() |
2009 |
for idx, (success, status) in enumerate(disks)] |
2010 |
|
2011 |
for nname, success, bdev_status, idx in diskdata: |
2012 |
# the 'ghost node' construction in Exec() ensures that we have a
|
2013 |
# node here
|
2014 |
snode = node_image[nname] |
2015 |
bad_snode = snode.ghost or snode.offline
|
2016 |
_ErrorIf(instanceconfig.admin_up and not success and not bad_snode, |
2017 |
constants.CV_EINSTANCEFAULTYDISK, instance, |
2018 |
"couldn't retrieve status for disk/%s on %s: %s",
|
2019 |
idx, nname, bdev_status) |
2020 |
_ErrorIf((instanceconfig.admin_up and success and |
2021 |
bdev_status.ldisk_status == constants.LDS_FAULTY), |
2022 |
constants.CV_EINSTANCEFAULTYDISK, instance, |
2023 |
"disk/%s on %s is faulty", idx, nname)
|
2024 |
|
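The flattening of diskstatus in the middle of the method above turns the per-node lists of (success, status) pairs into flat (node, success, status, disk index) tuples. The same expression in isolation, with invented values:

diskstatus = {"n1": [(True, "ok"), (False, "timeout")]}   # node -> per-disk results

diskdata = [(nname, success, status, idx)
            for (nname, disks) in diskstatus.items()
            for idx, (success, status) in enumerate(disks)]

print(diskdata)
# [('n1', True, 'ok', 0), ('n1', False, 'timeout', 1)]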
2025 |
def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved): |
2026 |
"""Verify if there are any unknown volumes in the cluster.
|
2027 |
|
2028 |
The .os, .swap and backup volumes are ignored. All other volumes are
|
2029 |
reported as unknown.
|
2030 |
|
2031 |
@type reserved: L{ganeti.utils.FieldSet}
|
2032 |
@param reserved: a FieldSet of reserved volume names
|
2033 |
|
2034 |
"""
|
2035 |
for node, n_img in node_image.items(): |
2036 |
if n_img.offline or n_img.rpc_fail or n_img.lvm_fail: |
2037 |
# skip non-healthy nodes
|
2038 |
continue
|
2039 |
for volume in n_img.volumes: |
2040 |
test = ((node not in node_vol_should or |
2041 |
volume not in node_vol_should[node]) and |
2042 |
not reserved.Matches(volume))
|
2043 |
self._ErrorIf(test, constants.CV_ENODEORPHANLV, node,
|
2044 |
"volume %s is unknown", volume)
|
2045 |
|
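A reduced sketch of the orphan-volume check above: a volume a node reports that the configuration does not expect, and that is not reserved, gets flagged. The real code matches reserved names through a FieldSet of patterns; a plain set and invented volume names stand in here:

node_vol_should = {"n1": ["xenvg/disk0_data"]}            # what the config expects
node_volumes = {"n1": ["xenvg/disk0_data", "xenvg/stray-lv"]}
reserved = set(["xenvg/backup"])                          # stand-in for a FieldSet

for node, volumes in node_volumes.items():
  for volume in volumes:
    known = volume in node_vol_should.get(node, [])
    if not known and volume not in reserved:
      print("volume %s on node %s is unknown" % (volume, node))
# volume xenvg/stray-lv on node n1 is unknown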
2046 |
def _VerifyNPlusOneMemory(self, node_image, instance_cfg): |
2047 |
"""Verify N+1 Memory Resilience.
|
2048 |
|
2049 |
Check that if one single node dies we can still start all the
|
2050 |
instances it was primary for.
|
2051 |
|
2052 |
"""
|
2053 |
cluster_info = self.cfg.GetClusterInfo()
|
2054 |
for node, n_img in node_image.items(): |
2055 |
# This code checks that every node which is now listed as
|
2056 |
# secondary has enough memory to host all instances it is
|
2057 |
# supposed to, should a single other node in the cluster fail.
|
2058 |
# FIXME: not ready for failover to an arbitrary node
|
2059 |
# FIXME: does not support file-backed instances
|
2060 |
# WARNING: we currently take into account down instances as well
|
2061 |
# as up ones, considering that even if they're down someone
|
2062 |
# might want to start them even in the event of a node failure.
|
2063 |
if n_img.offline:
|
2064 |
# we're skipping offline nodes from the N+1 warning, since
|
2065 |
# most likely we don't have good memory information from them;
|
2066 |
# we already list instances living on such nodes, and that's
|
2067 |
# enough warning
|
2068 |
continue
|
2069 |
for prinode, instances in n_img.sbp.items(): |
2070 |
needed_mem = 0
|
2071 |
for instance in instances: |
2072 |
bep = cluster_info.FillBE(instance_cfg[instance]) |
2073 |
if bep[constants.BE_AUTO_BALANCE]:
|
2074 |
needed_mem += bep[constants.BE_MEMORY] |
2075 |
test = n_img.mfree < needed_mem |
2076 |
self._ErrorIf(test, constants.CV_ENODEN1, node,
|
2077 |
"not enough memory to accomodate instance failovers"
|
2078 |
" should node %s fail (%dMiB needed, %dMiB available)",
|
2079 |
prinode, needed_mem, n_img.mfree) |
2080 |
|
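A standalone sketch of the N+1 check above: for every (secondary node, primary node) pair, the secondary must have enough free memory to start all auto-balanced instances of that primary. The memory figures are invented:

node_mfree = {"n2": 4096}                       # free memory in MiB
sbp = {"n2": {"n1": ["inst1", "inst2"]}}        # n2 is secondary for n1's instances
inst_mem = {"inst1": 2048, "inst2": 3072}       # BE_MEMORY of auto-balanced instances

for node, per_primary in sbp.items():
  for prinode, insts in per_primary.items():
    needed = sum(inst_mem[i] for i in insts)
    if node_mfree[node] < needed:
      print("N+1 failure on %s if %s fails: %d MiB needed, %d MiB available"
            % (node, prinode, needed, node_mfree[node]))
# N+1 failure on n2 if n1 fails: 5120 MiB needed, 4096 MiB available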
2081 |
@classmethod
|
2082 |
def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo, |
2083 |
(files_all, files_all_opt, files_mc, files_vm)): |
2084 |
"""Verifies file checksums collected from all nodes.
|
2085 |
|
2086 |
@param errorif: Callback for reporting errors
|
2087 |
@param nodeinfo: List of L{objects.Node} objects
|
2088 |
@param master_node: Name of master node
|
2089 |
@param all_nvinfo: RPC results
|
2090 |
|
2091 |
"""
|
2092 |
assert (len(files_all | files_all_opt | files_mc | files_vm) == |
2093 |
sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \ |
2094 |
"Found file listed in more than one file list"
|
2095 |
|
2096 |
# Define functions determining which nodes to consider for a file
|
2097 |
files2nodefn = [ |
2098 |
(files_all, None),
|
2099 |
(files_all_opt, None),
|
2100 |
(files_mc, lambda node: (node.master_candidate or |
2101 |
node.name == master_node)), |
2102 |
(files_vm, lambda node: node.vm_capable),
|
2103 |
] |
2104 |
|
2105 |
# Build mapping from filename to list of nodes which should have the file
|
2106 |
nodefiles = {} |
2107 |
for (files, fn) in files2nodefn: |
2108 |
if fn is None: |
2109 |
filenodes = nodeinfo |
2110 |
else:
|
2111 |
filenodes = filter(fn, nodeinfo)
|
2112 |
nodefiles.update((filename, |
2113 |
frozenset(map(operator.attrgetter("name"), filenodes))) |
2114 |
for filename in files) |
2115 |
|
2116 |
assert set(nodefiles) == (files_all | files_all_opt | files_mc | files_vm) |
2117 |
|
2118 |
fileinfo = dict((filename, {}) for filename in nodefiles) |
2119 |
ignore_nodes = set()
|
2120 |
|
2121 |
for node in nodeinfo: |
2122 |
if node.offline:
|
2123 |
ignore_nodes.add(node.name) |
2124 |
continue
|
2125 |
|
2126 |
nresult = all_nvinfo[node.name] |
2127 |
|
2128 |
if nresult.fail_msg or not nresult.payload: |
2129 |
node_files = None
|
2130 |
else:
|
2131 |
node_files = nresult.payload.get(constants.NV_FILELIST, None)
|
2132 |
|
2133 |
test = not (node_files and isinstance(node_files, dict)) |
2134 |
errorif(test, constants.CV_ENODEFILECHECK, node.name, |
2135 |
"Node did not return file checksum data")
|
2136 |
if test:
|
2137 |
ignore_nodes.add(node.name) |
2138 |
continue
|
2139 |
|
2140 |
# Build per-checksum mapping from filename to nodes having it
|
2141 |
for (filename, checksum) in node_files.items(): |
2142 |
assert filename in nodefiles |
2143 |
fileinfo[filename].setdefault(checksum, set()).add(node.name)
|
2144 |
|
2145 |
for (filename, checksums) in fileinfo.items(): |
2146 |
assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum" |
2147 |
|
2148 |
# Nodes having the file
|
2149 |
with_file = frozenset(node_name
|
2150 |
for nodes in fileinfo[filename].values() |
2151 |
for node_name in nodes) - ignore_nodes |
2152 |
|
2153 |
expected_nodes = nodefiles[filename] - ignore_nodes |
2154 |
|
2155 |
# Nodes missing file
|
2156 |
missing_file = expected_nodes - with_file |
2157 |
|
2158 |
if filename in files_all_opt: |
2159 |
# All or no nodes
|
2160 |
errorif(missing_file and missing_file != expected_nodes,
|
2161 |
constants.CV_ECLUSTERFILECHECK, None,
|
2162 |
"File %s is optional, but it must exist on all or no"
|
2163 |
" nodes (not found on %s)",
|
2164 |
filename, utils.CommaJoin(utils.NiceSort(missing_file))) |
2165 |
else:
|
2166 |
errorif(missing_file, constants.CV_ECLUSTERFILECHECK, None,
|
2167 |
"File %s is missing from node(s) %s", filename,
|
2168 |
utils.CommaJoin(utils.NiceSort(missing_file))) |
2169 |
|
2170 |
# Warn if a node has a file it shouldn't
|
2171 |
unexpected = with_file - expected_nodes |
2172 |
errorif(unexpected, |
2173 |
constants.CV_ECLUSTERFILECHECK, None,
|
2174 |
"File %s should not exist on node(s) %s",
|
2175 |
filename, utils.CommaJoin(utils.NiceSort(unexpected))) |
2176 |
|
2177 |
# See if there are multiple versions of the file
|
2178 |
test = len(checksums) > 1 |
2179 |
if test:
|
2180 |
variants = ["variant %s on %s" %
|
2181 |
(idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
|
2182 |
for (idx, (checksum, nodes)) in |
2183 |
enumerate(sorted(checksums.items()))] |
2184 |
else:
|
2185 |
variants = [] |
2186 |
|
2187 |
errorif(test, constants.CV_ECLUSTERFILECHECK, None,
|
2188 |
"File %s found with %s different checksums (%s)",
|
2189 |
filename, len(checksums), "; ".join(variants)) |
2190 |
|
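The file verification above boils down to grouping nodes by checksum per file and comparing against the set of nodes expected to hold the file. A reduced sketch on plain data; hostnames, the filename and the checksums are illustrative:

expected_nodes = {"cluster.conf": set(["n1", "n2", "n3"])}
reported = {                               # node -> {filename: checksum}
  "n1": {"cluster.conf": "abc123"},
  "n2": {"cluster.conf": "def456"},
  "n3": {},
}

for fname, expected in expected_nodes.items():
  by_checksum = {}
  for node, files in reported.items():
    if fname in files:
      by_checksum.setdefault(files[fname], set()).add(node)
  with_file = set()
  for nodes in by_checksum.values():
    with_file |= nodes
  print("%s missing on: %s" % (fname, sorted(expected - with_file)))  # ['n3']
  print("%s has %d different checksums" % (fname, len(by_checksum)))  # 2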
2191 |
def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper, |
2192 |
drbd_map): |
2193 |
"""Verifies and the node DRBD status.
|
2194 |
|
2195 |
@type ninfo: L{objects.Node}
|
2196 |
@param ninfo: the node to check
|
2197 |
@param nresult: the remote results for the node
|
2198 |
@param instanceinfo: the dict of instances
|
2199 |
@param drbd_helper: the configured DRBD usermode helper
|
2200 |
@param drbd_map: the DRBD map as returned by
|
2201 |
L{ganeti.config.ConfigWriter.ComputeDRBDMap}
|
2202 |
|
2203 |
"""
|
2204 |
node = ninfo.name |
2205 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
2206 |
|
2207 |
if drbd_helper:
|
2208 |
helper_result = nresult.get(constants.NV_DRBDHELPER, None)
|
2209 |
test = (helper_result is None)
|
2210 |
_ErrorIf(test, constants.CV_ENODEDRBDHELPER, node, |
2211 |
"no drbd usermode helper returned")
|
2212 |
if helper_result:
|
2213 |
status, payload = helper_result |
2214 |
test = not status
|
2215 |
_ErrorIf(test, constants.CV_ENODEDRBDHELPER, node, |
2216 |
"drbd usermode helper check unsuccessful: %s", payload)
|
2217 |
test = status and (payload != drbd_helper)
|
2218 |
_ErrorIf(test, constants.CV_ENODEDRBDHELPER, node, |
2219 |
"wrong drbd usermode helper: %s", payload)
|
2220 |
|
2221 |
# compute the DRBD minors
|
2222 |
node_drbd = {} |
2223 |
for minor, instance in drbd_map[node].items(): |
2224 |
test = instance not in instanceinfo |
2225 |
_ErrorIf(test, constants.CV_ECLUSTERCFG, None,
|
2226 |
"ghost instance '%s' in temporary DRBD map", instance)
|
2227 |
# ghost instance should not be running, but otherwise we
|
2228 |
# don't give double warnings (both ghost instance and
|
2229 |
# unallocated minor in use)
|
2230 |
if test:
|
2231 |
node_drbd[minor] = (instance, False)
|
2232 |
else:
|
2233 |
instance = instanceinfo[instance] |
2234 |
node_drbd[minor] = (instance.name, instance.admin_up) |
2235 |
|
2236 |
# and now check them
|
2237 |
used_minors = nresult.get(constants.NV_DRBDLIST, []) |
2238 |
test = not isinstance(used_minors, (tuple, list)) |
2239 |
_ErrorIf(test, constants.CV_ENODEDRBD, node, |
2240 |
"cannot parse drbd status file: %s", str(used_minors)) |
2241 |
if test:
|
2242 |
# we cannot check drbd status
|
2243 |
return
|
2244 |
|
2245 |
for minor, (iname, must_exist) in node_drbd.items(): |
2246 |
test = minor not in used_minors and must_exist |
2247 |
_ErrorIf(test, constants.CV_ENODEDRBD, node, |
2248 |
"drbd minor %d of instance %s is not active", minor, iname)
|
2249 |
for minor in used_minors: |
2250 |
test = minor not in node_drbd |
2251 |
_ErrorIf(test, constants.CV_ENODEDRBD, node, |
2252 |
"unallocated drbd minor %d is in use", minor)
|
2253 |
|
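The DRBD reconciliation above runs in both directions: configured minors that must be active have to appear in the node's in-use list, and in-use minors have to be known to the configuration. A small sketch with invented minors:

expected = {0: ("inst1", True), 1: ("inst2", False)}   # minor -> (instance, must_exist)
used_minors = [1, 7]                                   # what the node reports

for minor, (iname, must_exist) in expected.items():
  if must_exist and minor not in used_minors:
    print("drbd minor %d of instance %s is not active" % (minor, iname))
for minor in used_minors:
  if minor not in expected:
    print("unallocated drbd minor %d is in use" % minor)
# drbd minor 0 of instance inst1 is not active
# unallocated drbd minor 7 is in use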
2254 |
def _UpdateNodeOS(self, ninfo, nresult, nimg): |
2255 |
"""Builds the node OS structures.
|
2256 |
|
2257 |
@type ninfo: L{objects.Node}
|
2258 |
@param ninfo: the node to check
|
2259 |
@param nresult: the remote results for the node
|
2260 |
@param nimg: the node image object
|
2261 |
|
2262 |
"""
|
2263 |
node = ninfo.name |
2264 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
2265 |
|
2266 |
remote_os = nresult.get(constants.NV_OSLIST, None)
|
2267 |
test = (not isinstance(remote_os, list) or |
2268 |
not compat.all(isinstance(v, list) and len(v) == 7 |
2269 |
for v in remote_os)) |
2270 |
|
2271 |
_ErrorIf(test, constants.CV_ENODEOS, node, |
2272 |
"node hasn't returned valid OS data")
|
2273 |
|
2274 |
nimg.os_fail = test |
2275 |
|
2276 |
if test:
|
2277 |
return
|
2278 |
|
2279 |
os_dict = {} |
2280 |
|
2281 |
for (name, os_path, status, diagnose,
|
2282 |
variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
|
2283 |
|
2284 |
if name not in os_dict: |
2285 |
os_dict[name] = [] |
2286 |
|
2287 |
# parameters is a list of lists instead of list of tuples due to
|
2288 |
# JSON lacking a real tuple type, fix it:
|
2289 |
parameters = [tuple(v) for v in parameters] |
2290 |
os_dict[name].append((os_path, status, diagnose, |
2291 |
set(variants), set(parameters), set(api_ver))) |
2292 |
|
2293 |
nimg.oslist = os_dict |
2294 |
|
2295 |
def _VerifyNodeOS(self, ninfo, nimg, base): |
2296 |
"""Verifies the node OS list.
|
2297 |
|
2298 |
@type ninfo: L{objects.Node}
|
2299 |
@param ninfo: the node to check
|
2300 |
@param nimg: the node image object
|
2301 |
@param base: the 'template' node we match against (e.g. from the master)
|
2302 |
|
2303 |
"""
|
2304 |
node = ninfo.name |
2305 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
2306 |
|
2307 |
assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?" |
2308 |
|
2309 |
beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l] |
2310 |
for os_name, os_data in nimg.oslist.items(): |
2311 |
assert os_data, "Empty OS status for OS %s?!" % os_name |
2312 |
f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
|
2313 |
_ErrorIf(not f_status, constants.CV_ENODEOS, node,
|
2314 |
"Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
|
2315 |
_ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node, |
2316 |
"OS '%s' has multiple entries (first one shadows the rest): %s",
|
2317 |
os_name, utils.CommaJoin([v[0] for v in os_data])) |
2318 |
# comparisons with the 'base' image
|
2319 |
test = os_name not in base.oslist |
2320 |
_ErrorIf(test, constants.CV_ENODEOS, node, |
2321 |
"Extra OS %s not present on reference node (%s)",
|
2322 |
os_name, base.name) |
2323 |
if test:
|
2324 |
continue
|
2325 |
assert base.oslist[os_name], "Base node has empty OS status?" |
2326 |
_, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
|
2327 |
if not b_status: |
2328 |
# base OS is invalid, skipping
|
2329 |
continue
|
2330 |
for kind, a, b in [("API version", f_api, b_api), |
2331 |
("variants list", f_var, b_var),
|
2332 |
("parameters", beautify_params(f_param),
|
2333 |
beautify_params(b_param))]: |
2334 |
_ErrorIf(a != b, constants.CV_ENODEOS, node, |
2335 |
"OS %s for %s differs from reference node %s: [%s] vs. [%s]",
|
2336 |
kind, os_name, base.name, |
2337 |
utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b))) |
2338 |
|
2339 |
# check any missing OSes
|
2340 |
missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
|
2341 |
_ErrorIf(missing, constants.CV_ENODEOS, node, |
2342 |
"OSes present on reference node %s but missing on this node: %s",
|
2343 |
base.name, utils.CommaJoin(missing)) |
2344 |
|
2345 |
def _VerifyOob(self, ninfo, nresult): |
2346 |
"""Verifies out of band functionality of a node.
|
2347 |
|
2348 |
@type ninfo: L{objects.Node}
|
2349 |
@param ninfo: the node to check
|
2350 |
@param nresult: the remote results for the node
|
2351 |
|
2352 |
"""
|
2353 |
node = ninfo.name |
2354 |
# We just have to verify the paths on master and/or master candidates
|
2355 |
# as the oob helper is invoked on the master
|
2356 |
if ((ninfo.master_candidate or ninfo.master_capable) and |
2357 |
constants.NV_OOB_PATHS in nresult):
|
2358 |
for path_result in nresult[constants.NV_OOB_PATHS]: |
2359 |
self._ErrorIf(path_result, constants.CV_ENODEOOBPATH, node, path_result)
|
2360 |
|
2361 |
def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name): |
2362 |
"""Verifies and updates the node volume data.
|
2363 |
|
2364 |
This function will update a L{NodeImage}'s internal structures
|
2365 |
with data from the remote call.
|
2366 |
|
2367 |
@type ninfo: L{objects.Node}
|
2368 |
@param ninfo: the node to check
|
2369 |
@param nresult: the remote results for the node
|
2370 |
@param nimg: the node image object
|
2371 |
@param vg_name: the configured VG name
|
2372 |
|
2373 |
"""
|
2374 |
node = ninfo.name |
2375 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
2376 |
|
2377 |
nimg.lvm_fail = True
|
2378 |
lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
|
2379 |
if vg_name is None: |
2380 |
pass
|
2381 |
elif isinstance(lvdata, basestring): |
2382 |
_ErrorIf(True, constants.CV_ENODELVM, node, "LVM problem on node: %s", |
2383 |
utils.SafeEncode(lvdata)) |
2384 |
elif not isinstance(lvdata, dict): |
2385 |
_ErrorIf(True, constants.CV_ENODELVM, node,
|
2386 |
"rpc call to node failed (lvlist)")
|
2387 |
else:
|
2388 |
nimg.volumes = lvdata |
2389 |
nimg.lvm_fail = False
|
2390 |
|
2391 |
def _UpdateNodeInstances(self, ninfo, nresult, nimg): |
2392 |
"""Verifies and updates the node instance list.
|
2393 |
|
2394 |
If the listing was successful, then updates this node's instance
|
2395 |
list. Otherwise, it marks the RPC call as failed for the instance
|
2396 |
list key.
|
2397 |
|
2398 |
@type ninfo: L{objects.Node}
|
2399 |
@param ninfo: the node to check
|
2400 |
@param nresult: the remote results for the node
|
2401 |
@param nimg: the node image object
|
2402 |
|
2403 |
"""
|
2404 |
idata = nresult.get(constants.NV_INSTANCELIST, None)
|
2405 |
test = not isinstance(idata, list) |
2406 |
self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
|
2407 |
"rpc call to node failed (instancelist): %s",
|
2408 |
utils.SafeEncode(str(idata)))
|
2409 |
if test:
|
2410 |
nimg.hyp_fail = True
|
2411 |
else:
|
2412 |
nimg.instances = idata |
2413 |
|
2414 |
def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name): |
2415 |
"""Verifies and computes a node information map
|
2416 |
|
2417 |
@type ninfo: L{objects.Node}
|
2418 |
@param ninfo: the node to check
|
2419 |
@param nresult: the remote results for the node
|
2420 |
@param nimg: the node image object
|
2421 |
@param vg_name: the configured VG name
|
2422 |
|
2423 |
"""
|
2424 |
node = ninfo.name |
2425 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
2426 |
|
2427 |
# try to read free memory (from the hypervisor)
|
2428 |
hv_info = nresult.get(constants.NV_HVINFO, None)
|
2429 |
test = not isinstance(hv_info, dict) or "memory_free" not in hv_info |
2430 |
_ErrorIf(test, constants.CV_ENODEHV, node, |
2431 |
"rpc call to node failed (hvinfo)")
|
2432 |
if not test: |
2433 |
try:
|
2434 |
nimg.mfree = int(hv_info["memory_free"]) |
2435 |
except (ValueError, TypeError): |
2436 |
_ErrorIf(True, constants.CV_ENODERPC, node,
|
2437 |
"node returned invalid nodeinfo, check hypervisor")
|
2438 |
|
2439 |
# FIXME: devise a free space model for file based instances as well
|
2440 |
if vg_name is not None: |
2441 |
test = (constants.NV_VGLIST not in nresult or |
2442 |
vg_name not in nresult[constants.NV_VGLIST]) |
2443 |
_ErrorIf(test, constants.CV_ENODELVM, node, |
2444 |
"node didn't return data for the volume group '%s'"
|
2445 |
" - it is either missing or broken", vg_name)
|
2446 |
if not test: |
2447 |
try:
|
2448 |
nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
|
2449 |
except (ValueError, TypeError): |
2450 |
_ErrorIf(True, constants.CV_ENODERPC, node,
|
2451 |
"node returned invalid LVM info, check LVM status")
|
2452 |
|
2453 |
def _CollectDiskInfo(self, nodelist, node_image, instanceinfo): |
2454 |
"""Gets per-disk status information for all instances.
|
2455 |
|
2456 |
@type nodelist: list of strings
|
2457 |
@param nodelist: Node names
|
2458 |
@type node_image: dict of (name, L{objects.Node})
|
2459 |
@param node_image: Node objects
|
2460 |
@type instanceinfo: dict of (name, L{objects.Instance})
|
2461 |
@param instanceinfo: Instance objects
|
2462 |
@rtype: {instance: {node: [(success, payload)]}}
|
2463 |
@return: a dictionary of per-instance dictionaries with nodes as
|
2464 |
keys and disk information as values; the disk information is a
|
2465 |
list of tuples (success, payload)
|
2466 |
|
2467 |
"""
|
2468 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
2469 |
|
2470 |
node_disks = {} |
2471 |
node_disks_devonly = {} |
2472 |
diskless_instances = set()
|
2473 |
diskless = constants.DT_DISKLESS |
2474 |
|
2475 |
for nname in nodelist: |
2476 |
node_instances = list(itertools.chain(node_image[nname].pinst,
|
2477 |
node_image[nname].sinst)) |
2478 |
diskless_instances.update(inst for inst in node_instances |
2479 |
if instanceinfo[inst].disk_template == diskless)
|
2480 |
disks = [(inst, disk) |
2481 |
for inst in node_instances |
2482 |
for disk in instanceinfo[inst].disks] |
2483 |
|
2484 |
if not disks: |
2485 |
# No need to collect data
|
2486 |
continue
|
2487 |
|
2488 |
node_disks[nname] = disks |
2489 |
|
2490 |
# Creating copies as SetDiskID below will modify the objects and that can
|
2491 |
# lead to incorrect data returned from nodes
|
2492 |
devonly = [dev.Copy() for (_, dev) in disks] |
2493 |
|
2494 |
for dev in devonly: |
2495 |
self.cfg.SetDiskID(dev, nname)
|
2496 |
|
2497 |
node_disks_devonly[nname] = devonly |
2498 |
|
2499 |
assert len(node_disks) == len(node_disks_devonly) |
2500 |
|
2501 |
# Collect data from all nodes with disks
|
2502 |
result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
|
2503 |
node_disks_devonly) |
2504 |
|
2505 |
assert len(result) == len(node_disks) |
2506 |
|
2507 |
instdisk = {} |
2508 |
|
2509 |
for (nname, nres) in result.items(): |
2510 |
disks = node_disks[nname] |
2511 |
|
2512 |
if nres.offline:
|
2513 |
# No data from this node
|
2514 |
data = len(disks) * [(False, "node offline")] |
2515 |
else:
|
2516 |
msg = nres.fail_msg |
2517 |
_ErrorIf(msg, constants.CV_ENODERPC, nname, |
2518 |
"while getting disk information: %s", msg)
|
2519 |
if msg:
|
2520 |
# No data from this node
|
2521 |
data = len(disks) * [(False, msg)] |
2522 |
else:
|
2523 |
data = [] |
2524 |
for idx, i in enumerate(nres.payload): |
2525 |
if isinstance(i, (tuple, list)) and len(i) == 2: |
2526 |
data.append(i) |
2527 |
else:
|
2528 |
logging.warning("Invalid result from node %s, entry %d: %s",
|
2529 |
nname, idx, i) |
2530 |
data.append((False, "Invalid result from the remote node")) |
2531 |
|
2532 |
for ((inst, _), status) in zip(disks, data): |
2533 |
instdisk.setdefault(inst, {}).setdefault(nname, []).append(status) |
2534 |
|
2535 |
# Add empty entries for diskless instances.
|
2536 |
for inst in diskless_instances: |
2537 |
assert inst not in instdisk |
2538 |
instdisk[inst] = {} |
2539 |
|
2540 |
assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and |
2541 |
len(nnames) <= len(instanceinfo[inst].all_nodes) and |
2542 |
compat.all(isinstance(s, (tuple, list)) and |
2543 |
len(s) == 2 for s in statuses) |
2544 |
for inst, nnames in instdisk.items() |
2545 |
for nname, statuses in nnames.items()) |
2546 |
assert set(instdisk) == set(instanceinfo), "instdisk consistency failure" |
2547 |
|
2548 |
return instdisk
|
2549 |
|
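The structure returned above maps instance to node to a list of (success, payload) tuples, one per disk in index order. A reduced sketch of how a single node's answer is folded in; the data is invented:

disks = [("inst1", "disk0"), ("inst1", "disk1"), ("inst2", "disk0")]
data = [(True, "ok"), (False, "degraded"), (True, "ok")]   # one entry per disk

instdisk = {}
for ((inst, _), status) in zip(disks, data):
  instdisk.setdefault(inst, {}).setdefault("node1", []).append(status)

print(instdisk["inst1"]["node1"])  # [(True, 'ok'), (False, 'degraded')]
print(instdisk["inst2"]["node1"])  # [(True, 'ok')]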
2550 |
@staticmethod
|
2551 |
def _SshNodeSelector(group_uuid, all_nodes): |
2552 |
"""Create endless iterators for all potential SSH check hosts.
|
2553 |
|
2554 |
"""
|
2555 |
nodes = [node for node in all_nodes |
2556 |
if (node.group != group_uuid and |
2557 |
not node.offline)]
|
2558 |
keyfunc = operator.attrgetter("group")
|
2559 |
|
2560 |
return map(itertools.cycle, |
2561 |
[sorted(map(operator.attrgetter("name"), names)) |
2562 |
for _, names in itertools.groupby(sorted(nodes, key=keyfunc), |
2563 |
keyfunc)]) |
2564 |
|
2565 |
@classmethod
|
2566 |
def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes): |
2567 |
"""Choose which nodes should talk to which other nodes.
|
2568 |
|
2569 |
We will make nodes contact all nodes in their group, and one node from
|
2570 |
every other group.
|
2571 |
|
2572 |
@warning: This algorithm has a known issue if one node group is much
|
2573 |
smaller than others (e.g. just one node). In such a case all other
|
2574 |
nodes will talk to the single node.
|
2575 |
|
2576 |
"""
|
2577 |
online_nodes = sorted(node.name for node in group_nodes if not node.offline) |
2578 |
sel = cls._SshNodeSelector(group_uuid, all_nodes) |
2579 |
|
2580 |
return (online_nodes,
|
2581 |
dict((name, sorted([i.next() for i in sel])) |
2582 |
for name in online_nodes)) |
2583 |
|
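The selector above turns every foreign group into an endless cycle of its node names, so each checked node gets one SSH target per other group and the load rotates through that group's members. A standalone sketch with invented group and node names:

import itertools
import operator

other_nodes = [("g2", "n3"), ("g2", "n4"), ("g3", "n5")]   # (group, name), not offline
keyfunc = operator.itemgetter(0)
selectors = [itertools.cycle(sorted(name for (_, name) in members))
             for (_, members) in itertools.groupby(sorted(other_nodes), keyfunc)]

online_nodes = ["n1", "n2"]
targets = dict((name, sorted(next(sel) for sel in selectors))
               for name in online_nodes)
for name in online_nodes:
  print((name, targets[name]))
# ('n1', ['n3', 'n5'])
# ('n2', ['n4', 'n5'])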
2584 |
def BuildHooksEnv(self): |
2585 |
"""Build hooks env.
|
2586 |
|
2587 |
Cluster-Verify hooks are run only in the post phase; if they fail, their
|
2588 |
output is logged in the verify output and the verification fails.
|
2589 |
|
2590 |
"""
|
2591 |
env = { |
2592 |
"CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags()) |
2593 |
} |
2594 |
|
2595 |
env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags())) |
2596 |
for node in self.my_node_info.values()) |
2597 |
|
2598 |
return env
|
2599 |
|
2600 |
def BuildHooksNodes(self): |
2601 |
"""Build hooks nodes.
|
2602 |
|
2603 |
"""
|
2604 |
return ([], self.my_node_names) |
2605 |
|
2606 |
def Exec(self, feedback_fn): |
2607 |
"""Verify integrity of the node group, performing various test on nodes.
|
2608 |
|
2609 |
"""
|
2610 |
# This method has too many local variables. pylint: disable=R0914
|
2611 |
feedback_fn("* Verifying group '%s'" % self.group_info.name) |
2612 |
|
2613 |
if not self.my_node_names: |
2614 |
# empty node group
|
2615 |
feedback_fn("* Empty node group, skipping verification")
|
2616 |
return True |
2617 |
|
2618 |
self.bad = False |
2619 |
_ErrorIf = self._ErrorIf # pylint: disable=C0103 |
2620 |
verbose = self.op.verbose
|
2621 |
self._feedback_fn = feedback_fn
|
2622 |
|
2623 |
vg_name = self.cfg.GetVGName()
|
2624 |
drbd_helper = self.cfg.GetDRBDHelper()
|
2625 |
cluster = self.cfg.GetClusterInfo()
|
2626 |
groupinfo = self.cfg.GetAllNodeGroupsInfo()
|
2627 |
hypervisors = cluster.enabled_hypervisors |
2628 |
node_data_list = [self.my_node_info[name] for name in self.my_node_names] |
2629 |
|
2630 |
i_non_redundant = [] # Non redundant instances
|
2631 |
i_non_a_balanced = [] # Non auto-balanced instances
|
2632 |
n_offline = 0 # Count of offline nodes |
2633 |
n_drained = 0 # Count of nodes being drained |
2634 |
node_vol_should = {} |
2635 |
|
2636 |
# FIXME: verify OS list
|
2637 |
|
2638 |
# File verification
|
2639 |
filemap = _ComputeAncillaryFiles(cluster, False)
|
2640 |
|
2641 |
# do local checksums
|
2642 |
master_node = self.master_node = self.cfg.GetMasterNode() |
2643 |
master_ip = self.cfg.GetMasterIP()
|
2644 |
|
2645 |
feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names)) |
2646 |
|
2647 |
node_verify_param = { |
2648 |
constants.NV_FILELIST: |
2649 |
utils.UniqueSequence(filename |
2650 |
for files in filemap |
2651 |
for filename in files), |
2652 |
constants.NV_NODELIST: |
2653 |
self._SelectSshCheckNodes(node_data_list, self.group_uuid, |
2654 |
self.all_node_info.values()),
|
2655 |
constants.NV_HYPERVISOR: hypervisors, |
2656 |
constants.NV_HVPARAMS: |
2657 |
_GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
|
2658 |
constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip) |
2659 |
for node in node_data_list |
2660 |
if not node.offline], |
2661 |
constants.NV_INSTANCELIST: hypervisors, |
2662 |
constants.NV_VERSION: None,
|
2663 |
constants.NV_HVINFO: self.cfg.GetHypervisorType(),
|
2664 |
constants.NV_NODESETUP: None,
|
2665 |
constants.NV_TIME: None,
|
2666 |
constants.NV_MASTERIP: (master_node, master_ip), |
2667 |
constants.NV_OSLIST: None,
|
2668 |
constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
|
2669 |
} |
2670 |
|
2671 |
if vg_name is not None: |
2672 |
node_verify_param[constants.NV_VGLIST] = None
|
2673 |
node_verify_param[constants.NV_LVLIST] = vg_name |
2674 |
node_verify_param[constants.NV_PVLIST] = [vg_name] |
2675 |
node_verify_param[constants.NV_DRBDLIST] = None
|
2676 |
|
2677 |
if drbd_helper:
|
2678 |
node_verify_param[constants.NV_DRBDHELPER] = drbd_helper |
2679 |
|
2680 |
# bridge checks
|
2681 |
# FIXME: this needs to be changed per node-group, not cluster-wide
|
2682 |
bridges = set()
|
2683 |
default_nicpp = cluster.nicparams[constants.PP_DEFAULT] |
2684 |
if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
|
2685 |
bridges.add(default_nicpp[constants.NIC_LINK]) |
2686 |
for instance in self.my_inst_info.values(): |
2687 |
for nic in instance.nics: |
2688 |
full_nic = cluster.SimpleFillNIC(nic.nicparams) |
2689 |
if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
|
2690 |
bridges.add(full_nic[constants.NIC_LINK]) |
2691 |
|
2692 |
if bridges:
|
2693 |
node_verify_param[constants.NV_BRIDGES] = list(bridges)
|
2694 |
|
2695 |
# Build our expected cluster state
|
2696 |
node_image = dict((node.name, self.NodeImage(offline=node.offline, |
2697 |
name=node.name, |
2698 |
vm_capable=node.vm_capable)) |
2699 |
for node in node_data_list) |
2700 |
|
2701 |
# Gather OOB paths
|
2702 |
oob_paths = [] |
2703 |
for node in self.all_node_info.values(): |
2704 |
path = _SupportsOob(self.cfg, node)
|
2705 |
if path and path not in oob_paths: |
2706 |
oob_paths.append(path) |
2707 |
|
2708 |
if oob_paths:
|
2709 |
node_verify_param[constants.NV_OOB_PATHS] = oob_paths |
2710 |
|
2711 |
for instance in self.my_inst_names: |
2712 |
inst_config = self.my_inst_info[instance]
|
2713 |
|
2714 |
for nname in inst_config.all_nodes: |
2715 |
if nname not in node_image: |
2716 |
gnode = self.NodeImage(name=nname)
|
2717 |
gnode.ghost = (nname not in self.all_node_info) |
2718 |
node_image[nname] = gnode |
2719 |
|
2720 |
inst_config.MapLVsByNode(node_vol_should) |
2721 |
|
2722 |
pnode = inst_config.primary_node |
2723 |
node_image[pnode].pinst.append(instance) |
2724 |
|
2725 |
for snode in inst_config.secondary_nodes: |
2726 |
nimg = node_image[snode] |
2727 |
nimg.sinst.append(instance) |
2728 |
if pnode not in nimg.sbp: |
2729 |
nimg.sbp[pnode] = [] |
2730 |
nimg.sbp[pnode].append(instance) |
2731 |
|
2732 |
# At this point, we have the in-memory data structures complete,
|
2733 |
# except for the runtime information, which we'll gather next
|
2734 |
|
2735 |
# Due to the way our RPC system works, exact response times cannot be
|
2736 |
# guaranteed (e.g. a broken node could run into a timeout). By keeping the
|
2737 |
# time before and after executing the request, we can at least have a time
|
2738 |
# window.
|
2739 |
nvinfo_starttime = time.time() |
2740 |
all_nvinfo = self.rpc.call_node_verify(self.my_node_names, |
2741 |
node_verify_param, |
2742 |
self.cfg.GetClusterName())
|
2743 |
nvinfo_endtime = time.time() |
2744 |
|
2745 |
if self.extra_lv_nodes and vg_name is not None: |
2746 |
extra_lv_nvinfo = \ |
2747 |
self.rpc.call_node_verify(self.extra_lv_nodes, |
2748 |
{constants.NV_LVLIST: vg_name}, |
2749 |
self.cfg.GetClusterName())
|
2750 |
else:
|
2751 |
extra_lv_nvinfo = {} |
2752 |
|
2753 |
all_drbd_map = self.cfg.ComputeDRBDMap()
|
2754 |
|
2755 |
feedback_fn("* Gathering disk information (%s nodes)" %
|
2756 |
len(self.my_node_names)) |
2757 |
instdisk = self._CollectDiskInfo(self.my_node_names, node_image, |
2758 |
self.my_inst_info)
|
2759 |
|
2760 |
feedback_fn("* Verifying configuration file consistency")
|
2761 |
|
2762 |
# If not all nodes are being checked, we need to make sure the master node
|
2763 |
# and a non-checked vm_capable node are in the list.
|
2764 |
absent_nodes = set(self.all_node_info).difference(self.my_node_info) |
2765 |
if absent_nodes:
|
2766 |
vf_nvinfo = all_nvinfo.copy() |
2767 |
vf_node_info = list(self.my_node_info.values()) |
2768 |
additional_nodes = [] |
2769 |
if master_node not in self.my_node_info: |
2770 |
additional_nodes.append(master_node) |
2771 |
vf_node_info.append(self.all_node_info[master_node])
|
2772 |
# Add the first vm_capable node we find which is not included
|
2773 |
for node in absent_nodes: |
2774 |
nodeinfo = self.all_node_info[node]
|
2775 |
if nodeinfo.vm_capable and not nodeinfo.offline: |
2776 |
additional_nodes.append(node) |
2777 |
vf_node_info.append(self.all_node_info[node])
|
2778 |
break
|
2779 |
key = constants.NV_FILELIST |
2780 |
vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
|
2781 |
{key: node_verify_param[key]}, |
2782 |
self.cfg.GetClusterName()))
|
2783 |
else:
|
2784 |
vf_nvinfo = all_nvinfo |
2785 |
vf_node_info = self.my_node_info.values()
|
2786 |
|
2787 |
self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
|
2788 |
|
2789 |
feedback_fn("* Verifying node status")
|
2790 |
|
2791 |
refos_img = None
|
2792 |
|
2793 |
for node_i in node_data_list: |
2794 |
node = node_i.name |
2795 |
nimg = node_image[node] |
2796 |
|
2797 |
if node_i.offline:
|
2798 |
if verbose:
|
2799 |
feedback_fn("* Skipping offline node %s" % (node,))
|
2800 |
n_offline += 1
|
2801 |
continue
|
2802 |
|
2803 |
if node == master_node:
|
2804 |
ntype = "master"
|
2805 |
elif node_i.master_candidate:
|
2806 |
ntype = "master candidate"
|
2807 |
elif node_i.drained:
|
2808 |
ntype = "drained"
|
2809 |
n_drained += 1
|
2810 |
else:
|
2811 |
ntype = "regular"
|
2812 |
if verbose:
|
2813 |
feedback_fn("* Verifying node %s (%s)" % (node, ntype))
|
2814 |
|
2815 |
msg = all_nvinfo[node].fail_msg |
2816 |
_ErrorIf(msg, constants.CV_ENODERPC, node, "while contacting node: %s",
|
2817 |
msg) |
2818 |
if msg:
|
2819 |
nimg.rpc_fail = True
|
2820 |
continue
|
2821 |
|
2822 |
nresult = all_nvinfo[node].payload |
2823 |
|
2824 |
nimg.call_ok = self._VerifyNode(node_i, nresult)
|
2825 |
self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
|
2826 |
self._VerifyNodeNetwork(node_i, nresult)
|
2827 |
self._VerifyOob(node_i, nresult)
|
2828 |
|
2829 |
if nimg.vm_capable:
|
2830 |
self._VerifyNodeLVM(node_i, nresult, vg_name)
|
2831 |
self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper, |
2832 |
all_drbd_map) |
2833 |
|
2834 |
self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
|
2835 |
self._UpdateNodeInstances(node_i, nresult, nimg)
|
2836 |
self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
|
2837 |
self._UpdateNodeOS(node_i, nresult, nimg)
|
2838 |
|
2839 |
if not nimg.os_fail: |
2840 |
if refos_img is None: |
2841 |
refos_img = nimg |
2842 |
self._VerifyNodeOS(node_i, nimg, refos_img)
|
2843 |
self._VerifyNodeBridges(node_i, nresult, bridges)
|
2844 |
|
2845 |
# Check whether all running instances are primary for the node. (This
|
2846 |
# can no longer be done from _VerifyInstance below, since some of the
|
2847 |
# wrong instances could be from other node groups.)
|
2848 |
non_primary_inst = set(nimg.instances).difference(nimg.pinst)
|
2849 |
|
2850 |
for inst in non_primary_inst: |
2851 |
test = inst in self.all_inst_info |
2852 |
_ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst, |
2853 |
"instance should not run on node %s", node_i.name)
|
2854 |
_ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
|
2855 |
"node is running unknown instance %s", inst)
|
2856 |
|
2857 |
for node, result in extra_lv_nvinfo.items(): |
2858 |
self._UpdateNodeVolumes(self.all_node_info[node], result.payload, |
2859 |
node_image[node], vg_name) |
2860 |
|
2861 |
feedback_fn("* Verifying instance status")
|
2862 |
for instance in self.my_inst_names: |
2863 |
if verbose:
|
2864 |
feedback_fn("* Verifying instance %s" % instance)
|
2865 |
inst_config = self.my_inst_info[instance]
|
2866 |
self._VerifyInstance(instance, inst_config, node_image,
|
2867 |
instdisk[instance]) |
2868 |
inst_nodes_offline = [] |
2869 |
|
2870 |
pnode = inst_config.primary_node |
2871 |
pnode_img = node_image[pnode] |
2872 |
_ErrorIf(pnode_img.rpc_fail and not pnode_img.offline, |
2873 |
constants.CV_ENODERPC, pnode, "instance %s, connection to"
|
2874 |
" primary node failed", instance)
|
2875 |
|
2876 |
_ErrorIf(inst_config.admin_up and pnode_img.offline,
|
2877 |
constants.CV_EINSTANCEBADNODE, instance, |
2878 |
"instance is marked as running and lives on offline node %s",
|
2879 |
inst_config.primary_node) |
2880 |
|
2881 |
# If the instance is non-redundant we cannot survive losing its primary
|
2882 |
# node, so we are not N+1 compliant. On the other hand we have no disk
|
2883 |
# templates with more than one secondary so that situation is not well
|
2884 |
# supported either.
|
2885 |
# FIXME: does not support file-backed instances
|
2886 |
if not inst_config.secondary_nodes: |
2887 |
        i_non_redundant.append(instance)

      _ErrorIf(len(inst_config.secondary_nodes) > 1,
               constants.CV_EINSTANCELAYOUT,
               instance, "instance has multiple secondary nodes: %s",
               utils.CommaJoin(inst_config.secondary_nodes),
               code=self.ETYPE_WARNING)

      if inst_config.disk_template in constants.DTS_INT_MIRROR:
        pnode = inst_config.primary_node
        instance_nodes = utils.NiceSort(inst_config.all_nodes)
        instance_groups = {}

        for node in instance_nodes:
          instance_groups.setdefault(self.all_node_info[node].group,
                                     []).append(node)

        pretty_list = [
          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
          # Sort so that we always list the primary node first.
          for group, nodes in sorted(instance_groups.items(),
                                     key=lambda (_, nodes): pnode in nodes,
                                     reverse=True)]

        self._ErrorIf(len(instance_groups) > 1,
                      constants.CV_EINSTANCESPLITGROUPS,
                      instance, "instance has primary and secondary nodes in"
                      " different groups: %s", utils.CommaJoin(pretty_list),
                      code=self.ETYPE_WARNING)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        s_img = node_image[snode]
        _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
                 snode, "instance %s, connection to secondary node failed",
                 instance)

        if s_img.offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
               "instance has offline secondary node(s) %s",
               utils.CommaJoin(inst_nodes_offline))
      # ... or ghost/non-vm_capable nodes
      for node in inst_config.all_nodes:
        _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
                 instance, "instance lives on ghost node %s", node)
        _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
                 instance, "instance lives on non-vm_capable node %s", node)

    feedback_fn("* Verifying orphan volumes")
    reserved = utils.FieldSet(*cluster.reserved_lvs)

    # We will get spurious "unknown volume" warnings if any node of this group
    # is secondary for an instance whose primary is in another group. To avoid
    # them, we find these instances and add their volumes to node_vol_should.
    for inst in self.all_inst_info.values():
      for secondary in inst.secondary_nodes:
        if (secondary in self.my_node_info
            and inst.name not in self.my_inst_info):
          inst.MapLVsByNode(node_vol_should)
          break

    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)

    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_image, self.my_inst_info)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)

    return not self.bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, only for non-empty groups,
    # and are only interested in their results
    if not self.my_node_names:
      # empty node group
      pass
    elif phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave
          # an error.
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = self._HOOKS_INDENT_RE.sub(" ", output)
            feedback_fn("%s" % output)
            lu_result = False

    return lu_result


class LUClusterVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: locking.ALL_SET,
      }

  def Exec(self, feedback_fn):
    group_names = self.owned_locks(locking.LEVEL_NODEGROUP)

    # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
    return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
                           for group in group_names])

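# Editor's note (illustrative, not part of the original module): the Exec
# method above returns one single-opcode job per node group.  For a cluster
# with two hypothetical groups, "default" and "storage", the submitted job
# list would look roughly like this:
#
#   ResultWithJobs([
#     [opcodes.OpGroupVerifyDisks(group_name="default")],
#     [opcodes.OpGroupVerifyDisks(group_name="storage")],
#   ])
#
# Each inner list is one job; the master daemon submits the per-group
# verifications as independent jobs and includes their job IDs in the
# opcode result.
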
class LUGroupVerifyDisks(NoHooksLU):
  """Verifies the status of all disks in a node group.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # Raises errors.OpPrereqError on its own if group can't be found
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetNodeGroupInstances(self.group_uuid)

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        set([self.group_uuid] +
            # Lock all groups used by instances optimistically; this requires
            # going via the node before it's locked, requiring verification
            # later on
            [group_uuid
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
             for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be verified which contain
      # actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be verified
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))

    # Check if node groups for locked instances are still correct
    for (instance_name, inst) in self.instances.items():
      assert owned_nodes.issuperset(inst.all_nodes), \
        "Instance %s's nodes changed while we kept the lock" % instance_name

      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
                                             owned_groups)

      assert self.group_uuid in inst_groups, \
        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    res_nodes = {}
    res_instances = set()
    res_missing = {}

    nv_dict = _MapInstanceDisksToNodes([inst
                                        for inst in self.instances.values()
                                        if inst.admin_up])

    if nv_dict:
      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                             set(self.cfg.GetVmCapableNodeList()))

      node_lvs = self.rpc.call_lv_list(nodes, [])

      for (node, node_res) in node_lvs.items():
        if node_res.offline:
          continue

        msg = node_res.fail_msg
        if msg:
          logging.warning("Error enumerating LVs on node %s: %s", node, msg)
          res_nodes[node] = msg
          continue

        for lv_name, (_, _, lv_online) in node_res.payload.items():
          inst = nv_dict.pop((node, lv_name), None)
          if not (lv_online or inst is None):
            res_instances.add(inst)

      # any leftover items in nv_dict are missing LVs, let's arrange the data
      # better
      for key, inst in nv_dict.iteritems():
        res_missing.setdefault(inst, []).append(key)

    return (res_nodes, list(res_instances), res_missing)


class LUClusterRepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.instances:
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = _ShareAll()

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)

    self.wanted_instances = \
        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsize(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsize call to node"
                        " %s, ignoring", node)
        continue
      if len(result.payload) != len(dskl):
        logging.warning("Invalid result from node %s: len(dksl)=%d,"
                        " result.payload=%s", node, len(dskl), result.payload)
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


class LUClusterRename(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = netutils.GetHostname(name=self.op.name,
                                    family=self.cfg.GetPrimaryIPFamily())

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network" %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_deactivate_master_ip(master)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetOnlineNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
    finally:
      result = self.rpc.call_node_activate_master_ip(master)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)

    return clustername


def _ValidateNetmask(cfg, netmask):
  """Checks if a netmask is valid.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type netmask: int
  @param netmask: the netmask to be verified
  @raise errors.OpPrereqError: if the validation fails

  """
  ip_family = cfg.GetPrimaryIPFamily()
  try:
    ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip family: %s." %
                               ip_family)
  if not ipcls.ValidateNetmask(netmask):
    raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
                               (netmask))

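# Editor's note (illustrative sketch, not part of the original module): the
# netmask is an integer prefix length, so validity depends on the cluster's
# primary IP family.  Using only the two calls from _ValidateNetmask above,
# and assuming an IPv4 cluster, the check behaves roughly like:
#
#   ipcls = netutils.IPAddress.GetClassFromIpFamily(cfg.GetPrimaryIPFamily())
#   ipcls.ValidateNetmask(24)   # True: /24 is a valid IPv4 prefix length
#   ipcls.ValidateNetmask(33)   # False: longer than an IPv4 address,
#                               # so _ValidateNetmask raises OpPrereqError
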
class LUClusterSetParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if self.op.uid_pool:
      uidpool.CheckUidPool(self.op.uid_pool)

    if self.op.add_uids:
      uidpool.CheckUidPool(self.op.add_uids)

    if self.op.remove_uids:
      uidpool.CheckUidPool(self.op.remove_uids)

    if self.op.master_netmask is not None:
      _ValidateNetmask(self.cfg, self.op.master_netmask)

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                   " instances exist", errors.ECODE_INVAL)

    if self.op.drbd_helper is not None and not self.op.drbd_helper:
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " drbd-based instances exist",
                                   errors.ECODE_INVAL)

    node_list = self.owned_locks(locking.LEVEL_NODE)

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    if self.op.drbd_helper:
      # checks given drbd helper on all nodes
      helpers = self.rpc.call_drbd_helper(node_list)
      for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
        if ninfo.offline:
          self.LogInfo("Not checking drbd helper on offline node %s", node)
          continue
        msg = helpers[node].fail_msg
        if msg:
          raise errors.OpPrereqError("Error checking drbd helper on node"
                                     " '%s': %s" % (node, msg),
                                     errors.ECODE_ENVIRON)
        node_helper = helpers[node].payload
        if node_helper != self.op.drbd_helper:
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                     (node, node_helper), errors.ECODE_ENVIRON)

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)

      # TODO: we need a more general way to handle resetting
      # cluster-level parameters to default values
      if self.new_ndparams["oob_program"] == "":
        self.new_ndparams["oob_program"] = \
            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
                              " address" % (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

    # hypervisor list/parameters
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      for os_name, hvs in self.op.os_hvp.items():
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    # os parameters
    self.new_osp = objects.FillDict(cluster.osparams, {})
    if self.op.osparams:
      for os_name, osp in self.op.osparams.items():
        if os_name not in self.new_osp:
          self.new_osp[os_name] = {}

        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
                                                  use_none=True)

        if not self.new_osp[os_name]:
          # we removed all parameters
          del self.new_osp[os_name]
        else:
          # check the parameter validity (remote check)
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
                         os_name, self.new_osp[os_name])

    # changes to the hypervisor list
    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      for hv in self.hv_list:
        # if the hypervisor doesn't already exist in the cluster
        # hvparams, we initialize it to empty, and then (in both
        # cases) we make sure to fill the defaults, as we might not
        # have a complete defaults list if the hypervisor wasn't
        # enabled before
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          # we need to fill in the new os_hvp on top of the actual hv_p
          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          _CheckHVParams(self, node_list, hv_name, new_osp)

    if self.op.default_iallocator:
      alloc_script = utils.FindFile(self.op.default_iallocator,
                                    constants.IALLOCATOR_SEARCH_PATH,
                                    os.path.isfile)
      if alloc_script is None:
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                   " specified" % self.op.default_iallocator,
                                   errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.drbd_helper is not None:
      new_helper = self.op.drbd_helper
      if not new_helper:
        new_helper = None
      if new_helper != self.cfg.GetDRBDHelper():
        self.cfg.SetDRBDHelper(new_helper)
      else:
        feedback_fn("Cluster DRBD helper already in desired state,"
                    " not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
    if self.op.osparams:
      self.cluster.osparams = self.new_osp
    if self.op.ndparams:
      self.cluster.ndparams = self.new_ndparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.prealloc_wipe_disks is not None:
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    if self.op.default_iallocator is not None:
      self.cluster.default_iallocator = self.op.default_iallocator

    if self.op.reserved_lvs is not None:
      self.cluster.reserved_lvs = self.op.reserved_lvs

    def helper_os(aname, mods, desc):
      desc += " OS list"
      lst = getattr(self.cluster, aname)
      for key, val in mods:
        if key == constants.DDM_ADD:
          if val in lst:
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
          else:
            lst.append(val)
        elif key == constants.DDM_REMOVE:
          if val in lst:
            lst.remove(val)
          else:
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
        else:
          raise errors.ProgrammerError("Invalid modification '%s'" % key)

    if self.op.hidden_os:
      helper_os("hidden_os", self.op.hidden_os, "hidden")

    if self.op.blacklisted_os:
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")

    if self.op.master_netdev:
      master = self.cfg.GetMasterNode()
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
                  self.cluster.master_netdev)
      result = self.rpc.call_node_deactivate_master_ip(master)
      result.Raise("Could not disable the master ip")
      feedback_fn("Changing master_netdev from %s to %s" %
                  (self.cluster.master_netdev, self.op.master_netdev))
      self.cluster.master_netdev = self.op.master_netdev

    if self.op.master_netmask:
      master = self.cfg.GetMasterNode()
      feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
      result = self.rpc.call_node_change_master_netmask(master,
                                                        self.op.master_netmask)
      if result.fail_msg:
        msg = "Could not change the master IP netmask: %s" % result.fail_msg
        self.LogWarning(msg)
        feedback_fn(msg)
      else:
        self.cluster.master_netmask = self.op.master_netmask

    self.cfg.Update(self.cluster, feedback_fn)

    if self.op.master_netdev:
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
                  self.op.master_netdev)
      result = self.rpc.call_node_activate_master_ip(master)
      if result.fail_msg:
        self.LogWarning("Could not re-enable the master ip on"
                        " the master, please restart manually: %s",
                        result.fail_msg)


def _UploadHelper(lu, nodes, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)


def _ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed

  """
  # Compute files for all nodes
  files_all = set([
    constants.SSH_KNOWN_HOSTS_FILE,
    constants.CONFD_HMAC_KEY,
    constants.CLUSTER_DOMAIN_SECRET_FILE,
    ])

  if not redist:
    files_all.update(constants.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())
  else:
    # we need to ship at least the RAPI certificate
    files_all.add(constants.RAPI_CERT_FILE)

  if cluster.modify_etc_hosts:
    files_all.add(constants.ETC_HOSTS)

  # Files which must either exist on all nodes or on none
  files_all_opt = set([
    constants.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()
  if not redist:
    files_mc.add(constants.CLUSTER_CONF_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(filename
    for hv_name in cluster.enabled_hypervisors
    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())

  # Filenames must be unique
  assert (len(files_all | files_all_opt | files_mc | files_vm) ==
          sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
    "Found file listed in more than one file list"

  return (files_all, files_all_opt, files_mc, files_vm)

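# Editor's note (illustrative, not part of the original module): a typical
# consumer unpacks the four sets and pushes each one to a different set of
# node roles, e.g.:
#
#   (files_all, files_all_opt, files_mc, files_vm) = \
#     _ComputeAncillaryFiles(lu.cfg.GetClusterInfo(), True)
#   # files_all     -> every online node (known_hosts, HMAC key, ...)
#   # files_all_opt -> every node, but may legitimately be absent everywhere
#   # files_mc      -> master candidates only (config file, redist=False only)
#   # files_vm      -> VM-capable nodes only (hypervisor ancillary files)
#
# _RedistributeAncillaryFiles below follows exactly this pattern.
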
def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to
  @type additional_vm: boolean
  @param additional_vm: whether the additional nodes are vm-capable or not

  """
  # Gather target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())

  online_nodes = lu.cfg.GetOnlineNodeList()
  vm_nodes = lu.cfg.GetVmCapableNodeList()

  if additional_nodes is not None:
    online_nodes.extend(additional_nodes)
    if additional_vm:
      vm_nodes.extend(additional_nodes)

  # Never distribute to master node
  for nodelist in [online_nodes, vm_nodes]:
    if master_info.name in nodelist:
      nodelist.remove(master_info.name)

  # Gather file lists
  (files_all, files_all_opt, files_mc, files_vm) = \
    _ComputeAncillaryFiles(cluster, True)

  # Never re-distribute configuration file from here
  assert not (constants.CLUSTER_CONF_FILE in files_all or
              constants.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_nodes, files_all),
    (online_nodes, files_all_opt),
    (vm_nodes, files_vm),
    ]

  # Upload the files
  for (node_list, files) in filemap:
    for fname in files:
      _UploadHelper(lu, node_list, fname)


class LUClusterRedistConf(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      }
    self.share_locks[locking.LEVEL_NODE] = 1

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


class LUClusterActivateMasterIp(NoHooksLU):
  """Activate the master IP on the master node.

  """
  def Exec(self, feedback_fn):
    """Activate the master IP.

    """
    master = self.cfg.GetMasterNode()
    self.rpc.call_node_activate_master_ip(master)


class LUClusterDeactivateMasterIp(NoHooksLU):
  """Deactivate the master IP on the master node.

  """
  def Exec(self, feedback_fn):
    """Deactivate the master IP.

    """
    master = self.cfg.GetMasterNode()
    self.rpc.call_node_deactivate_master_ip(master)


def _WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks or disks is not None and not disks:
    return True

  disks = _ExpandCheckDisks(instance, disks)

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (disks[i].iv_name, mstat.sync_percent, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  """
  REG_BGL = False
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)

  def ExpandNames(self):
    """Gather locks we need.

    """
    if self.op.node_names:
      self.op.node_names = _GetWantedNodes(self, self.op.node_names)
      lock_names = self.op.node_names
    else:
      lock_names = locking.ALL_SET

    self.needed_locks = {
      locking.LEVEL_NODE: lock_names,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    self.master_node = self.cfg.GetMasterNode()

    assert self.op.power_delay >= 0.0

    if self.op.node_names:
      if (self.op.command in self._SKIP_MASTER and
          self.master_node in self.op.node_names):
        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
        master_oob_handler = _SupportsOob(self.cfg, master_node_obj)

        if master_oob_handler:
          additional_text = ("run '%s %s %s' if you want to operate on the"
                             " master regardless") % (master_oob_handler,
                                                      self.op.command,
                                                      self.master_node)
        else:
          additional_text = "it does not support out-of-band operations"

        raise errors.OpPrereqError(("Operating on the master node %s is not"
                                    " allowed for %s; %s") %
                                   (self.master_node, self.op.command,
                                    additional_text), errors.ECODE_INVAL)
    else:
      self.op.node_names = self.cfg.GetNodeList()
      if self.op.command in self._SKIP_MASTER:
        self.op.node_names.remove(self.master_node)

    if self.op.command in self._SKIP_MASTER:
      assert self.master_node not in self.op.node_names

    for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_name,
                                   errors.ECODE_NOENT)
      else:
        self.nodes.append(node)

      if (not self.op.ignore_status and
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node_name,
                                   errors.ECODE_STATE)

  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    """
    master_node = self.master_node
    ret = []

    for idx, node in enumerate(utils.NiceSort(self.nodes,
                                              key=lambda node: node.name)):
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = _SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(master_node, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        except errors.OpExecError, err:
          self.LogWarning("Payload returned by node '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("Item '%s' on node '%s' has status '%s'",
                                item, node.name, status)

          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does not"
                               " match actual power state (%s)"), node.powered,
                              node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

      if (self.op.command == constants.OOB_POWER_ON and
          idx < len(self.nodes) - 1):
        time.sleep(self.op.power_delay)

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
        constants.OOB_POWER_ON,
        constants.OOB_POWER_OFF,
        constants.OOB_POWER_CYCLE,
        ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))


class _OsQuery(_QueryBase):
  FIELDS = query.OS_FIELDS

  def ExpandNames(self, lu):
    # Lock all nodes in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    lu.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    # The following variables interact with _QueryBase._GetNames
    if self.names:
      self.wanted = self.names
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self.use_locking

  def DeclareLocks(self, lu, level):
    pass

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into an a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another
        map, with nodes as keys and tuples of (path, status, diagnose,
        variants, parameters, api_versions) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "", [], [])]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, variants,
           params, api_versions) in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_os[name][node_name].append((path, status, diagnose,
                                        variants, params, api_versions))
    return all_os

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    """
    # Locking is not used
    assert not (compat.any(lu.glm.is_owned(level)
                           for level in locking.LEVELS
                           if level != locking.LEVEL_CLUSTER) or
                self.do_locking or self.use_locking)

    valid_nodes = [node.name
                   for node in lu.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
    cluster = lu.cfg.GetClusterInfo()

    data = {}

    for (os_name, os_data) in pol.items():
      info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
                          hidden=(os_name in cluster.hidden_os),
                          blacklisted=(os_name in cluster.blacklisted_os))

      variants = set()
      parameters = set()
      api_versions = set()

      for idx, osl in enumerate(os_data.values()):
        info.valid = bool(info.valid and osl and osl[0][1])
        if not info.valid:
          break

        (node_variants, node_params, node_api) = osl[0][3:6]
        if idx == 0:
          # First entry
          variants.update(node_variants)
          parameters.update(node_params)
          api_versions.update(node_api)
        else:
          # Filter out inconsistent values
          variants.intersection_update(node_variants)
          parameters.intersection_update(node_params)
          api_versions.intersection_update(node_api)

      info.variants = list(variants)
      info.parameters = list(parameters)
      info.api_versions = list(api_versions)

      data[os_name] = info

    # Prepare data in requested order
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
            if name in data]


class LUOsDiagnose(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  REQ_BGL = False

  @staticmethod
  def _BuildFilter(fields, names):
    """Builds a filter for querying OSes.

    """
    name_filter = qlang.MakeSimpleFilter("name", names)

    # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
    # respective field is not requested
    status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
                     for fname in ["hidden", "blacklisted"]
                     if fname not in fields]
    if "valid" not in fields:
      status_filter.append([qlang.OP_TRUE, "valid"])

    if status_filter:
      status_filter.insert(0, qlang.OP_AND)
    else:
      status_filter = None

    if name_filter and status_filter:
      return [qlang.OP_AND, name_filter, status_filter]
    elif name_filter:
      return name_filter
    else:
      return status_filter

  def CheckArguments(self):
    self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
                       self.op.output_fields, False)

  def ExpandNames(self):
    self.oq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.oq.OldStyleQuery(self)

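# Editor's note (illustrative, not part of the original module): for a query
# that only asks for the "name" field and gives no OS names,
# LUOsDiagnose._BuildFilter above produces the legacy status filter
#
#   [qlang.OP_AND,
#    [qlang.OP_NOT, [qlang.OP_TRUE, "hidden"]],
#    [qlang.OP_NOT, [qlang.OP_TRUE, "blacklisted"]],
#    [qlang.OP_TRUE, "valid"]]
#
# i.e. hidden, blacklisted and invalid OSes are filtered out unless the
# respective field is explicitly requested.  If OS names are also given, the
# name filter built by qlang.MakeSimpleFilter("name", names) is AND-ed
# together with this status filter.
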
class LUNodeRemove(LogicalUnit): |
4344 |
"""Logical unit for removing a node.
|
4345 |
|
4346 |
"""
|
4347 |
HPATH = "node-remove"
|
4348 |
HTYPE = constants.HTYPE_NODE |
4349 |
|
4350 |
def BuildHooksEnv(self): |
4351 |
"""Build hooks env.
|
4352 |
|
4353 |
This doesn't run on the target node in the pre phase as a failed
|
4354 |
node would then be impossible to remove.
|
4355 |
|
4356 |
"""
|
4357 |
return {
|
4358 |
"OP_TARGET": self.op.node_name, |
4359 |
"NODE_NAME": self.op.node_name, |
4360 |
} |
4361 |
|
4362 |
def BuildHooksNodes(self): |
4363 |
"""Build hooks nodes.
|
4364 |
|
4365 |
"""
|
4366 |
all_nodes = self.cfg.GetNodeList()
|
4367 |
try:
|
4368 |
all_nodes.remove(self.op.node_name)
|
4369 |
except ValueError: |
4370 |
logging.warning("Node '%s', which is about to be removed, was not found"
|
4371 |
" in the list of all nodes", self.op.node_name) |
4372 |
return (all_nodes, all_nodes)
|
4373 |
|
4374 |
def CheckPrereq(self): |
4375 |
"""Check prerequisites.
|
4376 |
|
4377 |
This checks:
|
4378 |
- the node exists in the configuration
|
4379 |
- it does not have primary or secondary instances
|
4380 |
- it's not the master
|
4381 |
|
4382 |
Any errors are signaled by raising errors.OpPrereqError.
|
4383 |
|
4384 |
"""
|
4385 |
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) |
4386 |
node = self.cfg.GetNodeInfo(self.op.node_name) |
4387 |
assert node is not None |
4388 |
|
4389 |
masternode = self.cfg.GetMasterNode()
|
4390 |
if node.name == masternode:
|
4391 |
raise errors.OpPrereqError("Node is the master node, failover to another" |
4392 |
" node is required", errors.ECODE_INVAL)
|
4393 |
|
4394 |
for instance_name, instance in self.cfg.GetAllInstancesInfo(): |
4395 |
if node.name in instance.all_nodes: |
4396 |
raise errors.OpPrereqError("Instance %s is still running on the node," |
4397 |
" please remove first" % instance_name,
|
4398 |
errors.ECODE_INVAL) |
4399 |
self.op.node_name = node.name
|
4400 |
self.node = node
|
4401 |
|
4402 |
def Exec(self, feedback_fn): |
4403 |
"""Removes the node from the cluster.
|
4404 |
|
4405 |
"""
|
4406 |
node = self.node
|
4407 |
logging.info("Stopping the node daemon and removing configs from node %s",
|
4408 |
node.name) |
4409 |
|
4410 |
modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
|
4411 |
|
4412 |
# Promote nodes to master candidate as needed
|
4413 |
_AdjustCandidatePool(self, exceptions=[node.name])
|
4414 |
self.context.RemoveNode(node.name)
|
4415 |
|
4416 |
# Run post hooks on the node before it's removed
|
4417 |
_RunPostHook(self, node.name)
|
4418 |
|
4419 |
result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
|
4420 |
msg = result.fail_msg |
4421 |
if msg:
|
4422 |
self.LogWarning("Errors encountered on the remote node while leaving" |
4423 |
" the cluster: %s", msg)
|
4424 |
|
4425 |
# Remove node from our /etc/hosts
|
4426 |
if self.cfg.GetClusterInfo().modify_etc_hosts: |
4427 |
master_node = self.cfg.GetMasterNode()
|
4428 |
result = self.rpc.call_etc_hosts_modify(master_node,
|
4429 |
constants.ETC_HOSTS_REMOVE, |
4430 |
node.name, None)
|
4431 |
result.Raise("Can't update hosts file with new host data")
|
4432 |
_RedistributeAncillaryFiles(self)
|
4433 |
|
4434 |
|
4435 |
class _NodeQuery(_QueryBase):
  FIELDS = query.NODE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = _ShareAll()

    if self.names:
      self.wanted = _GetWantedNodes(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.NQ_LIVE in self.requested_data)

    if self.do_locking:
      # If any non-static field is requested we need to lock the nodes
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    """
    all_info = lu.cfg.GetAllNodesInfo()

    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)

    # Gather data as requested
    if query.NQ_LIVE in self.requested_data:
      # filter out non-vm_capable nodes
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]

      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
                                        lu.cfg.GetHypervisorType())
      live_data = dict((name, nresult.payload)
                       for (name, nresult) in node_data.items()
                       if not nresult.fail_msg and nresult.payload)
    else:
      live_data = None

    if query.NQ_INST in self.requested_data:
      node_to_primary = dict([(name, set()) for name in nodenames])
      node_to_secondary = dict([(name, set()) for name in nodenames])

      inst_data = lu.cfg.GetAllInstancesInfo()

      for inst in inst_data.values():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)
    else:
      node_to_primary = None
      node_to_secondary = None

    if query.NQ_OOB in self.requested_data:
      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
                         for name, node in all_info.iteritems())
    else:
      oob_support = None

    if query.NQ_GROUP in self.requested_data:
      groups = lu.cfg.GetAllNodeGroupsInfo()
    else:
      groups = {}

    return query.NodeQueryData([all_info[name] for name in nodenames],
                               live_data, lu.cfg.GetMasterNode(),
                               node_to_primary, node_to_secondary, groups,
                               oob_support, lu.cfg.GetClusterInfo())
class LUNodeQuery(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
                         self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.nq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.nq.OldStyleQuery(self)
class LUNodeQueryvols(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.owned_locks(locking.LEVEL_NODE)
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = self.cfg.GetAllInstancesInfo()
    vol2inst = _MapInstanceDisksToNodes(ilist.values())

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = sorted(nresult.payload,
                         key=operator.itemgetter("dev"))

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol["dev"]
          elif field == "vg":
            val = vol["vg"]
          elif field == "name":
            val = vol["name"]
          elif field == "size":
            val = int(float(vol["size"]))
          elif field == "instance":
            val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
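

# The "instance" output field above relies on _MapInstanceDisksToNodes(),
# which, as used here, maps (node name, "<vg>/<lv name>") tuples to the name
# of the instance owning that logical volume; volumes without an owning
# instance fall back to "-".
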
class LUNodeQueryStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
  REQ_BGL = False

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    self.nodes = self.owned_locks(locking.LEVEL_NODE)

    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result
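

# The node and instance query classes share the same split: the LU classes
# (LUNodeQuery above, LUQuery below) only validate arguments and delegate,
# while the _QueryBase subclasses (_NodeQuery here, _InstanceQuery below)
# implement ExpandNames/DeclareLocks/_GetQueryData.  Roughly, as a sketch of
# the call flow visible in LUNodeQuery:
#
#   CheckArguments:  self.nq = _NodeQuery(filter_, fields, use_locking)
#   ExpandNames:     self.nq.ExpandNames(self)      # sets up the LU's locks
#   Exec:            self.nq.OldStyleQuery(self)    # NewStyleQuery for LUQuery
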
class _InstanceQuery(_QueryBase): |
4682 |
FIELDS = query.INSTANCE_FIELDS |
4683 |
|
4684 |
def ExpandNames(self, lu): |
4685 |
lu.needed_locks = {} |
4686 |
lu.share_locks = _ShareAll() |
4687 |
|
4688 |
if self.names: |
4689 |
self.wanted = _GetWantedInstances(lu, self.names) |
4690 |
else:
|
4691 |
self.wanted = locking.ALL_SET
|
4692 |
|
4693 |
self.do_locking = (self.use_locking and |
4694 |
query.IQ_LIVE in self.requested_data) |
4695 |
if self.do_locking: |
4696 |
lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
|
4697 |
lu.needed_locks[locking.LEVEL_NODEGROUP] = [] |
4698 |
lu.needed_locks[locking.LEVEL_NODE] = [] |
4699 |
lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE |
4700 |
|
4701 |
self.do_grouplocks = (self.do_locking and |
4702 |
query.IQ_NODES in self.requested_data) |
4703 |
|
4704 |
def DeclareLocks(self, lu, level): |
4705 |
if self.do_locking: |
4706 |
if level == locking.LEVEL_NODEGROUP and self.do_grouplocks: |
4707 |
assert not lu.needed_locks[locking.LEVEL_NODEGROUP] |
4708 |
|
4709 |
# Lock all groups used by instances optimistically; this requires going
|
4710 |
# via the node before it's locked, requiring verification later on
|
4711 |
lu.needed_locks[locking.LEVEL_NODEGROUP] = \ |
4712 |
set(group_uuid
|
4713 |
for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE) |
4714 |
for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name)) |
4715 |
elif level == locking.LEVEL_NODE:
|
4716 |
lu._LockInstancesNodes() # pylint: disable=W0212
|
4717 |
|
4718 |
@staticmethod
|
4719 |
def _CheckGroupLocks(lu): |
4720 |
owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
|
4721 |
owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
|
4722 |
|
4723 |
# Check if node groups for locked instances are still correct
|
4724 |
for instance_name in owned_instances: |
4725 |
_CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups) |
4726 |
|
4727 |
def _GetQueryData(self, lu): |
4728 |
"""Computes the list of instances and their attributes.
|
4729 |
|
4730 |
"""
|
4731 |
if self.do_grouplocks: |
4732 |
self._CheckGroupLocks(lu)
|
4733 |
|
4734 |
cluster = lu.cfg.GetClusterInfo() |
4735 |
all_info = lu.cfg.GetAllInstancesInfo() |
4736 |
|
4737 |
instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
|
4738 |
|
4739 |
instance_list = [all_info[name] for name in instance_names] |
4740 |
nodes = frozenset(itertools.chain(*(inst.all_nodes
|
4741 |
for inst in instance_list))) |
4742 |
hv_list = list(set([inst.hypervisor for inst in instance_list])) |
4743 |
bad_nodes = [] |
4744 |
offline_nodes = [] |
4745 |
wrongnode_inst = set()
|
4746 |
|
4747 |
# Gather data as requested
|
4748 |
if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]): |
4749 |
live_data = {} |
4750 |
node_data = lu.rpc.call_all_instances_info(nodes, hv_list) |
4751 |
for name in nodes: |
4752 |
result = node_data[name] |
4753 |
if result.offline:
|
4754 |
# offline nodes will be in both lists
|
4755 |
assert result.fail_msg
|
4756 |
offline_nodes.append(name) |
4757 |
if result.fail_msg:
|
4758 |
bad_nodes.append(name) |
4759 |
elif result.payload:
|
4760 |
for inst in result.payload: |
4761 |
if inst in all_info: |
4762 |
if all_info[inst].primary_node == name:
|
4763 |
live_data.update(result.payload) |
4764 |
else:
|
4765 |
wrongnode_inst.add(inst) |
4766 |
else:
|
4767 |
# orphan instance; we don't list it here as we don't
|
4768 |
# handle this case yet in the output of instance listing
|
4769 |
logging.warning("Orphan instance '%s' found on node %s",
|
4770 |
inst, name) |
4771 |
# else no instance is alive
|
4772 |
else:
|
4773 |
live_data = {} |
4774 |
|
4775 |
if query.IQ_DISKUSAGE in self.requested_data: |
4776 |
disk_usage = dict((inst.name,
|
4777 |
_ComputeDiskSize(inst.disk_template, |
4778 |
[{constants.IDISK_SIZE: disk.size} |
4779 |
for disk in inst.disks])) |
4780 |
for inst in instance_list) |
4781 |
else:
|
4782 |
disk_usage = None
|
4783 |
|
4784 |
if query.IQ_CONSOLE in self.requested_data: |
4785 |
consinfo = {} |
4786 |
for inst in instance_list: |
4787 |
if inst.name in live_data: |
4788 |
# Instance is running
|
4789 |
consinfo[inst.name] = _GetInstanceConsole(cluster, inst) |
4790 |
else:
|
4791 |
consinfo[inst.name] = None
|
4792 |
assert set(consinfo.keys()) == set(instance_names) |
4793 |
else:
|
4794 |
consinfo = None
|
4795 |
|
4796 |
if query.IQ_NODES in self.requested_data: |
4797 |
node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"), |
4798 |
instance_list))) |
4799 |
nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
|
4800 |
groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
|
4801 |
for uuid in set(map(operator.attrgetter("group"), |
4802 |
nodes.values()))) |
4803 |
else:
|
4804 |
nodes = None
|
4805 |
groups = None
|
4806 |
|
4807 |
return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
|
4808 |
disk_usage, offline_nodes, bad_nodes, |
4809 |
live_data, wrongnode_inst, consinfo, |
4810 |
nodes, groups) |
4811 |
|
4812 |
|
4813 |
class LUQuery(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    qcls = _GetQueryImplementation(self.op.what)

    self.impl = qcls(self.op.qfilter, self.op.fields, self.op.use_locking)

  def ExpandNames(self):
    self.impl.ExpandNames(self)

  def DeclareLocks(self, level):
    self.impl.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.impl.NewStyleQuery(self)
class LUQueryFields(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.qcls = _GetQueryImplementation(self.op.what)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    return query.QueryFields(self.qcls.FIELDS, self.op.fields)
class LUNodeModifyStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def Exec(self, feedback_fn):
    """Modifies a storage volume on the given node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))
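

# Illustrative example of an opcode handled by LUNodeModifyStorage (sketch
# only; the exact opcode fields are defined in opcodes.py, and it is assumed
# here that the "allocatable" flag of LVM physical volumes is among the
# modifiable fields):
#
#   opcodes.OpNodeModifyStorage(node_name="node1.example.com",
#                               storage_type=constants.ST_LVM_PV,
#                               name="/dev/sdb1",
#                               changes={constants.SF_ALLOCATABLE: False})
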
class LUNodeAdd(LogicalUnit): |
4895 |
"""Logical unit for adding node to the cluster.
|
4896 |
|
4897 |
"""
|
4898 |
HPATH = "node-add"
|
4899 |
HTYPE = constants.HTYPE_NODE |
4900 |
_NFLAGS = ["master_capable", "vm_capable"] |
4901 |
|
4902 |
def CheckArguments(self): |
4903 |
self.primary_ip_family = self.cfg.GetPrimaryIPFamily() |
4904 |
# validate/normalize the node name
|
4905 |
self.hostname = netutils.GetHostname(name=self.op.node_name, |
4906 |
family=self.primary_ip_family)
|
4907 |
self.op.node_name = self.hostname.name |
4908 |
|
4909 |
if self.op.readd and self.op.node_name == self.cfg.GetMasterNode(): |
4910 |
raise errors.OpPrereqError("Cannot readd the master node", |
4911 |
errors.ECODE_STATE) |
4912 |
|
4913 |
if self.op.readd and self.op.group: |
4914 |
raise errors.OpPrereqError("Cannot pass a node group when a node is" |
4915 |
" being readded", errors.ECODE_INVAL)
|
4916 |
|
4917 |
def BuildHooksEnv(self): |
4918 |
"""Build hooks env.
|
4919 |
|
4920 |
This will run on all nodes before, and on all nodes + the new node after.
|
4921 |
|
4922 |
"""
|
4923 |
return {
|
4924 |
"OP_TARGET": self.op.node_name, |
4925 |
"NODE_NAME": self.op.node_name, |
4926 |
"NODE_PIP": self.op.primary_ip, |
4927 |
"NODE_SIP": self.op.secondary_ip, |
4928 |
"MASTER_CAPABLE": str(self.op.master_capable), |
4929 |
"VM_CAPABLE": str(self.op.vm_capable), |
4930 |
} |
4931 |
|
4932 |
def BuildHooksNodes(self): |
4933 |
"""Build hooks nodes.
|
4934 |
|
4935 |
"""
|
4936 |
# Exclude added node
|
4937 |
pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name])) |
4938 |
post_nodes = pre_nodes + [self.op.node_name, ]
|
4939 |
|
4940 |
return (pre_nodes, post_nodes)
|
4941 |
|
4942 |
def CheckPrereq(self): |
4943 |
"""Check prerequisites.
|
4944 |
|
4945 |
This checks:
|
4946 |
- the new node is not already in the config
|
4947 |
- it is resolvable
|
4948 |
- its parameters (single/dual homed) matches the cluster
|
4949 |
|
4950 |
Any errors are signaled by raising errors.OpPrereqError.
|
4951 |
|
4952 |
"""
|
4953 |
cfg = self.cfg
|
4954 |
hostname = self.hostname
|
4955 |
node = hostname.name |
4956 |
primary_ip = self.op.primary_ip = hostname.ip
|
4957 |
if self.op.secondary_ip is None: |
4958 |
if self.primary_ip_family == netutils.IP6Address.family: |
4959 |
raise errors.OpPrereqError("When using a IPv6 primary address, a valid" |
4960 |
" IPv4 address must be given as secondary",
|
4961 |
errors.ECODE_INVAL) |
4962 |
self.op.secondary_ip = primary_ip
|
4963 |
|
4964 |
secondary_ip = self.op.secondary_ip
|
4965 |
if not netutils.IP4Address.IsValid(secondary_ip): |
4966 |
raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4" |
4967 |
" address" % secondary_ip, errors.ECODE_INVAL)
|
4968 |
|
4969 |
node_list = cfg.GetNodeList() |
4970 |
if not self.op.readd and node in node_list: |
4971 |
raise errors.OpPrereqError("Node %s is already in the configuration" % |
4972 |
node, errors.ECODE_EXISTS) |
4973 |
elif self.op.readd and node not in node_list: |
4974 |
raise errors.OpPrereqError("Node %s is not in the configuration" % node, |
4975 |
errors.ECODE_NOENT) |
4976 |
|
4977 |
self.changed_primary_ip = False |
4978 |
|
4979 |
for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list): |
4980 |
if self.op.readd and node == existing_node_name: |
4981 |
if existing_node.secondary_ip != secondary_ip:
|
4982 |
raise errors.OpPrereqError("Readded node doesn't have the same IP" |
4983 |
" address configuration as before",
|
4984 |
errors.ECODE_INVAL) |
4985 |
if existing_node.primary_ip != primary_ip:
|
4986 |
self.changed_primary_ip = True |
4987 |
|
4988 |
continue
|
4989 |
|
4990 |
if (existing_node.primary_ip == primary_ip or |
4991 |
existing_node.secondary_ip == primary_ip or
|
4992 |
existing_node.primary_ip == secondary_ip or
|
4993 |
existing_node.secondary_ip == secondary_ip): |
4994 |
raise errors.OpPrereqError("New node ip address(es) conflict with" |
4995 |
" existing node %s" % existing_node.name,
|
4996 |
errors.ECODE_NOTUNIQUE) |
4997 |
|
4998 |
# After this 'if' block, None is no longer a valid value for the
|
4999 |
# _capable op attributes
|
5000 |
if self.op.readd: |
5001 |
old_node = self.cfg.GetNodeInfo(node)
|
5002 |
assert old_node is not None, "Can't retrieve locked node %s" % node |
5003 |
for attr in self._NFLAGS: |
5004 |
if getattr(self.op, attr) is None: |
5005 |
setattr(self.op, attr, getattr(old_node, attr)) |
5006 |
else:
|
5007 |
for attr in self._NFLAGS: |
5008 |
if getattr(self.op, attr) is None: |
5009 |
setattr(self.op, attr, True) |
5010 |
|
5011 |
if self.op.readd and not self.op.vm_capable: |
5012 |
pri, sec = cfg.GetNodeInstances(node) |
5013 |
if pri or sec: |
5014 |
raise errors.OpPrereqError("Node %s being re-added with vm_capable" |
5015 |
" flag set to false, but it already holds"
|
5016 |
" instances" % node,
|
5017 |
errors.ECODE_STATE) |
5018 |
|
5019 |
# check that the type of the node (single versus dual homed) is the
|
5020 |
# same as for the master
|
5021 |
myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
|
5022 |
master_singlehomed = myself.secondary_ip == myself.primary_ip |
5023 |
newbie_singlehomed = secondary_ip == primary_ip |
5024 |
if master_singlehomed != newbie_singlehomed:
|
5025 |
if master_singlehomed:
|
5026 |
raise errors.OpPrereqError("The master has no secondary ip but the" |
5027 |
" new node has one",
|
5028 |
errors.ECODE_INVAL) |
5029 |
else:
|
5030 |
raise errors.OpPrereqError("The master has a secondary ip but the" |
5031 |
" new node doesn't have one",
|
5032 |
errors.ECODE_INVAL) |
5033 |
|
5034 |
# checks reachability
|
5035 |
if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT): |
5036 |
raise errors.OpPrereqError("Node not reachable by ping", |
5037 |
errors.ECODE_ENVIRON) |
5038 |
|
5039 |
if not newbie_singlehomed: |
5040 |
# check reachability from my secondary ip to newbie's secondary ip
|
5041 |
if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT, |
5042 |
source=myself.secondary_ip): |
5043 |
raise errors.OpPrereqError("Node secondary ip not reachable by TCP" |
5044 |
" based ping to node daemon port",
|
5045 |
errors.ECODE_ENVIRON) |
5046 |
|
5047 |
if self.op.readd: |
5048 |
exceptions = [node] |
5049 |
else:
|
5050 |
exceptions = [] |
5051 |
|
5052 |
if self.op.master_capable: |
5053 |
self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions) |
5054 |
else:
|
5055 |
self.master_candidate = False |
5056 |
|
5057 |
if self.op.readd: |
5058 |
self.new_node = old_node
|
5059 |
else:
|
5060 |
node_group = cfg.LookupNodeGroup(self.op.group)
|
5061 |
self.new_node = objects.Node(name=node,
|
5062 |
primary_ip=primary_ip, |
5063 |
secondary_ip=secondary_ip, |
5064 |
master_candidate=self.master_candidate,
|
5065 |
offline=False, drained=False, |
5066 |
group=node_group) |
5067 |
|
5068 |
if self.op.ndparams: |
5069 |
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
|
5070 |
|
5071 |
def Exec(self, feedback_fn): |
5072 |
"""Adds the new node to the cluster.
|
5073 |
|
5074 |
"""
|
5075 |
new_node = self.new_node
|
5076 |
node = new_node.name |
5077 |
|
5078 |
# We adding a new node so we assume it's powered
|
5079 |
new_node.powered = True
|
5080 |
|
5081 |
# for re-adds, reset the offline/drained/master-candidate flags;
|
5082 |
# we need to reset here, otherwise offline would prevent RPC calls
|
5083 |
# later in the procedure; this also means that if the re-add
|
5084 |
# fails, we are left with a non-offlined, broken node
|
5085 |
if self.op.readd: |
5086 |
new_node.drained = new_node.offline = False # pylint: disable=W0201 |
5087 |
self.LogInfo("Readding a node, the offline/drained flags were reset") |
5088 |
# if we demote the node, we do cleanup later in the procedure
|
5089 |
new_node.master_candidate = self.master_candidate
|
5090 |
if self.changed_primary_ip: |
5091 |
new_node.primary_ip = self.op.primary_ip
|
5092 |
|
5093 |
# copy the master/vm_capable flags
|
5094 |
for attr in self._NFLAGS: |
5095 |
setattr(new_node, attr, getattr(self.op, attr)) |
5096 |
|
5097 |
# notify the user about any possible mc promotion
|
5098 |
if new_node.master_candidate:
|
5099 |
self.LogInfo("Node will be a master candidate") |
5100 |
|
5101 |
if self.op.ndparams: |
5102 |
new_node.ndparams = self.op.ndparams
|
5103 |
else:
|
5104 |
new_node.ndparams = {} |
5105 |
|
5106 |
# check connectivity
|
5107 |
result = self.rpc.call_version([node])[node]
|
5108 |
result.Raise("Can't get version information from node %s" % node)
|
5109 |
if constants.PROTOCOL_VERSION == result.payload:
|
5110 |
logging.info("Communication to node %s fine, sw version %s match",
|
5111 |
node, result.payload) |
5112 |
else:
|
5113 |
raise errors.OpExecError("Version mismatch master version %s," |
5114 |
" node version %s" %
|
5115 |
(constants.PROTOCOL_VERSION, result.payload)) |
5116 |
|
5117 |
# Add node to our /etc/hosts, and add key to known_hosts
|
5118 |
if self.cfg.GetClusterInfo().modify_etc_hosts: |
5119 |
master_node = self.cfg.GetMasterNode()
|
5120 |
result = self.rpc.call_etc_hosts_modify(master_node,
|
5121 |
constants.ETC_HOSTS_ADD, |
5122 |
self.hostname.name,
|
5123 |
self.hostname.ip)
|
5124 |
result.Raise("Can't update hosts file with new host data")
|
5125 |
|
5126 |
if new_node.secondary_ip != new_node.primary_ip:
|
5127 |
_CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
|
5128 |
False)
|
5129 |
|
5130 |
node_verify_list = [self.cfg.GetMasterNode()]
|
5131 |
node_verify_param = { |
5132 |
constants.NV_NODELIST: ([node], {}), |
5133 |
# TODO: do a node-net-test as well?
|
5134 |
} |
5135 |
|
5136 |
result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
|
5137 |
self.cfg.GetClusterName())
|
5138 |
for verifier in node_verify_list: |
5139 |
result[verifier].Raise("Cannot communicate with node %s" % verifier)
|
5140 |
nl_payload = result[verifier].payload[constants.NV_NODELIST] |
5141 |
if nl_payload:
|
5142 |
for failed in nl_payload: |
5143 |
feedback_fn("ssh/hostname verification failed"
|
5144 |
" (checking from %s): %s" %
|
5145 |
(verifier, nl_payload[failed])) |
5146 |
raise errors.OpExecError("ssh/hostname verification failed") |
5147 |
|
5148 |
if self.op.readd: |
5149 |
_RedistributeAncillaryFiles(self)
|
5150 |
self.context.ReaddNode(new_node)
|
5151 |
# make sure we redistribute the config
|
5152 |
self.cfg.Update(new_node, feedback_fn)
|
5153 |
# and make sure the new node will not have old files around
|
5154 |
if not new_node.master_candidate: |
5155 |
result = self.rpc.call_node_demote_from_mc(new_node.name)
|
5156 |
msg = result.fail_msg |
5157 |
if msg:
|
5158 |
self.LogWarning("Node failed to demote itself from master" |
5159 |
" candidate status: %s" % msg)
|
5160 |
else:
|
5161 |
_RedistributeAncillaryFiles(self, additional_nodes=[node],
|
5162 |
additional_vm=self.op.vm_capable)
|
5163 |
self.context.AddNode(new_node, self.proc.GetECId()) |
5164 |
|
5165 |
|
5166 |
class LUNodeSetParams(LogicalUnit): |
5167 |
"""Modifies the parameters of a node.
|
5168 |
|
5169 |
@cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
|
5170 |
to the node role (as _ROLE_*)
|
5171 |
@cvar _R2F: a dictionary from node role to tuples of flags
|
5172 |
@cvar _FLAGS: a list of attribute names corresponding to the flags
|
5173 |
|
5174 |
"""
|
5175 |
HPATH = "node-modify"
|
5176 |
HTYPE = constants.HTYPE_NODE |
5177 |
REQ_BGL = False
|
5178 |
(_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4) |
5179 |
_F2R = { |
5180 |
(True, False, False): _ROLE_CANDIDATE, |
5181 |
(False, True, False): _ROLE_DRAINED, |
5182 |
(False, False, True): _ROLE_OFFLINE, |
5183 |
(False, False, False): _ROLE_REGULAR, |
5184 |
} |
5185 |
_R2F = dict((v, k) for k, v in _F2R.items()) |
5186 |
_FLAGS = ["master_candidate", "drained", "offline"] |
5187 |
|
5188 |
def CheckArguments(self): |
5189 |
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) |
5190 |
all_mods = [self.op.offline, self.op.master_candidate, self.op.drained, |
5191 |
self.op.master_capable, self.op.vm_capable, |
5192 |
self.op.secondary_ip, self.op.ndparams] |
5193 |
if all_mods.count(None) == len(all_mods): |
5194 |
raise errors.OpPrereqError("Please pass at least one modification", |
5195 |
errors.ECODE_INVAL) |
5196 |
if all_mods.count(True) > 1: |
5197 |
raise errors.OpPrereqError("Can't set the node into more than one" |
5198 |
" state at the same time",
|
5199 |
errors.ECODE_INVAL) |
5200 |
|
5201 |
# Boolean value that tells us whether we might be demoting from MC
|
5202 |
self.might_demote = (self.op.master_candidate == False or |
5203 |
self.op.offline == True or |
5204 |
self.op.drained == True or |
5205 |
self.op.master_capable == False) |
5206 |
|
5207 |
if self.op.secondary_ip: |
5208 |
if not netutils.IP4Address.IsValid(self.op.secondary_ip): |
5209 |
raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4" |
5210 |
" address" % self.op.secondary_ip, |
5211 |
errors.ECODE_INVAL) |
5212 |
|
5213 |
self.lock_all = self.op.auto_promote and self.might_demote |
5214 |
self.lock_instances = self.op.secondary_ip is not None |
5215 |
|
5216 |
def ExpandNames(self): |
5217 |
if self.lock_all: |
5218 |
self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
|
5219 |
else:
|
5220 |
self.needed_locks = {locking.LEVEL_NODE: self.op.node_name} |
5221 |
|
5222 |
if self.lock_instances: |
5223 |
self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
|
5224 |
|
5225 |
def DeclareLocks(self, level): |
5226 |
# If we have locked all instances, before waiting to lock nodes, release
|
5227 |
# all the ones living on nodes unrelated to the current operation.
|
5228 |
if level == locking.LEVEL_NODE and self.lock_instances: |
5229 |
self.affected_instances = []
|
5230 |
if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET: |
5231 |
instances_keep = [] |
5232 |
|
5233 |
# Build list of instances to release
|
5234 |
locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
|
5235 |
for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i): |
5236 |
if (instance.disk_template in constants.DTS_INT_MIRROR and |
5237 |
self.op.node_name in instance.all_nodes): |
5238 |
instances_keep.append(instance_name) |
5239 |
self.affected_instances.append(instance)
|
5240 |
|
5241 |
_ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
|
5242 |
|
5243 |
assert (set(self.owned_locks(locking.LEVEL_INSTANCE)) == |
5244 |
set(instances_keep))
|
5245 |
|
5246 |
def BuildHooksEnv(self): |
5247 |
"""Build hooks env.
|
5248 |
|
5249 |
This runs on the master node.
|
5250 |
|
5251 |
"""
|
5252 |
return {
|
5253 |
"OP_TARGET": self.op.node_name, |
5254 |
"MASTER_CANDIDATE": str(self.op.master_candidate), |
5255 |
"OFFLINE": str(self.op.offline), |
5256 |
"DRAINED": str(self.op.drained), |
5257 |
"MASTER_CAPABLE": str(self.op.master_capable), |
5258 |
"VM_CAPABLE": str(self.op.vm_capable), |
5259 |
} |
5260 |
|
5261 |
def BuildHooksNodes(self): |
5262 |
"""Build hooks nodes.
|
5263 |
|
5264 |
"""
|
5265 |
nl = [self.cfg.GetMasterNode(), self.op.node_name] |
5266 |
return (nl, nl)
|
5267 |
|
5268 |
def CheckPrereq(self): |
5269 |
"""Check prerequisites.
|
5270 |
|
5271 |
This only checks the instance list against the existing names.
|
5272 |
|
5273 |
"""
|
5274 |
node = self.node = self.cfg.GetNodeInfo(self.op.node_name) |
5275 |
|
5276 |
if (self.op.master_candidate is not None or |
5277 |
self.op.drained is not None or |
5278 |
self.op.offline is not None): |
5279 |
# we can't change the master's node flags
|
5280 |
if self.op.node_name == self.cfg.GetMasterNode(): |
5281 |
raise errors.OpPrereqError("The master role can be changed" |
5282 |
" only via master-failover",
|
5283 |
errors.ECODE_INVAL) |
5284 |
|
5285 |
if self.op.master_candidate and not node.master_capable: |
5286 |
raise errors.OpPrereqError("Node %s is not master capable, cannot make" |
5287 |
" it a master candidate" % node.name,
|
5288 |
errors.ECODE_STATE) |
5289 |
|
5290 |
if self.op.vm_capable == False: |
5291 |
(ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name) |
5292 |
if ipri or isec: |
5293 |
raise errors.OpPrereqError("Node %s hosts instances, cannot unset" |
5294 |
" the vm_capable flag" % node.name,
|
5295 |
errors.ECODE_STATE) |
5296 |
|
5297 |
if node.master_candidate and self.might_demote and not self.lock_all: |
5298 |
assert not self.op.auto_promote, "auto_promote set but lock_all not" |
5299 |
# check if after removing the current node, we're missing master
|
5300 |
# candidates
|
5301 |
(mc_remaining, mc_should, _) = \ |
5302 |
self.cfg.GetMasterCandidateStats(exceptions=[node.name])
|
5303 |
if mc_remaining < mc_should:
|
5304 |
raise errors.OpPrereqError("Not enough master candidates, please" |
5305 |
" pass auto promote option to allow"
|
5306 |
" promotion", errors.ECODE_STATE)
|
5307 |
|
5308 |
self.old_flags = old_flags = (node.master_candidate,
|
5309 |
node.drained, node.offline) |
5310 |
assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags) |
5311 |
self.old_role = old_role = self._F2R[old_flags] |
5312 |
|
5313 |
# Check for ineffective changes
|
5314 |
for attr in self._FLAGS: |
5315 |
if (getattr(self.op, attr) == False and getattr(node, attr) == False): |
5316 |
self.LogInfo("Ignoring request to unset flag %s, already unset", attr) |
5317 |
setattr(self.op, attr, None) |
5318 |
|
5319 |
# Past this point, any flag change to False means a transition
|
5320 |
# away from the respective state, as only real changes are kept
|
5321 |
|
5322 |
# TODO: We might query the real power state if it supports OOB
|
5323 |
if _SupportsOob(self.cfg, node): |
5324 |
if self.op.offline is False and not (node.powered or |
5325 |
self.op.powered == True): |
5326 |
raise errors.OpPrereqError(("Node %s needs to be turned on before its" |
5327 |
" offline status can be reset") %
|
5328 |
self.op.node_name)
|
5329 |
elif self.op.powered is not None: |
5330 |
raise errors.OpPrereqError(("Unable to change powered state for node %s" |
5331 |
" as it does not support out-of-band"
|
5332 |
" handling") % self.op.node_name) |
5333 |
|
5334 |
# If we're being deofflined/drained, we'll MC ourself if needed
|
5335 |
if (self.op.drained == False or self.op.offline == False or |
5336 |
(self.op.master_capable and not node.master_capable)): |
5337 |
if _DecideSelfPromotion(self): |
5338 |
self.op.master_candidate = True |
5339 |
self.LogInfo("Auto-promoting node to master candidate") |
5340 |
|
5341 |
# If we're no longer master capable, we'll demote ourselves from MC
|
5342 |
if self.op.master_capable == False and node.master_candidate: |
5343 |
self.LogInfo("Demoting from master candidate") |
5344 |
self.op.master_candidate = False |
5345 |
|
5346 |
# Compute new role
|
5347 |
assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1 |
5348 |
if self.op.master_candidate: |
5349 |
new_role = self._ROLE_CANDIDATE
|
5350 |
elif self.op.drained: |
5351 |
new_role = self._ROLE_DRAINED
|
5352 |
elif self.op.offline: |
5353 |
new_role = self._ROLE_OFFLINE
|
5354 |
elif False in [self.op.master_candidate, self.op.drained, self.op.offline]: |
5355 |
# False is still in new flags, which means we're un-setting (the
|
5356 |
# only) True flag
|
5357 |
new_role = self._ROLE_REGULAR
|
5358 |
else: # no new flags, nothing, keep old role |
5359 |
new_role = old_role |
5360 |
|
5361 |
self.new_role = new_role
|
5362 |
|
5363 |
if old_role == self._ROLE_OFFLINE and new_role != old_role: |
5364 |
# Trying to transition out of offline status
|
5365 |
result = self.rpc.call_version([node.name])[node.name]
|
5366 |
if result.fail_msg:
|
5367 |
raise errors.OpPrereqError("Node %s is being de-offlined but fails" |
5368 |
" to report its version: %s" %
|
5369 |
(node.name, result.fail_msg), |
5370 |
errors.ECODE_STATE) |
5371 |
else:
|
5372 |
self.LogWarning("Transitioning node from offline to online state" |
5373 |
" without using re-add. Please make sure the node"
|
5374 |
" is healthy!")
|
5375 |
|
5376 |
if self.op.secondary_ip: |
5377 |
# Ok even without locking, because this can't be changed by any LU
|
5378 |
master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode()) |
5379 |
master_singlehomed = master.secondary_ip == master.primary_ip |
5380 |
if master_singlehomed and self.op.secondary_ip: |
5381 |
raise errors.OpPrereqError("Cannot change the secondary ip on a single" |
5382 |
" homed cluster", errors.ECODE_INVAL)
|
5383 |
|
5384 |
if node.offline:
|
5385 |
if self.affected_instances: |
5386 |
raise errors.OpPrereqError("Cannot change secondary ip: offline" |
5387 |
" node has instances (%s) configured"
|
5388 |
" to use it" % self.affected_instances) |
5389 |
else:
|
5390 |
# On online nodes, check that no instances are running, and that
|
5391 |
# the node has the new ip and we can reach it.
|
5392 |
for instance in self.affected_instances: |
5393 |
_CheckInstanceDown(self, instance, "cannot change secondary ip") |
5394 |
|
5395 |
_CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True) |
5396 |
if master.name != node.name:
|
5397 |
# check reachability from master secondary ip to new secondary ip
|
5398 |
if not netutils.TcpPing(self.op.secondary_ip, |
5399 |
constants.DEFAULT_NODED_PORT, |
5400 |
source=master.secondary_ip): |
5401 |
raise errors.OpPrereqError("Node secondary ip not reachable by TCP" |
5402 |
" based ping to node daemon port",
|
5403 |
errors.ECODE_ENVIRON) |
5404 |
|
5405 |
if self.op.ndparams: |
5406 |
new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams) |
5407 |
utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES) |
5408 |
self.new_ndparams = new_ndparams
|
5409 |
|
5410 |
def Exec(self, feedback_fn): |
5411 |
"""Modifies a node.
|
5412 |
|
5413 |
"""
|
5414 |
node = self.node
|
5415 |
old_role = self.old_role
|
5416 |
new_role = self.new_role
|
5417 |
|
5418 |
result = [] |
5419 |
|
5420 |
if self.op.ndparams: |
5421 |
node.ndparams = self.new_ndparams
|
5422 |
|
5423 |
if self.op.powered is not None: |
5424 |
node.powered = self.op.powered
|
5425 |
|
5426 |
for attr in ["master_capable", "vm_capable"]: |
5427 |
val = getattr(self.op, attr) |
5428 |
if val is not None: |
5429 |
setattr(node, attr, val)
|
5430 |
result.append((attr, str(val)))
|
5431 |
|
5432 |
if new_role != old_role:
|
5433 |
# Tell the node to demote itself, if no longer MC and not offline
|
5434 |
if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE: |
5435 |
msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
|
5436 |
if msg:
|
5437 |
self.LogWarning("Node failed to demote itself: %s", msg) |
5438 |
|
5439 |
new_flags = self._R2F[new_role]
|
5440 |
for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS): |
5441 |
if of != nf:
|
5442 |
result.append((desc, str(nf)))
|
5443 |
(node.master_candidate, node.drained, node.offline) = new_flags |
5444 |
|
5445 |
# we locked all nodes, we adjust the CP before updating this node
|
5446 |
if self.lock_all: |
5447 |
_AdjustCandidatePool(self, [node.name])
|
5448 |
|
5449 |
if self.op.secondary_ip: |
5450 |
node.secondary_ip = self.op.secondary_ip
|
5451 |
result.append(("secondary_ip", self.op.secondary_ip)) |
5452 |
|
5453 |
# this will trigger configuration file update, if needed
|
5454 |
self.cfg.Update(node, feedback_fn)
|
5455 |
|
5456 |
# this will trigger job queue propagation or cleanup if the mc
|
5457 |
# flag changed
|
5458 |
if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1: |
5459 |
self.context.ReaddNode(node)
|
5460 |
|
5461 |
return result
|
5462 |
|
5463 |
|
5464 |
class LUNodePowercycle(NoHooksLU):
  """Powercycles a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload
class LUClusterQuery(NoHooksLU): |
5497 |
"""Query cluster configuration.
|
5498 |
|
5499 |
"""
|
5500 |
REQ_BGL = False
|
5501 |
|
5502 |
def ExpandNames(self): |
5503 |
self.needed_locks = {}
|
5504 |
|
5505 |
def Exec(self, feedback_fn): |
5506 |
"""Return cluster config.
|
5507 |
|
5508 |
"""
|
5509 |
cluster = self.cfg.GetClusterInfo()
|
5510 |
os_hvp = {} |
5511 |
|
5512 |
# Filter just for enabled hypervisors
|
5513 |
for os_name, hv_dict in cluster.os_hvp.items(): |
5514 |
os_hvp[os_name] = {} |
5515 |
for hv_name, hv_params in hv_dict.items(): |
5516 |
if hv_name in cluster.enabled_hypervisors: |
5517 |
os_hvp[os_name][hv_name] = hv_params |
5518 |
|
5519 |
# Convert ip_family to ip_version
|
5520 |
primary_ip_version = constants.IP4_VERSION |
5521 |
if cluster.primary_ip_family == netutils.IP6Address.family:
|
5522 |
primary_ip_version = constants.IP6_VERSION |
5523 |
|
5524 |
result = { |
5525 |
"software_version": constants.RELEASE_VERSION,
|
5526 |
"protocol_version": constants.PROTOCOL_VERSION,
|
5527 |
"config_version": constants.CONFIG_VERSION,
|
5528 |
"os_api_version": max(constants.OS_API_VERSIONS), |
5529 |
"export_version": constants.EXPORT_VERSION,
|
5530 |
"architecture": (platform.architecture()[0], platform.machine()), |
5531 |
"name": cluster.cluster_name,
|
5532 |
"master": cluster.master_node,
|
5533 |
"default_hypervisor": cluster.enabled_hypervisors[0], |
5534 |
"enabled_hypervisors": cluster.enabled_hypervisors,
|
5535 |
"hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name]) |
5536 |
for hypervisor_name in cluster.enabled_hypervisors]), |
5537 |
"os_hvp": os_hvp,
|
5538 |
"beparams": cluster.beparams,
|
5539 |
"osparams": cluster.osparams,
|
5540 |
"nicparams": cluster.nicparams,
|
5541 |
"ndparams": cluster.ndparams,
|
5542 |
"candidate_pool_size": cluster.candidate_pool_size,
|
5543 |
"master_netdev": cluster.master_netdev,
|
5544 |
"master_netmask": cluster.master_netmask,
|
5545 |
"volume_group_name": cluster.volume_group_name,
|
5546 |
"drbd_usermode_helper": cluster.drbd_usermode_helper,
|
5547 |
"file_storage_dir": cluster.file_storage_dir,
|
5548 |
"shared_file_storage_dir": cluster.shared_file_storage_dir,
|
5549 |
"maintain_node_health": cluster.maintain_node_health,
|
5550 |
"ctime": cluster.ctime,
|
5551 |
"mtime": cluster.mtime,
|
5552 |
"uuid": cluster.uuid,
|
5553 |
"tags": list(cluster.GetTags()), |
5554 |
"uid_pool": cluster.uid_pool,
|
5555 |
"default_iallocator": cluster.default_iallocator,
|
5556 |
"reserved_lvs": cluster.reserved_lvs,
|
5557 |
"primary_ip_version": primary_ip_version,
|
5558 |
"prealloc_wipe_disks": cluster.prealloc_wipe_disks,
|
5559 |
"hidden_os": cluster.hidden_os,
|
5560 |
"blacklisted_os": cluster.blacklisted_os,
|
5561 |
} |
5562 |
|
5563 |
return result
|
5564 |
|
5565 |
|
5566 |
class LUClusterConfigQuery(NoHooksLU): |
5567 |
"""Return configuration values.
|
5568 |
|
5569 |
"""
|
5570 |
REQ_BGL = False
|
5571 |
_FIELDS_DYNAMIC = utils.FieldSet() |
5572 |
_FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag", |
5573 |
"watcher_pause", "volume_group_name") |
5574 |
|
5575 |
def CheckArguments(self): |
5576 |
_CheckOutputFields(static=self._FIELDS_STATIC,
|
5577 |
dynamic=self._FIELDS_DYNAMIC,
|
5578 |
selected=self.op.output_fields)
|
5579 |
|
5580 |
def ExpandNames(self): |
5581 |
self.needed_locks = {}
|
5582 |
|
5583 |
def Exec(self, feedback_fn): |
5584 |
"""Dump a representation of the cluster config to the standard output.
|
5585 |
|
5586 |
"""
|
5587 |
values = [] |
5588 |
for field in self.op.output_fields: |
5589 |
if field == "cluster_name": |
5590 |
entry = self.cfg.GetClusterName()
|
5591 |
elif field == "master_node": |
5592 |
entry = self.cfg.GetMasterNode()
|
5593 |
elif field == "drain_flag": |
5594 |
entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE) |
5595 |
elif field == "watcher_pause": |
5596 |
entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE) |
5597 |
elif field == "volume_group_name": |
5598 |
entry = self.cfg.GetVGName()
|
5599 |
else:
|
5600 |
raise errors.ParameterError(field)
|
5601 |
values.append(entry) |
5602 |
return values
|
5603 |
|
5604 |
|
5605 |
class LUInstanceActivateDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
      _AssembleInstanceDisks(self, self.instance,
                             ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  disks = _ExpandCheckDisks(instance, disks)

  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
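

# Each entry appended to device_info above is a tuple of
# (primary node, instance-visible disk name, node device path), where the
# device path is whatever call_blockdev_assemble returned on the primary
# node; callers such as LUInstanceActivateDisks simply return this list to
# the client.  The two passes assemble every device in secondary mode on all
# nodes before the primary side is promoted, narrowing (though not
# eliminating) the DRBD handshake race described in the comments above.
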
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")
class LUInstanceDeactivateDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    if self.op.force:
      _ShutdownInstanceDisks(self, instance)
    else:
      _SafeShutdownInstanceDisks(self, instance)
def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance, disks=disks)
def _ExpandCheckDisks(instance, disks):
  """Return the instance disks selected by the disks list

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance")
    return disks
def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  all_result = True
  disks = _ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if ((node == instance.primary_node and not ignore_primary) or
            (node != instance.primary_node and not result.offline)):
          all_result = False
  return all_result
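

# Taken together: _StartInstanceDisks() assembles the disks and tears them
# back down via _ShutdownInstanceDisks() if anything went wrong, while
# _SafeShutdownInstanceDisks() is the variant to use when the instance may
# still be running -- it first asserts the instance is down via
# _CheckInstanceDown() and only then shuts the block devices down.
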
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name): |
5839 |
"""Checks if a node has enough free memory.
|
5840 |
|
5841 |
This function check if a given node has the needed amount of free
|
5842 |
memory. In case the node has less memory or we cannot get the
|
5843 |
information from the node, this function raise an OpPrereqError
|
5844 |
exception.
|
5845 |
|
5846 |
@type lu: C{LogicalUnit}
|
5847 |
@param lu: a logical unit from which we get configuration data
|
5848 |
@type node: C{str}
|
5849 |
@param node: the node to check
|
5850 |
@type reason: C{str}
|
5851 |
@param reason: string to use in the error message
|
5852 |
@type requested: C{int}
|
5853 |
@param requested: the amount of memory in MiB to check for
|
5854 |
@type hypervisor_name: C{str}
|
5855 |
@param hypervisor_name: the hypervisor to ask for memory stats
|
5856 |
@raise errors.OpPrereqError: if the node doesn't have enough memory, or
|
5857 |
we cannot check the node
|
5858 |
|
5859 |
"""
|
5860 |
nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
|
5861 |
nodeinfo[node].Raise("Can't get data from node %s" % node,
|
5862 |
prereq=True, ecode=errors.ECODE_ENVIRON)
|
5863 |
free_mem = nodeinfo[node].payload.get("memory_free", None) |
5864 |
if not isinstance(free_mem, int): |
5865 |
raise errors.OpPrereqError("Can't compute free memory on node %s, result" |
5866 |
" was '%s'" % (node, free_mem),
|
5867 |
errors.ECODE_ENVIRON) |
5868 |
if requested > free_mem:
|
5869 |
raise errors.OpPrereqError("Not enough memory on node %s for %s:" |
5870 |
" needed %s MiB, available %s MiB" %
|
5871 |
(node, reason, requested, free_mem), |
5872 |
errors.ECODE_NORES) |
5873 |
|
5874 |
|
5875 |
def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes): |
5876 |
"""Checks if nodes have enough free disk space in the all VGs.
|
5877 |
|
5878 |
This function check if all given nodes have the needed amount of
|
5879 |
free disk. In case any node has less disk or we cannot get the
|
5880 |
information from the node, this function raise an OpPrereqError
|
5881 |
exception.
|
5882 |
|
5883 |
@type lu: C{LogicalUnit}
|
5884 |
@param lu: a logical unit from which we get configuration data
|
5885 |
@type nodenames: C{list}
|
5886 |
@param nodenames: the list of node names to check
|
5887 |
@type req_sizes: C{dict}
|
5888 |
@param req_sizes: the hash of vg and corresponding amount of disk in
|
5889 |
MiB to check for
|
5890 |
@raise errors.OpPrereqError: if the node doesn't have enough disk,
|
5891 |
or we cannot check the node
|
5892 |
|
5893 |
"""
|
5894 |
for vg, req_size in req_sizes.items(): |
5895 |
_CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size) |
5896 |
|
5897 |
|
5898 |
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested): |
5899 |
"""Checks if nodes have enough free disk space in the specified VG.
|
5900 |
|
5901 |
This function check if all given nodes have the needed amount of
|
5902 |
free disk. In case any node has less disk or we cannot get the
|
5903 |
information from the node, this function raise an OpPrereqError
|
5904 |
exception.
|
5905 |
|
5906 |
@type lu: C{LogicalUnit}
|
5907 |
@param lu: a logical unit from which we get configuration data
|
5908 |
@type nodenames: C{list}
|
5909 |
@param nodenames: the list of node names to check
|
5910 |
@type vg: C{str}
|
5911 |
@param vg: the volume group to check
|
5912 |
@type requested: C{int}
|
5913 |
@param requested: the amount of disk in MiB to check for
|
5914 |
@raise errors.OpPrereqError: if the node doesn't have enough disk,
|
5915 |
or we cannot check the node
|
5916 |
|
5917 |
"""
|
5918 |
nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
|
5919 |
for node in nodenames: |
5920 |
info = nodeinfo[node] |
5921 |
info.Raise("Cannot get current information from node %s" % node,
|
5922 |
prereq=True, ecode=errors.ECODE_ENVIRON)
|
5923 |
vg_free = info.payload.get("vg_free", None) |
5924 |
if not isinstance(vg_free, int): |
5925 |
raise errors.OpPrereqError("Can't compute free disk space on node" |
5926 |
" %s for vg %s, result was '%s'" %
|
5927 |
(node, vg, vg_free), errors.ECODE_ENVIRON) |
5928 |
if requested > vg_free:
|
5929 |
raise errors.OpPrereqError("Not enough disk space on target node %s" |
5930 |
" vg %s: required %d MiB, available %d MiB" %
|
5931 |
(node, vg, requested, vg_free), |
5932 |
errors.ECODE_NORES) |
5933 |
|
5934 |
|
5935 |
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, None, hypervisor_name)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    num_cpus = info.payload.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node, num_cpus, requested),
                                 errors.ECODE_NORES)


class LUInstanceStartup(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # extra beparams
    if self.op.beparams:
      # fill the beparams dict
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra hvparams
    if self.op.hvparams:
      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = cluster.FillHV(instance)
      filled_hvp.update(self.op.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)

    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")

      if self.op.hvparams or self.op.beparams:
        self.proc.LogWarning("Overridden parameters are ignored")
    else:
      _CheckNodeOnline(self, instance.primary_node)

      bep = self.cfg.GetClusterInfo().FillBE(instance)

      # check bridges existence
      _CheckInstanceBridgesExist(self, instance)

      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
      if not remote_info.payload: # not running already
        _CheckNodeFreeMemory(self, instance.primary_node,
                             "starting instance %s" % instance.name,
                             bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    if not self.op.no_remember:
      self.cfg.MarkInstanceUp(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as started")
    else:
      node_current = instance.primary_node

      _StartInstanceDisks(self, instance, force)

      result = self.rpc.call_instance_start(node_current, instance,
                                            self.op.hvparams, self.op.beparams,
                                            self.op.startup_paused)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance: %s" % msg)


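# A rough sketch of how LUInstanceStartup is typically reached (illustrative
# only; the dispatch code lives outside this module): a client submits an
# OpInstanceStartup opcode, e.g.
#
#   opcodes.OpInstanceStartup(instance_name="inst1.example.com", force=False)
#
# and the master-side processing expands and locks the instance (ExpandNames),
# validates cluster state (CheckPrereq) and finally activates the disks and
# starts the instance (Exec).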
class LUInstanceReboot(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node)
    instance_running = bool(remote_info.payload)

    node_current = instance.primary_node

    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                            constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.op.shutdown_timeout)
      result.Raise("Could not reboot instance")
    else:
      if instance_running:
        result = self.rpc.call_instance_shutdown(node_current, instance,
                                                 self.op.shutdown_timeout)
        result.Raise("Could not shutdown instance for full reboot")
        _ShutdownInstanceDisks(self, instance)
      else:
        self.LogInfo("Instance %s was already stopped, starting now",
                     instance.name)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance,
                                            None, None, False)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUInstanceShutdown(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.op.timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.primary_offline = \
      self.cfg.GetNodeInfo(self.instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")
    else:
      _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    timeout = self.op.timeout

    if not self.op.no_remember:
      self.cfg.MarkInstanceDown(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
      msg = result.fail_msg
      if msg:
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)

      _ShutdownInstanceDisks(self, instance)


class LUInstanceReinstall(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return _BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                     " offline, cannot reinstall")
    for node in instance.secondary_nodes:
      _CheckNodeOnline(self, node, "Instance secondary node offline,"
                       " cannot reinstall")

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    _CheckInstanceDown(self, instance, "cannot reinstall")

    if self.op.os_type is not None:
      # OS verification
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
      instance_os = self.op.os_type
    else:
      instance_os = instance.os

    nodelist = list(instance.all_nodes)

    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = None

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      # Write to configuration
      self.cfg.Update(inst, feedback_fn)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
                                             self.op.debug_level,
                                             osparams=self.os_inst)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LUInstanceRecreateDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # normalise the disk list
    self.op.disks = sorted(frozenset(self.op.disks))

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    if self.op.nodes:
      self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = []

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # if we replace the nodes, we only need to lock the old primary,
      # otherwise we need to lock all nodes for disk re-creation
      primary_only = bool(self.op.nodes)
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return _BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    if self.op.nodes:
      if len(self.op.nodes) != len(instance.all_nodes):
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                   " %d replacement nodes were specified" %
                                   (instance.name, len(instance.all_nodes),
                                    len(self.op.nodes)),
                                   errors.ECODE_INVAL)
      assert instance.disk_template != constants.DT_DRBD8 or \
          len(self.op.nodes) == 2
      assert instance.disk_template != constants.DT_PLAIN or \
          len(self.op.nodes) == 1
      primary_node = self.op.nodes[0]
    else:
      primary_node = instance.primary_node
    _CheckNodeOnline(self, primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)
    # if we replace nodes *and* the old primary is offline, we don't
    # check
    assert instance.primary_node in self.needed_locks[locking.LEVEL_NODE]
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
    if not (self.op.nodes and old_pnode.offline):
      _CheckInstanceDown(self, instance, "cannot recreate disks")

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
        if idx >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
                                     errors.ECODE_INVAL)
    if self.op.disks != range(len(instance.disks)) and self.op.nodes:
      raise errors.OpPrereqError("Can't recreate disks partially and"
                                 " change the nodes at the same time",
                                 errors.ECODE_INVAL)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    instance = self.instance

    to_skip = []
    mods = [] # keeps track of needed logical_id changes

    for idx, disk in enumerate(instance.disks):
      if idx not in self.op.disks: # disk idx has not been passed in
        to_skip.append(idx)
        continue
      # update secondaries for disks, if needed
      if self.op.nodes:
        if disk.dev_type == constants.LD_DRBD8:
          # need to update the nodes and minors
          assert len(self.op.nodes) == 2
          assert len(disk.logical_id) == 6 # otherwise disk internals
                                           # have changed
          (_, _, old_port, _, _, old_secret) = disk.logical_id
          new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
          new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
                    new_minors[0], new_minors[1], old_secret)
          assert len(disk.logical_id) == len(new_id)
          mods.append((idx, new_id))

    # now that we have passed all asserts above, we can apply the mods
    # in a single run (to avoid partial changes)
    for idx, new_id in mods:
      instance.disks[idx].logical_id = new_id

    # change primary node, if needed
    if self.op.nodes:
      instance.primary_node = self.op.nodes[0]
      self.LogWarning("Changing the instance's nodes, you will have to"
                      " remove any disks left on the older nodes manually")

    if self.op.nodes:
      self.cfg.Update(instance, feedback_fn)

    _CreateDisks(self, instance, to_skip=to_skip)


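# Note on the DRBD logical_id handling above: for DRBD8 disks the logical_id
# is a 6-tuple of the form (node_a, node_b, port, minor_a, minor_b,
# shared_secret), so when replacement nodes are given only the node names and
# the freshly allocated minors change while the port and secret are kept. The
# field names here are descriptive only; the tuple itself is positional.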
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    _CheckNodeOnline(self, instance.primary_node)
    _CheckInstanceDown(self, instance, "cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = netutils.GetHostname(name=new_name)
      if hostname != new_name:
        self.LogInfo("Resolved given name '%s' to '%s'", new_name,
                     hostname.name)
      if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
        raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                    " same as given hostname '%s'") %
                                   (hostname.name, self.op.new_name),
                                   errors.ECODE_INVAL)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not _RemoveDisks(lu, instance):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.name)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                             self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceFailover(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.iallocator = getattr(self.op, "iallocator", None)
    self.target_node = getattr(self.op, "target_node", None)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    ignore_consistency = self.op.ignore_consistency
    shutdown_timeout = self.op.shutdown_timeout
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       cleanup=False,
                                       failover=True,
                                       ignore_consistency=ignore_consistency,
                                       shutdown_timeout=shutdown_timeout)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self.op.target_node
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": target_node,
      }

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""

    env.update(_BuildInstanceHookEnvByObject(self, instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self._migrater.instance
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return (nl, nl + [instance.primary_node])


class LUInstanceMigrate(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       cleanup=self.op.cleanup,
                                       failover=False,
                                       fallback=self.op.allow_failover)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self.op.target_node
    env = _BuildInstanceHookEnvByObject(self, instance)
    env.update({
      "MIGRATE_LIVE": self._migrater.live,
      "MIGRATE_CLEANUP": self.op.cleanup,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": target_node,
      })

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = target_node
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self._migrater.instance
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return (nl, nl + [instance.primary_node])


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    _CheckNodeVmCapable(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance,
                                            None, None, False)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


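# Sketch of the copy loop used by LUInstanceMove.Exec above (a summary of the
# code, not additional behaviour): for every disk the target copy is assembled
# with call_blockdev_assemble, the device path returned in the payload is then
# passed to call_blockdev_export on the source node, and the first failure
# aborts the whole move, removing the half-created target disks again.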
class LUNodeMigrate(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False

  def CheckArguments(self):
    pass

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    return {
      "NODE_NAME": self.op.node_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    return (nl, nl)

  def CheckPrereq(self):
    pass

  def Exec(self, feedback_fn):
    # Prepare jobs for migration instances
    jobs = [
      [opcodes.OpInstanceMigrate(instance_name=inst.name,
                                 mode=self.op.mode,
                                 live=self.op.live,
                                 iallocator=self.op.iallocator,
                                 target_node=self.op.target_node)]
      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
      ]

    # TODO: Run iallocator in this opcode and pass correct placement options to
    # OpInstanceMigrate. Since other jobs can modify the cluster between
    # running the iallocator and the actual migration, a good consistency model
    # will have to be found.

    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset([self.op.node_name]))

    return ResultWithJobs(jobs)


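# Note: LUNodeMigrate.Exec above does not perform any migration itself; it
# builds one single-opcode job per primary instance of the node and returns
# them wrapped in ResultWithJobs, deferring the actual work to those jobs.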
class TLMigrateInstance(Tasklet):
  """Tasklet class for instance migration.

  @type live: boolean
  @ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run
  @type cleanup: boolean
  @ivar cleanup: Whether we clean up from a failed migration
  @type iallocator: string
  @ivar iallocator: The iallocator used to determine target_node
  @type target_node: string
  @ivar target_node: If given, the target_node to reallocate the instance to
  @type failover: boolean
  @ivar failover: Whether operation results in failover or migration
  @type fallback: boolean
  @ivar fallback: Whether fallback to failover is allowed if migration is not
      possible
  @type ignore_consistency: boolean
  @ivar ignore_consistency: Whether we should ignore consistency between source
      and target node
  @type shutdown_timeout: int
  @ivar shutdown_timeout: In case of failover timeout of the shutdown

  """

  # Constants
  _MIGRATION_POLL_INTERVAL = 1      # seconds
  _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds

  def __init__(self, lu, instance_name, cleanup=False,
               failover=False, fallback=False,
               ignore_consistency=False,
               shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.cleanup = cleanup
    self.live = False # will be overridden later
    self.failover = failover
    self.fallback = fallback
    self.ignore_consistency = ignore_consistency
    self.shutdown_timeout = shutdown_timeout

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None
    self.instance = instance

    if (not self.cleanup and not instance.admin_up and not self.failover and
        self.fallback):
      self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
                      " to failover")
      self.failover = True

    if instance.disk_template not in constants.DTS_MIRRORED:
      if self.failover:
        text = "failovers"
      else:
        text = "migrations"
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
                                 " %s" % (instance.disk_template, text),
                                 errors.ECODE_STATE)

    if instance.disk_template in constants.DTS_EXT_MIRROR:
      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")

      if self.lu.op.iallocator:
        self._RunAllocator()
      else:
        # We set self.target_node as it is required by
        # BuildHooksEnv
        self.target_node = self.lu.op.target_node

      # self.target_node is already populated, either directly or by the
      # iallocator run
      target_node = self.target_node
      if self.target_node == instance.primary_node:
        raise errors.OpPrereqError("Cannot migrate instance %s"
                                   " to its primary (%s)" %
                                   (instance.name, instance.primary_node))

      if len(self.lu.tasklets) == 1:
        # It is safe to release locks only when we're the only tasklet
        # in the LU
        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                      keep=[instance.primary_node, self.target_node])

    else:
      secondary_nodes = instance.secondary_nodes
      if not secondary_nodes:
        raise errors.ConfigurationError("No secondary node but using"
                                        " %s disk template" %
                                        instance.disk_template)
      target_node = secondary_nodes[0]
      if self.lu.op.iallocator or (self.lu.op.target_node and
                                   self.lu.op.target_node != target_node):
        if self.failover:
          text = "failed over"
        else:
          text = "migrated"
        raise errors.OpPrereqError("Instances with disk template %s cannot"
                                   " be %s to arbitrary nodes"
                                   " (neither an iallocator nor a target"
                                   " node can be passed)" %
                                   (instance.disk_template, text),
                                   errors.ECODE_INVAL)

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    # check memory requirements on the secondary node
    if not self.failover or instance.admin_up:
      _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                           instance.name, i_be[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.lu.LogInfo("Not checking memory on the secondary node as"
                      " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self.lu, target_node)
      if not self.failover:
        result = self.rpc.call_instance_migratable(instance.primary_node,
                                                   instance)
        if result.fail_msg and self.fallback:
          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
                          " failover")
          self.failover = True
        else:
          result.Raise("Can't migrate, please use failover",
                       prereq=True, ecode=errors.ECODE_STATE)

    assert not (self.failover and self.cleanup)

    if not self.failover:
      if self.lu.op.live is not None and self.lu.op.mode is not None:
        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                   " parameters are accepted",
                                   errors.ECODE_INVAL)
      if self.lu.op.live is not None:
        if self.lu.op.live:
          self.lu.op.mode = constants.HT_MIGRATION_LIVE
        else:
          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
        # reset the 'live' parameter to None so that repeated
        # invocations of CheckPrereq do not raise an exception
        self.lu.op.live = None
      elif self.lu.op.mode is None:
        # read the default value from the hypervisor
        i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
                                                skip_globals=False)
        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
    else:
      # Failover is never live
      self.live = False

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.instance_name,
                     # TODO See why hail breaks with a single node below
                     relocate_from=[self.instance.primary_node,
                                    self.instance.primary_node],
                     )

    ial.Run(self.lu.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.lu.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.lu.op.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.target_node = ial.result[0]
    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                    self.instance_name, self.lu.op.iallocator,
                    utils.CommaJoin(ial.result))

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn(" - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused; you will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all;"
                               " in this case it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    if instance.disk_template in constants.DTS_INT_MIRROR:
      self._EnsureSecondary(demoted_node)
      try:
        self._WaitUntilSync()
      except errors.OpExecError:
        # we ignore here errors, since if the device is standalone, it
        # won't be able to sync
        pass
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
      return

    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
                         " please try to recover the instance manually;"
                         " error '%s'" % str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
                                                                 instance,
                                                                 migration_info,
                                                                 False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

    abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
                                                    instance, False, self.live)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on source node %s: %s",
                    source_node, abort_msg)

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # Check for hypervisor version mismatch and warn the user.
    nodeinfo = self.rpc.call_node_info([source_node, target_node],
                                       None, self.instance.hypervisor)
    src_info = nodeinfo[source_node]
    dst_info = nodeinfo[target_node]

    if ((constants.HV_NODEINFO_KEY_VERSION in src_info.payload) and
        (constants.HV_NODEINFO_KEY_VERSION in dst_info.payload)):
      src_version = src_info.payload[constants.HV_NODEINFO_KEY_VERSION]
      dst_version = dst_info.payload[constants.HV_NODEINFO_KEY_VERSION]
      if src_version != dst_version:
        self.feedback_fn("* warning: hypervisor version mismatch between"
                         " source (%s) and target (%s) node" %
                         (src_version, dst_version))

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migration" % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
      # Then switch the disks to master/master mode
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(True)
      self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Pre-migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* starting memory transfer")
    last_feedback = time.time()
    while True:
      result = self.rpc.call_instance_get_migration_status(source_node,
                                                           instance)
      msg = result.fail_msg
      ms = result.payload # MigrationStatus instance
      if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
        logging.error("Instance migration failed, trying to revert"
                      " disk status: %s", msg)
        self.feedback_fn("Migration failed, aborting")
        self._AbortMigration()
        self._RevertDiskStatus()
        raise errors.OpExecError("Could not migrate instance %s: %s" %
                                 (instance.name, msg))

      if result.payload.status != constants.HV_MIGRATION_ACTIVE:
        self.feedback_fn("* memory transfer complete")
        break

      if (utils.TimeoutExpired(last_feedback,
                               self._MIGRATION_FEEDBACK_INTERVAL) and
          ms.transferred_ram is not None):
        mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
        self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
        last_feedback = time.time()

      time.sleep(self._MIGRATION_POLL_INTERVAL)
|
7586 |
result = self.rpc.call_instance_finalize_migration_src(source_node,
|
7587 |
instance, |
7588 |
True,
|
7589 |
self.live)
|
7590 |
msg = result.fail_msg |
7591 |
if msg:
|
7592 |
logging.error("Instance migration succeeded, but finalization failed"
|
7593 |
" on the source node: %s", msg)
|
7594 |
raise errors.OpExecError("Could not finalize instance migration: %s" % |
7595 |
msg) |
7596 |
|
7597 |
instance.primary_node = target_node |
7598 |
|
7599 |
# distribute new instance config to the other nodes
|
7600 |
self.cfg.Update(instance, self.feedback_fn) |
7601 |
|
7602 |
result = self.rpc.call_instance_finalize_migration_dst(target_node,
|
7603 |
instance, |
7604 |
migration_info, |
7605 |
True)
|
7606 |
msg = result.fail_msg |
7607 |
if msg:
|
7608 |
logging.error("Instance migration succeeded, but finalization failed"
|
7609 |
" on the target node: %s", msg)
|
7610 |
raise errors.OpExecError("Could not finalize instance migration: %s" % |
7611 |
msg) |
7612 |
|
7613 |
if self.instance.disk_template not in constants.DTS_EXT_MIRROR: |
7614 |
self._EnsureSecondary(source_node)
|
7615 |
self._WaitUntilSync()
|
7616 |
self._GoStandalone()
|
7617 |
self._GoReconnect(False) |
7618 |
self._WaitUntilSync()
|
7619 |
|
7620 |
self.feedback_fn("* done") |
7621 |
|
7622 |
def _ExecFailover(self): |
7623 |
"""Failover an instance.
|
7624 |
|
7625 |
The failover is done by shutting it down on its present node and
|
7626 |
starting it on the secondary.
|
7627 |
|
7628 |
"""
|
7629 |
instance = self.instance
|
7630 |
primary_node = self.cfg.GetNodeInfo(instance.primary_node)
|
7631 |
|
7632 |
source_node = instance.primary_node |
7633 |
target_node = self.target_node
|
7634 |
|
7635 |
if instance.admin_up:
|
7636 |
self.feedback_fn("* checking disk consistency between source and target") |
7637 |
for dev in instance.disks: |
7638 |
# for drbd, these are drbd over lvm
|
7639 |
if not _CheckDiskConsistency(self.lu, dev, target_node, False): |
7640 |
if primary_node.offline:
|
7641 |
self.feedback_fn("Node %s is offline, ignoring degraded disk %s on" |
7642 |
" target node %s" %
|
7643 |
(primary_node.name, dev.iv_name, target_node)) |
7644 |
elif not self.ignore_consistency: |
7645 |
raise errors.OpExecError("Disk %s is degraded on target node," |
7646 |
" aborting failover" % dev.iv_name)
|
7647 |
else:
|
7648 |
self.feedback_fn("* not checking disk consistency as instance is not" |
7649 |
" running")
|
7650 |
|
7651 |
self.feedback_fn("* shutting down instance on source node") |
7652 |
logging.info("Shutting down instance %s on node %s",
|
7653 |
instance.name, source_node) |
7654 |
|
7655 |
result = self.rpc.call_instance_shutdown(source_node, instance,
|
7656 |
self.shutdown_timeout)
|
7657 |
msg = result.fail_msg |
7658 |
if msg:
|
7659 |
if self.ignore_consistency or primary_node.offline: |
7660 |
self.lu.LogWarning("Could not shutdown instance %s on node %s," |
7661 |
" proceeding anyway; please make sure node"
|
7662 |
" %s is down; error details: %s",
|
7663 |
instance.name, source_node, source_node, msg) |
7664 |
else:
|
7665 |
raise errors.OpExecError("Could not shutdown instance %s on" |
7666 |
" node %s: %s" %
|
7667 |
(instance.name, source_node, msg)) |
7668 |
|
7669 |
self.feedback_fn("* deactivating the instance's disks on source node") |
7670 |
if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True): |
7671 |
raise errors.OpExecError("Can't shut down the instance's disks") |
7672 |
|
7673 |
instance.primary_node = target_node |
7674 |
# distribute new instance config to the other nodes
|
7675 |
self.cfg.Update(instance, self.feedback_fn) |
7676 |
|
7677 |
# Only start the instance if it's marked as up
|
7678 |
if instance.admin_up:
|
7679 |
self.feedback_fn("* activating the instance's disks on target node %s" % |
7680 |
target_node) |
7681 |
logging.info("Starting instance %s on node %s",
|
7682 |
instance.name, target_node) |
7683 |
|
7684 |
disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
|
7685 |
ignore_secondaries=True)
|
7686 |
if not disks_ok: |
7687 |
_ShutdownInstanceDisks(self.lu, instance)
|
7688 |
raise errors.OpExecError("Can't activate the instance's disks") |
7689 |
|
7690 |
self.feedback_fn("* starting the instance on the target node %s" % |
7691 |
target_node) |
7692 |
result = self.rpc.call_instance_start(target_node, instance, None, None, |
7693 |
False)
|
7694 |
msg = result.fail_msg |
7695 |
if msg:
|
7696 |
_ShutdownInstanceDisks(self.lu, instance)
|
7697 |
raise errors.OpExecError("Could not start instance %s on node %s: %s" % |
7698 |
(instance.name, target_node, msg)) |
7699 |
|
7700 |
def Exec(self, feedback_fn): |
7701 |
"""Perform the migration.
|
7702 |
|
7703 |
"""
|
7704 |
self.feedback_fn = feedback_fn
|
7705 |
self.source_node = self.instance.primary_node |
7706 |
|
7707 |
# FIXME: if we implement migrate-to-any in DRBD, this needs fixing
|
7708 |
if self.instance.disk_template in constants.DTS_INT_MIRROR: |
7709 |
self.target_node = self.instance.secondary_nodes[0] |
7710 |
# Otherwise self.target_node has been populated either
|
7711 |
# directly, or through an iallocator.
|
7712 |
|
7713 |
self.all_nodes = [self.source_node, self.target_node] |
7714 |
self.nodes_ip = dict((name, node.secondary_ip) for (name, node) |
7715 |
in self.cfg.GetMultiNodeInfo(self.all_nodes)) |
7716 |
|
7717 |
if self.failover: |
7718 |
feedback_fn("Failover instance %s" % self.instance.name) |
7719 |
self._ExecFailover()
|
7720 |
else:
|
7721 |
feedback_fn("Migrating instance %s" % self.instance.name) |
7722 |
|
7723 |
if self.cleanup: |
7724 |
return self._ExecCleanup() |
7725 |
else:
|
7726 |
return self._ExecMigration() |
7727 |
|
7728 |
|
7729 |
def _CreateBlockDev(lu, node, instance, device, force_create, |
7730 |
info, force_open): |
7731 |
"""Create a tree of block devices on a given node.
|
7732 |
|
7733 |
If this device type has to be created on secondaries, create it and
|
7734 |
all its children.
|
7735 |
|
7736 |
If not, just recurse to children keeping the same 'force' value.
|
7737 |
|
7738 |
@param lu: the lu on whose behalf we execute
|
7739 |
@param node: the node on which to create the device
|
7740 |
@type instance: L{objects.Instance}
|
7741 |
@param instance: the instance which owns the device
|
7742 |
@type device: L{objects.Disk}
|
7743 |
@param device: the device to create
|
7744 |
@type force_create: boolean
|
7745 |
@param force_create: whether to force creation of this device; this
|
7746 |
will be change to True whenever we find a device which has
|
7747 |
CreateOnSecondary() attribute
|
7748 |
@param info: the extra 'metadata' we should attach to the device
|
7749 |
(this will be represented as a LVM tag)
|
7750 |
@type force_open: boolean
|
7751 |
@param force_open: this parameter will be passes to the
|
7752 |
L{backend.BlockdevCreate} function where it specifies
|
7753 |
whether we run on primary or not, and it affects both
|
7754 |
the child assembly and the device own Open() execution
|
7755 |
|
7756 |
"""
|
7757 |
if device.CreateOnSecondary():
|
7758 |
force_create = True
|
7759 |
|
7760 |
if device.children:
|
7761 |
for child in device.children: |
7762 |
_CreateBlockDev(lu, node, instance, child, force_create, |
7763 |
info, force_open) |
7764 |
|
7765 |
if not force_create: |
7766 |
return
|
7767 |
|
7768 |
_CreateSingleBlockDev(lu, node, instance, device, info, force_open) |
7769 |
|
7770 |
|
7771 |
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open): |
7772 |
"""Create a single block device on a given node.
|
7773 |
|
7774 |
This will not recurse over children of the device, so they must be
|
7775 |
created in advance.
|
7776 |
|
7777 |
@param lu: the lu on whose behalf we execute
|
7778 |
@param node: the node on which to create the device
|
7779 |
@type instance: L{objects.Instance}
|
7780 |
@param instance: the instance which owns the device
|
7781 |
@type device: L{objects.Disk}
|
7782 |
@param device: the device to create
|
7783 |
@param info: the extra 'metadata' we should attach to the device
|
7784 |
(this will be represented as a LVM tag)
|
7785 |
@type force_open: boolean
|
7786 |
@param force_open: this parameter will be passes to the
|
7787 |
L{backend.BlockdevCreate} function where it specifies
|
7788 |
whether we run on primary or not, and it affects both
|
7789 |
the child assembly and the device own Open() execution
|
7790 |
|
7791 |
"""
|
7792 |
lu.cfg.SetDiskID(device, node) |
7793 |
result = lu.rpc.call_blockdev_create(node, device, device.size, |
7794 |
instance.name, force_open, info) |
7795 |
result.Raise("Can't create block device %s on"
|
7796 |
" node %s for instance %s" % (device, node, instance.name))
|
7797 |
if device.physical_id is None: |
7798 |
device.physical_id = result.payload |
7799 |
|
7800 |
|
7801 |
def _GenerateUniqueNames(lu, exts): |
7802 |
"""Generate a suitable LV name.
|
7803 |
|
7804 |
This will generate a logical volume name for the given instance.
|
7805 |
|
7806 |
"""
|
7807 |
results = [] |
7808 |
for val in exts: |
7809 |
new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId()) |
7810 |
results.append("%s%s" % (new_id, val))
|
7811 |
return results
|
7812 |
|
7813 |
|
7814 |
def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names, |
7815 |
iv_name, p_minor, s_minor): |
7816 |
"""Generate a drbd8 device complete with its children.
|
7817 |
|
7818 |
"""
|
7819 |
assert len(vgnames) == len(names) == 2 |
7820 |
port = lu.cfg.AllocatePort() |
7821 |
shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId()) |
7822 |
dev_data = objects.Disk(dev_type=constants.LD_LV, size=size, |
7823 |
logical_id=(vgnames[0], names[0])) |
7824 |
dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
|
7825 |
logical_id=(vgnames[1], names[1])) |
7826 |
drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size, |
7827 |
logical_id=(primary, secondary, port, |
7828 |
p_minor, s_minor, |
7829 |
shared_secret), |
7830 |
children=[dev_data, dev_meta], |
7831 |
iv_name=iv_name) |
7832 |
return drbd_dev
|
7833 |
|
7834 |
|
7835 |
def _GenerateDiskTemplate(lu, template_name, |
7836 |
instance_name, primary_node, |
7837 |
secondary_nodes, disk_info, |
7838 |
file_storage_dir, file_driver, |
7839 |
base_index, feedback_fn): |
7840 |
"""Generate the entire disk layout for a given template type.
|
7841 |
|
7842 |
"""
|
7843 |
#TODO: compute space requirements
|
7844 |
|
7845 |
vgname = lu.cfg.GetVGName() |
7846 |
disk_count = len(disk_info)
|
7847 |
disks = [] |
7848 |
if template_name == constants.DT_DISKLESS:
|
7849 |
pass
|
7850 |
elif template_name == constants.DT_PLAIN:
|
7851 |
if len(secondary_nodes) != 0: |
7852 |
raise errors.ProgrammerError("Wrong template configuration") |
7853 |
|
7854 |
names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
|
7855 |
for i in range(disk_count)]) |
7856 |
for idx, disk in enumerate(disk_info): |
7857 |
disk_index = idx + base_index |
7858 |
vg = disk.get(constants.IDISK_VG, vgname) |
7859 |
feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
|
7860 |
disk_dev = objects.Disk(dev_type=constants.LD_LV, |
7861 |
size=disk[constants.IDISK_SIZE], |
7862 |
logical_id=(vg, names[idx]), |
7863 |
iv_name="disk/%d" % disk_index,
|
7864 |
mode=disk[constants.IDISK_MODE]) |
7865 |
disks.append(disk_dev) |
7866 |
elif template_name == constants.DT_DRBD8:
|
7867 |
if len(secondary_nodes) != 1: |
7868 |
raise errors.ProgrammerError("Wrong template configuration") |
7869 |
remote_node = secondary_nodes[0]
|
7870 |
minors = lu.cfg.AllocateDRBDMinor( |
7871 |
[primary_node, remote_node] * len(disk_info), instance_name)
|
7872 |
|
7873 |
names = [] |
7874 |
for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i) |
7875 |
for i in range(disk_count)]): |
7876 |
names.append(lv_prefix + "_data")
|
7877 |
names.append(lv_prefix + "_meta")
|
7878 |
for idx, disk in enumerate(disk_info): |
7879 |
disk_index = idx + base_index |
7880 |
data_vg = disk.get(constants.IDISK_VG, vgname) |
7881 |
meta_vg = disk.get(constants.IDISK_METAVG, data_vg) |
7882 |
disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node, |
7883 |
disk[constants.IDISK_SIZE], |
7884 |
[data_vg, meta_vg], |
7885 |
names[idx * 2:idx * 2 + 2], |
7886 |
"disk/%d" % disk_index,
|
7887 |
minors[idx * 2], minors[idx * 2 + 1]) |
7888 |
disk_dev.mode = disk[constants.IDISK_MODE] |
7889 |
disks.append(disk_dev) |
7890 |
elif template_name == constants.DT_FILE:
|
7891 |
if len(secondary_nodes) != 0: |
7892 |
raise errors.ProgrammerError("Wrong template configuration") |
7893 |
|
7894 |
opcodes.RequireFileStorage() |
7895 |
|
7896 |
for idx, disk in enumerate(disk_info): |
7897 |
disk_index = idx + base_index |
7898 |
disk_dev = objects.Disk(dev_type=constants.LD_FILE, |
7899 |
size=disk[constants.IDISK_SIZE], |
7900 |
iv_name="disk/%d" % disk_index,
|
7901 |
logical_id=(file_driver, |
7902 |
"%s/disk%d" % (file_storage_dir,
|
7903 |
disk_index)), |
7904 |
mode=disk[constants.IDISK_MODE]) |
7905 |
disks.append(disk_dev) |
7906 |
elif template_name == constants.DT_SHARED_FILE:
|
7907 |
if len(secondary_nodes) != 0: |
7908 |
raise errors.ProgrammerError("Wrong template configuration") |
7909 |
|
7910 |
opcodes.RequireSharedFileStorage() |
7911 |
|
7912 |
for idx, disk in enumerate(disk_info): |
7913 |
disk_index = idx + base_index |
7914 |
disk_dev = objects.Disk(dev_type=constants.LD_FILE, |
7915 |
size=disk[constants.IDISK_SIZE], |
7916 |
iv_name="disk/%d" % disk_index,
|
7917 |
logical_id=(file_driver, |
7918 |
"%s/disk%d" % (file_storage_dir,
|
7919 |
disk_index)), |
7920 |
mode=disk[constants.IDISK_MODE]) |
7921 |
disks.append(disk_dev) |
7922 |
elif template_name == constants.DT_BLOCK:
|
7923 |
if len(secondary_nodes) != 0: |
7924 |
raise errors.ProgrammerError("Wrong template configuration") |
7925 |
|
7926 |
for idx, disk in enumerate(disk_info): |
7927 |
disk_index = idx + base_index |
7928 |
disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV, |
7929 |
size=disk[constants.IDISK_SIZE], |
7930 |
logical_id=(constants.BLOCKDEV_DRIVER_MANUAL, |
7931 |
disk[constants.IDISK_ADOPT]), |
7932 |
iv_name="disk/%d" % disk_index,
|
7933 |
mode=disk[constants.IDISK_MODE]) |
7934 |
disks.append(disk_dev) |
7935 |
|
7936 |
else:
|
7937 |
raise errors.ProgrammerError("Invalid disk template '%s'" % template_name) |
7938 |
return disks
|
7939 |
|
7940 |
|
7941 |
def _GetInstanceInfoText(instance): |
7942 |
"""Compute that text that should be added to the disk's metadata.
|
7943 |
|
7944 |
"""
|
7945 |
return "originstname+%s" % instance.name |
7946 |
|
7947 |
|
7948 |
def _CalcEta(time_taken, written, total_size): |
7949 |
"""Calculates the ETA based on size written and total size.
|
7950 |
|
7951 |
@param time_taken: The time taken so far
|
7952 |
@param written: amount written so far
|
7953 |
@param total_size: The total size of data to be written
|
7954 |
@return: The remaining time in seconds
|
7955 |
|
7956 |
"""
|
7957 |
avg_time = time_taken / float(written)
|
7958 |
return (total_size - written) * avg_time
|
7959 |
|
7960 |
|
7961 |
def _WipeDisks(lu, instance): |
7962 |
"""Wipes instance disks.
|
7963 |
|
7964 |
@type lu: L{LogicalUnit}
|
7965 |
@param lu: the logical unit on whose behalf we execute
|
7966 |
@type instance: L{objects.Instance}
|
7967 |
@param instance: the instance whose disks we should create
|
7968 |
@return: the success of the wipe
|
7969 |
|
7970 |
"""
|
7971 |
node = instance.primary_node |
7972 |
|
7973 |
for device in instance.disks: |
7974 |
lu.cfg.SetDiskID(device, node) |
7975 |
|
7976 |
logging.info("Pause sync of instance %s disks", instance.name)
|
7977 |
result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
|
7978 |
|
7979 |
for idx, success in enumerate(result.payload): |
7980 |
if not success: |
7981 |
logging.warn("pause-sync of instance %s for disks %d failed",
|
7982 |
instance.name, idx) |
7983 |
|
7984 |
try:
|
7985 |
for idx, device in enumerate(instance.disks): |
7986 |
# The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
|
7987 |
# MAX_WIPE_CHUNK at max
|
7988 |
wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 * |
7989 |
constants.MIN_WIPE_CHUNK_PERCENT) |
7990 |
# we _must_ make this an int, otherwise rounding errors will
|
7991 |
# occur
|
7992 |
wipe_chunk_size = int(wipe_chunk_size)
|
7993 |
|
7994 |
lu.LogInfo("* Wiping disk %d", idx)
|
7995 |
logging.info("Wiping disk %d for instance %s, node %s using"
|
7996 |
" chunk size %s", idx, instance.name, node, wipe_chunk_size)
|
7997 |
|
7998 |
offset = 0
|
7999 |
size = device.size |
8000 |
last_output = 0
|
8001 |
start_time = time.time() |
8002 |
|
8003 |
while offset < size:
|
8004 |
wipe_size = min(wipe_chunk_size, size - offset)
|
8005 |
logging.debug("Wiping disk %d, offset %s, chunk %s",
|
8006 |
idx, offset, wipe_size) |
8007 |
result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size) |
8008 |
result.Raise("Could not wipe disk %d at offset %d for size %d" %
|
8009 |
(idx, offset, wipe_size)) |
8010 |
now = time.time() |
8011 |
offset += wipe_size |
8012 |
if now - last_output >= 60: |
8013 |
eta = _CalcEta(now - start_time, offset, size) |
8014 |
lu.LogInfo(" - done: %.1f%% ETA: %s" %
|
8015 |
(offset / float(size) * 100, utils.FormatSeconds(eta))) |
8016 |
last_output = now |
8017 |
finally:
|
8018 |
logging.info("Resume sync of instance %s disks", instance.name)
|
8019 |
|
8020 |
result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
|
8021 |
|
8022 |
for idx, success in enumerate(result.payload): |
8023 |
if not success: |
8024 |
lu.LogWarning("Resume sync of disk %d failed, please have a"
|
8025 |
" look at the status and troubleshoot the issue", idx)
|
8026 |
logging.warn("resume-sync of instance %s for disks %d failed",
|
8027 |
instance.name, idx) |
8028 |
|
8029 |
|
8030 |
def _CreateDisks(lu, instance, to_skip=None, target_node=None): |
8031 |
"""Create all disks for an instance.
|
8032 |
|
8033 |
This abstracts away some work from AddInstance.
|
8034 |
|
8035 |
@type lu: L{LogicalUnit}
|
8036 |
@param lu: the logical unit on whose behalf we execute
|
8037 |
@type instance: L{objects.Instance}
|
8038 |
@param instance: the instance whose disks we should create
|
8039 |
@type to_skip: list
|
8040 |
@param to_skip: list of indices to skip
|
8041 |
@type target_node: string
|
8042 |
@param target_node: if passed, overrides the target node for creation
|
8043 |
@rtype: boolean
|
8044 |
@return: the success of the creation
|
8045 |
|
8046 |
"""
|
8047 |
info = _GetInstanceInfoText(instance) |
8048 |
if target_node is None: |
8049 |
pnode = instance.primary_node |
8050 |
all_nodes = instance.all_nodes |
8051 |
else:
|
8052 |
pnode = target_node |
8053 |
all_nodes = [pnode] |
8054 |
|
8055 |
if instance.disk_template in constants.DTS_FILEBASED: |
8056 |
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) |
8057 |
result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir) |
8058 |
|
8059 |
result.Raise("Failed to create directory '%s' on"
|
8060 |
" node %s" % (file_storage_dir, pnode))
|
8061 |
|
8062 |
# Note: this needs to be kept in sync with adding of disks in
|
8063 |
# LUInstanceSetParams
|
8064 |
for idx, device in enumerate(instance.disks): |
8065 |
if to_skip and idx in to_skip: |
8066 |
continue
|
8067 |
logging.info("Creating volume %s for instance %s",
|
8068 |
device.iv_name, instance.name) |
8069 |
#HARDCODE
|
8070 |
for node in all_nodes: |
8071 |
f_create = node == pnode |
8072 |
_CreateBlockDev(lu, node, instance, device, f_create, info, f_create) |
8073 |
|
8074 |
|
8075 |
def _RemoveDisks(lu, instance, target_node=None): |
8076 |
"""Remove all disks for an instance.
|
8077 |
|
8078 |
This abstracts away some work from `AddInstance()` and
|
8079 |
`RemoveInstance()`. Note that in case some of the devices couldn't
|
8080 |
be removed, the removal will continue with the other ones (compare
|
8081 |
with `_CreateDisks()`).
|
8082 |
|
8083 |
@type lu: L{LogicalUnit}
|
8084 |
@param lu: the logical unit on whose behalf we execute
|
8085 |
@type instance: L{objects.Instance}
|
8086 |
@param instance: the instance whose disks we should remove
|
8087 |
@type target_node: string
|
8088 |
@param target_node: used to override the node on which to remove the disks
|
8089 |
@rtype: boolean
|
8090 |
@return: the success of the removal
|
8091 |
|
8092 |
"""
|
8093 |
logging.info("Removing block devices for instance %s", instance.name)
|
8094 |
|
8095 |
all_result = True
|
8096 |
for device in instance.disks: |
8097 |
if target_node:
|
8098 |
edata = [(target_node, device)] |
8099 |
else:
|
8100 |
edata = device.ComputeNodeTree(instance.primary_node) |
8101 |
for node, disk in edata: |
8102 |
lu.cfg.SetDiskID(disk, node) |
8103 |
msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg |
8104 |
if msg:
|
8105 |
lu.LogWarning("Could not remove block device %s on node %s,"
|
8106 |
" continuing anyway: %s", device.iv_name, node, msg)
|
8107 |
all_result = False
|
8108 |
|
8109 |
if instance.disk_template == constants.DT_FILE:
|
8110 |
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1]) |
8111 |
if target_node:
|
8112 |
tgt = target_node |
8113 |
else:
|
8114 |
tgt = instance.primary_node |
8115 |
result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir) |
8116 |
if result.fail_msg:
|
8117 |
lu.LogWarning("Could not remove directory '%s' on node %s: %s",
|
8118 |
file_storage_dir, instance.primary_node, result.fail_msg) |
8119 |
all_result = False
|
8120 |
|
8121 |
return all_result
|
8122 |
|
8123 |
|
8124 |
def _ComputeDiskSizePerVG(disk_template, disks): |
8125 |
"""Compute disk size requirements in the volume group
|
8126 |
|
8127 |
"""
|
8128 |
def _compute(disks, payload): |
8129 |
"""Universal algorithm.
|
8130 |
|
8131 |
"""
|
8132 |
vgs = {} |
8133 |
for disk in disks: |
8134 |
vgs[disk[constants.IDISK_VG]] = \ |
8135 |
vgs.get(constants.IDISK_VG, 0) + disk[constants.IDISK_SIZE] + payload
|
8136 |
|
8137 |
return vgs
|
8138 |
|
8139 |
# Required free disk space as a function of disk and swap space
|
8140 |
req_size_dict = { |
8141 |
constants.DT_DISKLESS: {}, |
8142 |
constants.DT_PLAIN: _compute(disks, 0),
|
8143 |
# 128 MB are added for drbd metadata for each disk
|
8144 |
constants.DT_DRBD8: _compute(disks, 128),
|
8145 |
constants.DT_FILE: {}, |
8146 |
constants.DT_SHARED_FILE: {}, |
8147 |
} |
8148 |
|
8149 |
if disk_template not in req_size_dict: |
8150 |
raise errors.ProgrammerError("Disk template '%s' size requirement" |
8151 |
" is unknown" % disk_template)
|
8152 |
|
8153 |
return req_size_dict[disk_template]
|
8154 |
|
8155 |
|
8156 |
def _ComputeDiskSize(disk_template, disks): |
8157 |
"""Compute disk size requirements in the volume group
|
8158 |
|
8159 |
"""
|
8160 |
# Required free disk space as a function of disk and swap space
|
8161 |
req_size_dict = { |
8162 |
constants.DT_DISKLESS: None,
|
8163 |
constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks), |
8164 |
# 128 MB are added for drbd metadata for each disk
|
8165 |
constants.DT_DRBD8: sum(d[constants.IDISK_SIZE] + 128 for d in disks), |
8166 |
constants.DT_FILE: None,
|
8167 |
constants.DT_SHARED_FILE: 0,
|
8168 |
constants.DT_BLOCK: 0,
|
8169 |
} |
8170 |
|
8171 |
if disk_template not in req_size_dict: |
8172 |
raise errors.ProgrammerError("Disk template '%s' size requirement" |
8173 |
" is unknown" % disk_template)
|
8174 |
|
8175 |
return req_size_dict[disk_template]
|
8176 |
|
8177 |
|
8178 |
def _FilterVmNodes(lu, nodenames): |
8179 |
"""Filters out non-vm_capable nodes from a list.
|
8180 |
|
8181 |
@type lu: L{LogicalUnit}
|
8182 |
@param lu: the logical unit for which we check
|
8183 |
@type nodenames: list
|
8184 |
@param nodenames: the list of nodes on which we should check
|
8185 |
@rtype: list
|
8186 |
@return: the list of vm-capable nodes
|
8187 |
|
8188 |
"""
|
8189 |
vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
|
8190 |
return [name for name in nodenames if name not in vm_nodes] |
8191 |
|
8192 |
|
8193 |
def _CheckHVParams(lu, nodenames, hvname, hvparams): |
8194 |
"""Hypervisor parameter validation.
|
8195 |
|
8196 |
This function abstract the hypervisor parameter validation to be
|
8197 |
used in both instance create and instance modify.
|
8198 |
|
8199 |
@type lu: L{LogicalUnit}
|
8200 |
@param lu: the logical unit for which we check
|
8201 |
@type nodenames: list
|
8202 |
@param nodenames: the list of nodes on which we should check
|
8203 |
@type hvname: string
|
8204 |
@param hvname: the name of the hypervisor we should use
|
8205 |
@type hvparams: dict
|
8206 |
@param hvparams: the parameters which we need to check
|
8207 |
@raise errors.OpPrereqError: if the parameters are not valid
|
8208 |
|
8209 |
"""
|
8210 |
nodenames = _FilterVmNodes(lu, nodenames) |
8211 |
hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, |
8212 |
hvname, |
8213 |
hvparams) |
8214 |
for node in nodenames: |
8215 |
info = hvinfo[node] |
8216 |
if info.offline:
|
8217 |
continue
|
8218 |
info.Raise("Hypervisor parameter validation failed on node %s" % node)
|
8219 |
|
8220 |
|
8221 |
def _CheckOSParams(lu, required, nodenames, osname, osparams): |
8222 |
"""OS parameters validation.
|
8223 |
|
8224 |
@type lu: L{LogicalUnit}
|
8225 |
@param lu: the logical unit for which we check
|
8226 |
@type required: boolean
|
8227 |
@param required: whether the validation should fail if the OS is not
|
8228 |
found
|
8229 |
@type nodenames: list
|
8230 |
@param nodenames: the list of nodes on which we should check
|
8231 |
@type osname: string
|
8232 |
@param osname: the name of the hypervisor we should use
|
8233 |
@type osparams: dict
|
8234 |
@param osparams: the parameters which we need to check
|
8235 |
@raise errors.OpPrereqError: if the parameters are not valid
|
8236 |
|
8237 |
"""
|
8238 |
nodenames = _FilterVmNodes(lu, nodenames) |
8239 |
result = lu.rpc.call_os_validate(required, nodenames, osname, |
8240 |
[constants.OS_VALIDATE_PARAMETERS], |
8241 |
osparams) |
8242 |
for node, nres in result.items(): |
8243 |
# we don't check for offline cases since this should be run only
|
8244 |
# against the master node and/or an instance's nodes
|
8245 |
nres.Raise("OS Parameters validation failed on node %s" % node)
|
8246 |
if not nres.payload: |
8247 |
lu.LogInfo("OS %s not found on node %s, validation skipped",
|
8248 |
osname, node) |
8249 |
|
8250 |
|
8251 |
class LUInstanceCreate(LogicalUnit): |
8252 |
"""Create an instance.
|
8253 |
|
8254 |
"""
|
8255 |
HPATH = "instance-add"
|
8256 |
HTYPE = constants.HTYPE_INSTANCE |
8257 |
REQ_BGL = False
|
8258 |
|
8259 |
def CheckArguments(self): |
8260 |
"""Check arguments.
|
8261 |
|
8262 |
"""
|
8263 |
# do not require name_check to ease forward/backward compatibility
|
8264 |
# for tools
|
8265 |
if self.op.no_install and self.op.start: |
8266 |
self.LogInfo("No-installation mode selected, disabling startup") |
8267 |
self.op.start = False |
8268 |
# validate/normalize the instance name
|
8269 |
self.op.instance_name = \
|
8270 |
netutils.Hostname.GetNormalizedName(self.op.instance_name)
|
8271 |
|
8272 |
if self.op.ip_check and not self.op.name_check: |
8273 |
# TODO: make the ip check more flexible and not depend on the name check
|
8274 |
raise errors.OpPrereqError("Cannot do IP address check without a name" |
8275 |
" check", errors.ECODE_INVAL)
|
8276 |
|
8277 |
# check nics' parameter names
|
8278 |
for nic in self.op.nics: |
8279 |
utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES) |
8280 |
|
8281 |
# check disks. parameter names and consistent adopt/no-adopt strategy
|
8282 |
has_adopt = has_no_adopt = False
|
8283 |
for disk in self.op.disks: |
8284 |
utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES) |
8285 |
if constants.IDISK_ADOPT in disk: |
8286 |
has_adopt = True
|
8287 |
else:
|
8288 |
has_no_adopt = True
|
8289 |
if has_adopt and has_no_adopt: |
8290 |
raise errors.OpPrereqError("Either all disks are adopted or none is", |
8291 |
errors.ECODE_INVAL) |
8292 |
if has_adopt:
|
8293 |
if self.op.disk_template not in constants.DTS_MAY_ADOPT: |
8294 |
raise errors.OpPrereqError("Disk adoption is not supported for the" |
8295 |
" '%s' disk template" %
|
8296 |
self.op.disk_template,
|
8297 |
errors.ECODE_INVAL) |
8298 |
if self.op.iallocator is not None: |
8299 |
raise errors.OpPrereqError("Disk adoption not allowed with an" |
8300 |
" iallocator script", errors.ECODE_INVAL)
|
8301 |
if self.op.mode == constants.INSTANCE_IMPORT: |
8302 |
raise errors.OpPrereqError("Disk adoption not allowed for" |
8303 |
" instance import", errors.ECODE_INVAL)
|
8304 |
else:
|
8305 |
if self.op.disk_template in constants.DTS_MUST_ADOPT: |
8306 |
raise errors.OpPrereqError("Disk template %s requires disk adoption," |
8307 |
" but no 'adopt' parameter given" %
|
8308 |
self.op.disk_template,
|
8309 |
errors.ECODE_INVAL) |
8310 |
|
8311 |
self.adopt_disks = has_adopt
|
8312 |
|
8313 |
# instance name verification
|
8314 |
if self.op.name_check: |
8315 |
self.hostname1 = netutils.GetHostname(name=self.op.instance_name) |
8316 |
self.op.instance_name = self.hostname1.name |
8317 |
# used in CheckPrereq for ip ping check
|
8318 |
self.check_ip = self.hostname1.ip |
8319 |
else:
|
8320 |
self.check_ip = None |
8321 |
|
8322 |
# file storage checks
|
8323 |
if (self.op.file_driver and |
8324 |
not self.op.file_driver in constants.FILE_DRIVER): |
8325 |
raise errors.OpPrereqError("Invalid file driver name '%s'" % |
8326 |
self.op.file_driver, errors.ECODE_INVAL)
|
8327 |
|
8328 |
if self.op.disk_template == constants.DT_FILE: |
8329 |
opcodes.RequireFileStorage() |
8330 |
elif self.op.disk_template == constants.DT_SHARED_FILE: |
8331 |
opcodes.RequireSharedFileStorage() |
8332 |
|
8333 |
### Node/iallocator related checks
|
8334 |
_CheckIAllocatorOrNode(self, "iallocator", "pnode") |
8335 |
|
8336 |
if self.op.pnode is not None: |
8337 |
if self.op.disk_template in constants.DTS_INT_MIRROR: |
8338 |
if self.op.snode is None: |
8339 |
raise errors.OpPrereqError("The networked disk templates need" |
8340 |
" a mirror node", errors.ECODE_INVAL)
|
8341 |
elif self.op.snode: |
8342 |
self.LogWarning("Secondary node will be ignored on non-mirrored disk" |
8343 |
" template")
|
8344 |
self.op.snode = None |
8345 |
|
8346 |
self._cds = _GetClusterDomainSecret()
|
8347 |
|
8348 |
if self.op.mode == constants.INSTANCE_IMPORT: |
8349 |
# On import force_variant must be True, because if we forced it at
|
8350 |
# initial install, our only chance when importing it back is that it
|
8351 |
# works again!
|
8352 |
self.op.force_variant = True |
8353 |
|
8354 |
if self.op.no_install: |
8355 |
self.LogInfo("No-installation mode has no effect during import") |
8356 |
|
8357 |
elif self.op.mode == constants.INSTANCE_CREATE: |
8358 |
if self.op.os_type is None: |
8359 |
raise errors.OpPrereqError("No guest OS specified", |
8360 |
errors.ECODE_INVAL) |
8361 |
if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os: |
8362 |
raise errors.OpPrereqError("Guest OS '%s' is not allowed for" |
8363 |
" installation" % self.op.os_type, |
8364 |
errors.ECODE_STATE) |
8365 |
if self.op.disk_template is None: |
8366 |
raise errors.OpPrereqError("No disk template specified", |
8367 |
errors.ECODE_INVAL) |
8368 |
|
8369 |
elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT: |
8370 |
# Check handshake to ensure both clusters have the same domain secret
|
8371 |
src_handshake = self.op.source_handshake
|
8372 |
if not src_handshake: |
8373 |
raise errors.OpPrereqError("Missing source handshake", |
8374 |
errors.ECODE_INVAL) |
8375 |
|
8376 |
errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
|
8377 |
src_handshake) |
8378 |
if errmsg:
|
8379 |
raise errors.OpPrereqError("Invalid handshake: %s" % errmsg, |
8380 |
errors.ECODE_INVAL) |
8381 |
|
8382 |
# Load and check source CA
|
8383 |
self.source_x509_ca_pem = self.op.source_x509_ca |
8384 |
if not self.source_x509_ca_pem: |
8385 |
raise errors.OpPrereqError("Missing source X509 CA", |
8386 |
errors.ECODE_INVAL) |
8387 |
|
8388 |
try:
|
8389 |
(cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
|
8390 |
self._cds)
|
8391 |
except OpenSSL.crypto.Error, err:
|
8392 |
raise errors.OpPrereqError("Unable to load source X509 CA (%s)" % |
8393 |
(err, ), errors.ECODE_INVAL) |
8394 |
|
8395 |
(errcode, msg) = utils.VerifyX509Certificate(cert, None, None) |
8396 |
if errcode is not None: |
8397 |
raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ), |
8398 |
errors.ECODE_INVAL) |
8399 |
|
8400 |
self.source_x509_ca = cert
|
8401 |
|
8402 |
src_instance_name = self.op.source_instance_name
|
8403 |
if not src_instance_name: |
8404 |
raise errors.OpPrereqError("Missing source instance name", |
8405 |
errors.ECODE_INVAL) |
8406 |
|
8407 |
self.source_instance_name = \
|
8408 |
netutils.GetHostname(name=src_instance_name).name |
8409 |
|
8410 |
else:
|
8411 |
raise errors.OpPrereqError("Invalid instance creation mode %r" % |
8412 |
self.op.mode, errors.ECODE_INVAL)
|
8413 |
|
8414 |
def ExpandNames(self): |
8415 |
"""ExpandNames for CreateInstance.
|
8416 |
|
8417 |
Figure out the right locks for instance creation.
|
8418 |
|
8419 |
"""
|
8420 |
self.needed_locks = {}
|
8421 |
|
8422 |
instance_name = self.op.instance_name
|
8423 |
# this is just a preventive check, but someone might still add this
|
8424 |
# instance in the meantime, and creation will fail at lock-add time
|
8425 |
if instance_name in self.cfg.GetInstanceList(): |
8426 |
raise errors.OpPrereqError("Instance '%s' is already in the cluster" % |
8427 |
instance_name, errors.ECODE_EXISTS) |
8428 |
|
8429 |
self.add_locks[locking.LEVEL_INSTANCE] = instance_name
|
8430 |
|
8431 |
if self.op.iallocator: |
8432 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
8433 |
else:
|
8434 |
self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode) |
8435 |
nodelist = [self.op.pnode]
|
8436 |
if self.op.snode is not None: |
8437 |
self.op.snode = _ExpandNodeName(self.cfg, self.op.snode) |
8438 |
nodelist.append(self.op.snode)
|
8439 |
self.needed_locks[locking.LEVEL_NODE] = nodelist
|
8440 |
|
8441 |
# in case of import lock the source node too
|
8442 |
if self.op.mode == constants.INSTANCE_IMPORT: |
8443 |
src_node = self.op.src_node
|
8444 |
src_path = self.op.src_path
|
8445 |
|
8446 |
if src_path is None: |
8447 |
self.op.src_path = src_path = self.op.instance_name |
8448 |
|
8449 |
if src_node is None: |
8450 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
8451 |
self.op.src_node = None |
8452 |
if os.path.isabs(src_path):
|
8453 |
raise errors.OpPrereqError("Importing an instance from a path" |
8454 |
" requires a source node option",
|
8455 |
errors.ECODE_INVAL) |
8456 |
else:
|
8457 |
self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node) |
8458 |
if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET: |
8459 |
self.needed_locks[locking.LEVEL_NODE].append(src_node)
|
8460 |
if not os.path.isabs(src_path): |
8461 |
self.op.src_path = src_path = \
|
8462 |
utils.PathJoin(constants.EXPORT_DIR, src_path) |
8463 |
|
8464 |
def _RunAllocator(self): |
8465 |
"""Run the allocator based on input opcode.
|
8466 |
|
8467 |
"""
|
8468 |
nics = [n.ToDict() for n in self.nics] |
8469 |
ial = IAllocator(self.cfg, self.rpc, |
8470 |
mode=constants.IALLOCATOR_MODE_ALLOC, |
8471 |
name=self.op.instance_name,
|
8472 |
disk_template=self.op.disk_template,
|
8473 |
tags=self.op.tags,
|
8474 |
os=self.op.os_type,
|
8475 |
vcpus=self.be_full[constants.BE_VCPUS],
|
8476 |
memory=self.be_full[constants.BE_MEMORY],
|
8477 |
disks=self.disks,
|
8478 |
nics=nics, |
8479 |
hypervisor=self.op.hypervisor,
|
8480 |
) |
8481 |
|
8482 |
ial.Run(self.op.iallocator)
|
8483 |
|
8484 |
if not ial.success: |
8485 |
raise errors.OpPrereqError("Can't compute nodes using" |
8486 |
" iallocator '%s': %s" %
|
8487 |
(self.op.iallocator, ial.info),
|
8488 |
errors.ECODE_NORES) |
8489 |
if len(ial.result) != ial.required_nodes: |
8490 |
raise errors.OpPrereqError("iallocator '%s' returned invalid number" |
8491 |
" of nodes (%s), required %s" %
|
8492 |
(self.op.iallocator, len(ial.result), |
8493 |
ial.required_nodes), errors.ECODE_FAULT) |
8494 |
self.op.pnode = ial.result[0] |
8495 |
self.LogInfo("Selected nodes for instance %s via iallocator %s: %s", |
8496 |
self.op.instance_name, self.op.iallocator, |
8497 |
utils.CommaJoin(ial.result)) |
8498 |
if ial.required_nodes == 2: |
8499 |
self.op.snode = ial.result[1] |
8500 |
|
8501 |
def BuildHooksEnv(self): |
8502 |
"""Build hooks env.
|
8503 |
|
8504 |
This runs on master, primary and secondary nodes of the instance.
|
8505 |
|
8506 |
"""
|
8507 |
env = { |
8508 |
"ADD_MODE": self.op.mode, |
8509 |
} |
8510 |
if self.op.mode == constants.INSTANCE_IMPORT: |
8511 |
env["SRC_NODE"] = self.op.src_node |
8512 |
env["SRC_PATH"] = self.op.src_path |
8513 |
env["SRC_IMAGES"] = self.src_images |
8514 |
|
8515 |
env.update(_BuildInstanceHookEnv( |
8516 |
name=self.op.instance_name,
|
8517 |
primary_node=self.op.pnode,
|
8518 |
secondary_nodes=self.secondaries,
|
8519 |
status=self.op.start,
|
8520 |
os_type=self.op.os_type,
|
8521 |
memory=self.be_full[constants.BE_MEMORY],
|
8522 |
vcpus=self.be_full[constants.BE_VCPUS],
|
8523 |
nics=_NICListToTuple(self, self.nics), |
8524 |
disk_template=self.op.disk_template,
|
8525 |
disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE]) |
8526 |
for d in self.disks], |
8527 |
bep=self.be_full,
|
8528 |
hvp=self.hv_full,
|
8529 |
hypervisor_name=self.op.hypervisor,
|
8530 |
tags=self.op.tags,
|
8531 |
)) |
8532 |
|
8533 |
return env
|
8534 |
|
8535 |
def BuildHooksNodes(self): |
8536 |
"""Build hooks nodes.
|
8537 |
|
8538 |
"""
|
8539 |
nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries |
8540 |
return nl, nl
|
8541 |
|
8542 |
def _ReadExportInfo(self): |
8543 |
"""Reads the export information from disk.
|
8544 |
|
8545 |
It will override the opcode source node and path with the actual
|
8546 |
information, if these two were not specified before.
|
8547 |
|
8548 |
@return: the export information
|
8549 |
|
8550 |
"""
|
8551 |
assert self.op.mode == constants.INSTANCE_IMPORT |
8552 |
|
8553 |
src_node = self.op.src_node
|
8554 |
src_path = self.op.src_path
|
8555 |
|
8556 |
if src_node is None: |
8557 |
locked_nodes = self.owned_locks(locking.LEVEL_NODE)
|
8558 |
exp_list = self.rpc.call_export_list(locked_nodes)
|
8559 |
found = False
|
8560 |
for node in exp_list: |
8561 |
if exp_list[node].fail_msg:
|
8562 |
continue
|
8563 |
if src_path in exp_list[node].payload: |
8564 |
found = True
|
8565 |
self.op.src_node = src_node = node
|
8566 |
self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
|
8567 |
src_path) |
8568 |
break
|
8569 |
if not found: |
8570 |
raise errors.OpPrereqError("No export found for relative path %s" % |
8571 |
src_path, errors.ECODE_INVAL) |
8572 |
|
8573 |
_CheckNodeOnline(self, src_node)
|
8574 |
result = self.rpc.call_export_info(src_node, src_path)
|
8575 |
result.Raise("No export or invalid export found in dir %s" % src_path)
|
8576 |
|
8577 |
export_info = objects.SerializableConfigParser.Loads(str(result.payload))
|
8578 |
if not export_info.has_section(constants.INISECT_EXP): |
8579 |
raise errors.ProgrammerError("Corrupted export config", |
8580 |
errors.ECODE_ENVIRON) |
8581 |
|
8582 |
ei_version = export_info.get(constants.INISECT_EXP, "version")
|
8583 |
if (int(ei_version) != constants.EXPORT_VERSION): |
8584 |
raise errors.OpPrereqError("Wrong export version %s (wanted %d)" % |
8585 |
(ei_version, constants.EXPORT_VERSION), |
8586 |
errors.ECODE_ENVIRON) |
8587 |
return export_info
|
8588 |
|
8589 |
def _ReadExportParams(self, einfo): |
8590 |
"""Use export parameters as defaults.
|
8591 |
|
8592 |
In case the opcode doesn't specify (as in override) some instance
|
8593 |
parameters, then try to use them from the export information, if
|
8594 |
that declares them.
|
8595 |
|
8596 |
"""
|
8597 |
self.op.os_type = einfo.get(constants.INISECT_EXP, "os") |
8598 |
|
8599 |
if self.op.disk_template is None: |
8600 |
if einfo.has_option(constants.INISECT_INS, "disk_template"): |
8601 |
self.op.disk_template = einfo.get(constants.INISECT_INS,
|
8602 |
"disk_template")
|
8603 |
if self.op.disk_template not in constants.DISK_TEMPLATES: |
8604 |
raise errors.OpPrereqError("Disk template specified in configuration" |
8605 |
" file is not one of the allowed values:"
|
8606 |
" %s" % " ".join(constants.DISK_TEMPLATES)) |
8607 |
else:
|
8608 |
raise errors.OpPrereqError("No disk template specified and the export" |
8609 |
" is missing the disk_template information",
|
8610 |
errors.ECODE_INVAL) |
8611 |
|
8612 |
if not self.op.disks: |
8613 |
disks = [] |
8614 |
# TODO: import the disk iv_name too
|
8615 |
for idx in range(constants.MAX_DISKS): |
8616 |
if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx): |
8617 |
disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
|
8618 |
disks.append({constants.IDISK_SIZE: disk_sz}) |
8619 |
self.op.disks = disks
|
8620 |
if not disks and self.op.disk_template != constants.DT_DISKLESS: |
8621 |
raise errors.OpPrereqError("No disk info specified and the export" |
8622 |
" is missing the disk information",
|
8623 |
errors.ECODE_INVAL) |
8624 |
|
8625 |
if not self.op.nics: |
8626 |
nics = [] |
8627 |
for idx in range(constants.MAX_NICS): |
8628 |
if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx): |
8629 |
ndict = {} |
8630 |
for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]: |
8631 |
v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
|
8632 |
ndict[name] = v |
8633 |
nics.append(ndict) |
8634 |
else:
|
8635 |
break
|
8636 |
self.op.nics = nics
|
8637 |
|
8638 |
if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"): |
8639 |
self.op.tags = einfo.get(constants.INISECT_INS, "tags").split() |
8640 |
|
8641 |
if (self.op.hypervisor is None and |
8642 |
einfo.has_option(constants.INISECT_INS, "hypervisor")):
|
8643 |
self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor") |
8644 |
|
8645 |
if einfo.has_section(constants.INISECT_HYP):
|
8646 |
# use the export parameters but do not override the ones
|
8647 |
# specified by the user
|
8648 |
for name, value in einfo.items(constants.INISECT_HYP): |
8649 |
if name not in self.op.hvparams: |
8650 |
self.op.hvparams[name] = value
|
8651 |
|
8652 |
if einfo.has_section(constants.INISECT_BEP):
|
8653 |
# use the parameters, without overriding
|
8654 |
for name, value in einfo.items(constants.INISECT_BEP): |
8655 |
if name not in self.op.beparams: |
8656 |
self.op.beparams[name] = value
|
8657 |
else:
|
8658 |
# try to read the parameters old style, from the main section
|
8659 |
for name in constants.BES_PARAMETERS: |
8660 |
if (name not in self.op.beparams and |
8661 |
einfo.has_option(constants.INISECT_INS, name)): |
8662 |
self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
|
8663 |
|
8664 |
if einfo.has_section(constants.INISECT_OSP):
|
8665 |
# use the parameters, without overriding
|
8666 |
for name, value in einfo.items(constants.INISECT_OSP): |
8667 |
if name not in self.op.osparams: |
8668 |
self.op.osparams[name] = value
|
8669 |
|
8670 |
def _RevertToDefaults(self, cluster): |
8671 |
"""Revert the instance parameters to the default values.
|
8672 |
|
8673 |
"""
|
8674 |
# hvparams
|
8675 |
hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {}) |
8676 |
for name in self.op.hvparams.keys(): |
8677 |
if name in hv_defs and hv_defs[name] == self.op.hvparams[name]: |
8678 |
del self.op.hvparams[name] |
8679 |
# beparams
|
8680 |
be_defs = cluster.SimpleFillBE({}) |
8681 |
for name in self.op.beparams.keys(): |
8682 |
if name in be_defs and be_defs[name] == self.op.beparams[name]: |
8683 |
del self.op.beparams[name] |
8684 |
# nic params
|
8685 |
nic_defs = cluster.SimpleFillNIC({}) |
8686 |
for nic in self.op.nics: |
8687 |
for name in constants.NICS_PARAMETERS: |
8688 |
if name in nic and name in nic_defs and nic[name] == nic_defs[name]: |
8689 |
del nic[name]
|
8690 |
# osparams
|
8691 |
os_defs = cluster.SimpleFillOS(self.op.os_type, {})
|
8692 |
for name in self.op.osparams.keys(): |
8693 |
if name in os_defs and os_defs[name] == self.op.osparams[name]: |
8694 |
del self.op.osparams[name] |
8695 |
|
8696 |
def _CalculateFileStorageDir(self): |
8697 |
"""Calculate final instance file storage dir.
|
8698 |
|
8699 |
"""
|
8700 |
# file storage dir calculation/check
|
8701 |
self.instance_file_storage_dir = None |
8702 |
if self.op.disk_template in constants.DTS_FILEBASED: |
8703 |
# build the full file storage dir path
|
8704 |
joinargs = [] |
8705 |
|
8706 |
if self.op.disk_template == constants.DT_SHARED_FILE: |
8707 |
get_fsd_fn = self.cfg.GetSharedFileStorageDir
|
8708 |
else:
|
8709 |
get_fsd_fn = self.cfg.GetFileStorageDir
|
8710 |
|
8711 |
cfg_storagedir = get_fsd_fn() |
8712 |
if not cfg_storagedir: |
8713 |
raise errors.OpPrereqError("Cluster file storage dir not defined") |
8714 |
joinargs.append(cfg_storagedir) |
8715 |
|
8716 |
if self.op.file_storage_dir is not None: |
8717 |
joinargs.append(self.op.file_storage_dir)
|
8718 |
|
8719 |
joinargs.append(self.op.instance_name)
|
8720 |
|
8721 |
# pylint: disable=W0142
|
8722 |
self.instance_file_storage_dir = utils.PathJoin(*joinargs)
|
8723 |
|
8724 |
def CheckPrereq(self): |
8725 |
"""Check prerequisites.
|
8726 |
|
8727 |
"""
|
8728 |
self._CalculateFileStorageDir()
|
8729 |
|
8730 |
if self.op.mode == constants.INSTANCE_IMPORT: |
8731 |
export_info = self._ReadExportInfo()
|
8732 |
self._ReadExportParams(export_info)
|
8733 |
|
8734 |
if (not self.cfg.GetVGName() and |
8735 |
self.op.disk_template not in constants.DTS_NOT_LVM): |
8736 |
raise errors.OpPrereqError("Cluster does not support lvm-based" |
8737 |
" instances", errors.ECODE_STATE)
|
8738 |
|
8739 |
if (self.op.hypervisor is None or |
8740 |
self.op.hypervisor == constants.VALUE_AUTO):
|
8741 |
self.op.hypervisor = self.cfg.GetHypervisorType() |
8742 |
|
8743 |
cluster = self.cfg.GetClusterInfo()
|
8744 |
enabled_hvs = cluster.enabled_hypervisors |
8745 |
if self.op.hypervisor not in enabled_hvs: |
8746 |
raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the" |
8747 |
" cluster (%s)" % (self.op.hypervisor, |
8748 |
",".join(enabled_hvs)),
|
8749 |
errors.ECODE_STATE) |
8750 |
|
8751 |
# Check tag validity
|
8752 |
for tag in self.op.tags: |
8753 |
objects.TaggableObject.ValidateTag(tag) |
8754 |
|
8755 |
# check hypervisor parameter syntax (locally)
|
8756 |
utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
|
8757 |
filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, |
8758 |
self.op.hvparams)
|
8759 |
hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
|
8760 |
hv_type.CheckParameterSyntax(filled_hvp) |
8761 |
self.hv_full = filled_hvp
|
8762 |
# check that we don't specify global parameters on an instance
|
8763 |
_CheckGlobalHvParams(self.op.hvparams)
|
8764 |
|
8765 |
# fill and remember the beparams dict
|
8766 |
default_beparams = cluster.beparams[constants.PP_DEFAULT] |
8767 |
for param, value in self.op.beparams.iteritems(): |
8768 |
if value == constants.VALUE_AUTO:
|
8769 |
self.op.beparams[param] = default_beparams[param]
|
8770 |
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
|
8771 |
self.be_full = cluster.SimpleFillBE(self.op.beparams) |
8772 |
|
8773 |
# build os parameters
|
8774 |
self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams) |
8775 |
|
8776 |
# now that hvp/bep are in final format, let's reset to defaults,
|
8777 |
# if told to do so
|
8778 |
if self.op.identify_defaults: |
8779 |
self._RevertToDefaults(cluster)
|
8780 |
|
8781 |
# NIC buildup
|
8782 |
self.nics = []
|
8783 |
for idx, nic in enumerate(self.op.nics): |
8784 |
nic_mode_req = nic.get(constants.INIC_MODE, None)
|
8785 |
nic_mode = nic_mode_req |
8786 |
if nic_mode is None or nic_mode == constants.VALUE_AUTO: |
8787 |
nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE] |
8788 |
|
8789 |
# in routed mode, for the first nic, the default ip is 'auto'
|
8790 |
if nic_mode == constants.NIC_MODE_ROUTED and idx == 0: |
8791 |
default_ip_mode = constants.VALUE_AUTO |
8792 |
else:
|
8793 |
default_ip_mode = constants.VALUE_NONE |
8794 |
|
8795 |
# ip validity checks
|
8796 |
ip = nic.get(constants.INIC_IP, default_ip_mode) |
8797 |
if ip is None or ip.lower() == constants.VALUE_NONE: |
8798 |
nic_ip = None
|
8799 |
elif ip.lower() == constants.VALUE_AUTO:
|
8800 |
if not self.op.name_check: |
8801 |
raise errors.OpPrereqError("IP address set to auto but name checks" |
8802 |
" have been skipped",
|
8803 |
errors.ECODE_INVAL) |
8804 |
nic_ip = self.hostname1.ip
|
8805 |
else:
|
8806 |
if not netutils.IPAddress.IsValid(ip): |
8807 |
raise errors.OpPrereqError("Invalid IP address '%s'" % ip, |
8808 |
errors.ECODE_INVAL) |
8809 |
nic_ip = ip |
8810 |
|
8811 |
# TODO: check the ip address for uniqueness
|
8812 |
if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip: |
8813 |
raise errors.OpPrereqError("Routed nic mode requires an ip address", |
8814 |
errors.ECODE_INVAL) |
8815 |
|
8816 |
# MAC address verification
|
8817 |
mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO) |
8818 |
if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): |
8819 |
mac = utils.NormalizeAndValidateMac(mac) |
8820 |
|
8821 |
try:
|
8822 |
self.cfg.ReserveMAC(mac, self.proc.GetECId()) |
8823 |
except errors.ReservationError:
|
8824 |
raise errors.OpPrereqError("MAC address %s already in use" |
8825 |
" in cluster" % mac,
|
8826 |
errors.ECODE_NOTUNIQUE) |
8827 |
|
8828 |
# Build nic parameters
|
8829 |
link = nic.get(constants.INIC_LINK, None)
|
8830 |
if link == constants.VALUE_AUTO:
|
8831 |
link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] |
8832 |
nicparams = {} |
8833 |
if nic_mode_req:
|
8834 |
nicparams[constants.NIC_MODE] = nic_mode |
8835 |
if link:
|
8836 |
nicparams[constants.NIC_LINK] = link |
8837 |
|
8838 |
check_params = cluster.SimpleFillNIC(nicparams) |
8839 |
objects.NIC.CheckParameterSyntax(check_params) |
8840 |
self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
|
8841 |
|
8842 |
# disk checks/pre-build
|
8843 |
default_vg = self.cfg.GetVGName()
|
8844 |
self.disks = []
|
8845 |
for disk in self.op.disks: |
8846 |
mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR) |
8847 |
if mode not in constants.DISK_ACCESS_SET: |
8848 |
raise errors.OpPrereqError("Invalid disk access mode '%s'" % |
8849 |
mode, errors.ECODE_INVAL) |
8850 |
size = disk.get(constants.IDISK_SIZE, None)
|
8851 |
if size is None: |
8852 |
raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL) |
8853 |
try:
|
8854 |
size = int(size)
|
8855 |
except (TypeError, ValueError): |
8856 |
raise errors.OpPrereqError("Invalid disk size '%s'" % size, |
8857 |
errors.ECODE_INVAL) |
8858 |
|
8859 |
data_vg = disk.get(constants.IDISK_VG, default_vg) |
8860 |
new_disk = { |
8861 |
constants.IDISK_SIZE: size, |
8862 |
constants.IDISK_MODE: mode, |
8863 |
constants.IDISK_VG: data_vg, |
8864 |
constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg), |
8865 |
} |
8866 |
if constants.IDISK_ADOPT in disk: |
8867 |
new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT] |
8868 |
self.disks.append(new_disk)
|
8869 |
|
    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, "name")
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      _CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      # Check lv size requirements, if not adopting
      req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
      _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

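    # When adopting, the volumes or block devices already exist: reserve the
    # names so no other job can grab them, verify they are present (and, for
    # LVs, inactive) on the primary node, and replace the requested sizes
    # with the sizes reported by the node.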
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (", ".join(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  self.instance_file_storage_dir,
                                  self.op.file_driver,
                                  0,
                                  feedback_fn)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        _CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed, reverting...")
        try:
          _RemoveDisks(self, iobj)
        finally:
          self.cfg.ReleaseDRBDMinors(instance)
          raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      _ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        _WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              iobj.disks, True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, iobj, False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              iobj.disks, False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")

        transfers = []

        for idx, image in enumerate(self.src_images):
          if not image:
            continue

          # FIXME: pass debug option from opcode to backend
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                             constants.IEIO_FILE, (image, ),
                                             constants.IEIO_SCRIPT,
                                             (iobj.disks[idx], idx),
                                             None)
          transfers.append(dt)

        import_result = \
          masterd.instance.TransferInstanceData(self, feedback_fn,
                                                self.op.src_node, pnode_name,
                                                self.pnode.secondary_ip,
                                                iobj, transfers)
        if not compat.all(import_result):
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
        feedback_fn("* preparing remote import...")
        # The source cluster will stop the instance before attempting to make a
        # connection. In some cases stopping an instance can take a long time,
        # hence the shutdown timeout is added to the connection timeout.
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                           self.op.source_shutdown_timeout)
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

        assert iobj.primary_node == self.pnode.name
        disk_results = \
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                        self.source_x509_ca,
                                        self._cds, timeouts)
        if not compat.all(disk_results):
          # TODO: Should the instance still be started, even if some disks
          # failed to import (valid for local imports, too)?
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (instance, pnode_name))

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   self.source_instance_name,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj,
                                            None, None, False)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise("Can't get node information from %s" % node)

    if instance.name not in node_insts.payload:
      if instance.admin_up:
        state = constants.INSTST_ERRORDOWN
      else:
        state = constants.INSTST_ADMINDOWN
      raise errors.OpExecError("Instance %s is not running (state %s)" %
                               (instance.name, state))

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)


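# Module-level helper so the console information can be computed from the
# cluster and instance objects alone, without needing a logical unit.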
def _GetInstanceConsole(cluster, instance):
  """Returns console information for an instance.

  @type cluster: L{objects.Cluster}
  @type instance: L{objects.Instance}
  @rtype: dict

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  # beparams and hvparams are passed separately, to avoid editing the
  # instance and then saving the defaults in the instance itself.
  hvparams = cluster.FillHV(instance)
  beparams = cluster.FillBE(instance)
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)

  assert console.instance == instance.name
  assert console.Validate()

  return console.ToDict()


class LUInstanceReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
                                  self.op.iallocator)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    assert locking.LEVEL_NODE not in self.needed_locks
    assert locking.LEVEL_NODEGROUP not in self.needed_locks

    assert self.op.iallocator is None or self.op.remote_node is None, \
      "Conflicting options"

    if self.op.remote_node is not None:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

      if self.op.iallocator is not None:
        # iallocator will select a new node in the same group
        self.needed_locks[locking.LEVEL_NODEGROUP] = []

    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
                                   self.op.iallocator, self.op.remote_node,
                                   self.op.disks, False, self.op.early_release)

    self.tasklets = [self.replacer]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert self.op.remote_node is None
      assert self.op.iallocator is not None
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.share_locks[locking.LEVEL_NODEGROUP] = 1
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)

    elif level == locking.LEVEL_NODE:
      if self.op.iallocator is not None:
        assert self.op.remote_node is None
        assert not self.needed_locks[locking.LEVEL_NODE]

        # Lock member nodes of all locked groups
        self.needed_locks[locking.LEVEL_NODE] = [node_name
          for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
          for node_name in self.cfg.GetNodeGroup(group_uuid).members]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.replacer.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self.replacer.instance
    nl = [
      self.cfg.GetMasterNode(),
      instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
            self.op.iallocator is None)

    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
    if owned_groups:
      _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)

    return LogicalUnit.CheckPrereq(self)


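# The disk replacement logic proper lives in the tasklet below;
# LUInstanceReplaceDisks only sets it up and registers it in self.tasklets.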
class TLReplaceDisks(Tasklet):
  """Replaces disks for an instance.

  Note: Locking is not within the scope of this class.

  """
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
               disks, delay_iallocator, early_release):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.mode = mode
    self.iallocator_name = iallocator_name
    self.remote_node = remote_node
    self.disks = disks
    self.delay_iallocator = delay_iallocator
    self.early_release = early_release

    # Runtime data
    self.instance = None
    self.new_node = None
    self.target_node = None
    self.other_node = None
    self.remote_node_info = None
    self.node_secondary_ip = None

  @staticmethod
  def CheckArguments(mode, remote_node, iallocator):
    """Helper function for users of this class.

    """
    # check for valid parameter combination
    if mode == constants.REPLACE_DISK_CHG:
      if remote_node is None and iallocator is None:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given", errors.ECODE_INVAL)

      if remote_node is not None and iallocator is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both", errors.ECODE_INVAL)

    elif remote_node is not None or iallocator is not None:
      # Not replacing the secondary
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " only be used when changing the"
                                 " secondary node", errors.ECODE_INVAL)

  @staticmethod
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
    """Compute a new secondary node using an IAllocator.

    """
    ial = IAllocator(lu.cfg, lu.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=instance_name,
                     relocate_from=list(relocate_from))

    ial.Run(iallocator_name)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (iallocator_name, ial.info),
                                 errors.ECODE_NORES)

    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (iallocator_name,
                                  len(ial.result), ial.required_nodes),
                                 errors.ECODE_FAULT)

    remote_node_name = ial.result[0]

    lu.LogInfo("Selected new secondary for instance '%s': %s",
               instance_name, remote_node_name)

    return remote_node_name

  def _FindFaultyDisks(self, node_name):
    """Wrapper for L{_FindFaultyInstanceDisks}.

    """
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                    node_name, True)

  def _CheckDisksActivated(self, instance):
    """Checks if the instance disks are activated.

    @param instance: The instance to check disks
    @return: True if they are activated, False otherwise

    """
    nodes = instance.all_nodes

    for idx, dev in enumerate(instance.disks):
      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)

        if result.offline:
          continue
        elif result.fail_msg or not result.payload:
          return False

    return True

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.instance_name

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances", errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes),
                                 errors.ECODE_FAULT)

    if not self.delay_iallocator:
      self._CheckPrereq2()

  def _CheckPrereq2(self):
    """Check prerequisites, second part.

    This function should always be part of CheckPrereq. It was separated and is
    now called from Exec because during node evacuation iallocator was only
    called with an unmodified cluster model, not taking planned changes into
    account.

    """
    instance = self.instance
    secondary_node = instance.secondary_nodes[0]

    if self.iallocator_name is None:
      remote_node = self.remote_node
    else:
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
                                       instance.name, instance.secondary_nodes)

    if remote_node is None:
      self.remote_node_info = None
    else:
      assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
        "Remote node '%s' is not locked" % remote_node

      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node

    if remote_node == self.instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance", errors.ECODE_INVAL)

    if remote_node == secondary_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance",
                                 errors.ECODE_INVAL)

    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
                                    constants.REPLACE_DISK_CHG):
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
                                 errors.ECODE_INVAL)

    if self.mode == constants.REPLACE_DISK_AUTO:
      if not self._CheckDisksActivated(instance):
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
                                   " first" % self.instance_name,
                                   errors.ECODE_STATE)
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
      faulty_secondary = self._FindFaultyDisks(secondary_node)

      if faulty_primary and faulty_secondary:
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
                                   " one node and can not be repaired"
                                   " automatically" % self.instance_name,
                                   errors.ECODE_STATE)

      if faulty_primary:
        self.disks = faulty_primary
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]
      elif faulty_secondary:
        self.disks = faulty_secondary
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]
      else:
        self.disks = []
        check_nodes = []

    else:
      # Non-automatic modes
      if self.mode == constants.REPLACE_DISK_PRI:
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_SEC:
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_CHG:
        self.new_node = remote_node
        self.other_node = instance.primary_node
        self.target_node = secondary_node
        check_nodes = [self.new_node, self.other_node]

        _CheckNodeNotDrained(self.lu, remote_node)
        _CheckNodeVmCapable(self.lu, remote_node)

        old_node_info = self.cfg.GetNodeInfo(secondary_node)
        assert old_node_info is not None
        if old_node_info.offline and not self.early_release:
          # doesn't make sense to delay the release
          self.early_release = True
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
                          " early-release mode", secondary_node)

      else:
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
                                     self.mode)

    # If not specified all disks should be replaced
    if not self.disks:
      self.disks = range(len(self.instance.disks))

    for node in check_nodes:
      _CheckNodeOnline(self.lu, node)

    touched_nodes = frozenset(node_name for node_name in [self.new_node,
                                                          self.other_node,
                                                          self.target_node]
                              if node_name is not None)

    # Release unneeded node locks
    _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)

    # Release any owned node group
    if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
      _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)

    # Check whether disks are valid
    for disk_idx in self.disks:
      instance.FindDisk(disk_idx)

    # Get secondary node IP addresses
    self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
                                  in self.cfg.GetMultiNodeInfo(touched_nodes))

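  # By the time disks are actually replaced, the node locks have been narrowed
  # to the nodes involved (see _CheckPrereq2); Exec only dispatches to one of
  # the two _ExecDrbd8* methods below.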
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    if self.delay_iallocator:
      self._CheckPrereq2()

    if __debug__:
      # Verify owned locks before starting operation
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
      assert set(owned_nodes) == set(self.node_secondary_ip), \
        ("Incorrect node locks, owning %s, expected %s" %
         (owned_nodes, self.node_secondary_ip.keys()))

      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
      assert list(owned_instances) == [self.instance_name], \
        "Instance '%s' not locked" % self.instance_name

      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
        "Should not own any node group lock at this point"

    if not self.disks:
      feedback_fn("No disks need replacement")
      return

    feedback_fn("Replacing disk(s) %s for %s" %
                (utils.CommaJoin(self.disks), self.instance.name))

    activate_disks = (not self.instance.admin_up)

    # Activate the instance disks if we're replacing them on a down instance
    if activate_disks:
      _StartInstanceDisks(self.lu, self.instance, True)

    try:
      # Should we replace the secondary node?
      if self.new_node is not None:
        fn = self._ExecDrbd8Secondary
      else:
        fn = self._ExecDrbd8DiskOnly

      result = fn(feedback_fn)
    finally:
      # Deactivate the instance disks if we're replacing them on a
      # down instance
      if activate_disks:
        _SafeShutdownInstanceDisks(self.lu, self.instance)

    if __debug__:
      # Verify owned locks
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
      nodes = frozenset(self.node_secondary_ip)
      assert ((self.early_release and not owned_nodes) or
              (not self.early_release and not (set(owned_nodes) - nodes))), \
        ("Not owning the correct locks, early_release=%s, owned=%r,"
         " nodes=%r" % (self.early_release, owned_nodes, nodes))

    return result

  def _CheckVolumeGroup(self, nodes):
    self.lu.LogInfo("Checking volume groups")

    vgname = self.cfg.GetVGName()

    # Make sure volume group exists on all involved nodes
    results = self.rpc.call_vg_list(nodes)
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")

    for node in nodes:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if vgname not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
                                 (vgname, node))

  def _CheckDisksExistence(self, nodes):
    # Check disk existence
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)

        msg = result.fail_msg
        if msg or not result.payload:
          if not msg:
            msg = "disk not found"
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                      (idx, node_name))

      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
                                   ldisk=ldisk):
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                 " replace disks for instance %s" %
                                 (node_name, self.instance.name))

  def _CreateNewStorage(self, node_name):
    """Create new storage on the primary or secondary node.

    This is only used for same-node replaces, not for changing the
    secondary node, hence we don't want to modify the existing disk.

    """
    iv_names = {}

    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))

      self.cfg.SetDiskID(dev, node_name)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

      vg_data = dev.children[0].logical_id[0]
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
                             logical_id=(vg_data, names[0]))
      vg_meta = dev.children[1].logical_id[0]
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vg_meta, names[1]))

      new_lvs = [lv_data, lv_meta]
      old_lvs = [child.Copy() for child in dev.children]
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    return iv_names

  def _CheckDevices(self, node_name, iv_names):
    for name, (dev, _, _) in iv_names.iteritems():
      self.cfg.SetDiskID(dev, node_name)

      result = self.rpc.call_blockdev_find(node_name, dev)

      msg = result.fail_msg
      if msg or not result.payload:
        if not msg:
          msg = "disk not found"
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))

      if result.payload.is_degraded:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

  def _RemoveOldStorage(self, node_name, iv_names):
    for name, (_, old_lvs, _) in iv_names.iteritems():
      self.lu.LogInfo("Remove logical volumes for %s" % name)

      for lv in old_lvs:
        self.cfg.SetDiskID(lv, node_name)

        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
        if msg:
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
                             hint="remove unused LVs manually")

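  # Both _ExecDrbd8* implementations below assume the relevant node locks are
  # held; with early_release the old storage is removed and the locks are
  # dropped before waiting for the resync.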
  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

    1. for each disk to be replaced:

      1. create new LVs on the target node with unique names
      1. detach old LVs from the drbd device
      1. rename old LVs to name_replaced.<time_t>
      1. rename new LVs to old LVs
      1. attach the new LVs (with the old names now) to the drbd device

    1. wait for sync across all devices

    1. for each modified disk:

      1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.other_node, self.target_node])
    self._CheckVolumeGroup([self.target_node, self.other_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.other_node,
                                self.other_node == self.instance.primary_node,
                                False)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    iv_names = self._CreateNewStorage(self.target_node)

    # Step: for each lv, detach+rename*2+attach
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)

      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" % (self.target_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)

      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" % self.target_node)

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.physical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" % self.target_node)

      # Intermediate steps of in memory modifications
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        self.cfg.SetDiskID(new, self.target_node)

      # We need to modify old_lvs so that removal later removes the
      # right LVs, not the newly added ones; note that old_lvs is a
      # copy here
      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        self.cfg.SetDiskID(disk, self.target_node)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
                                                  new_lvs)
      msg = result.fail_msg
      if msg:
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release both node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                    names=[self.target_node, self.other_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)

  def _ExecDrbd8Secondary(self, feedback_fn):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6

    pnode = self.instance.primary_node

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    # Step 4: drbd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
                              _GetInstanceInfoText(self.instance), False)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
                                               self.instance.disks)[pnode]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))

    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release all node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                    names=[self.instance.primary_node,
                           self.target_node,
                           self.new_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)


class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    if (constants.SO_FIX_CONSISTENCY not in
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " repaired" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def _CheckFaultyDisks(self, instance, node_name):
    """Ensure faulty disks abort the opcode or at least warn."""
    try:
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                  node_name, True):
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                   " node '%s'" % (instance.name, node_name),
                                   errors.ECODE_STATE)
    except errors.OpPrereqError, err:
      if self.op.ignore_consistency:
        self.proc.LogWarning(str(err.args[0]))
      else:
        raise

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      if not inst.admin_up:
        continue
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
      for inst_node_name in check_nodes:
        self._CheckFaultyDisks(inst, inst_node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


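# Node evacuation itself does not move any instance data; Exec computes the
# necessary moves and returns them wrapped in ResultWithJobs so they run as
# separate jobs.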
class LUNodeEvacuate(NoHooksLU): |
10231 |
"""Evacuates instances off a list of nodes.
|
10232 |
|
10233 |
"""
|
10234 |
REQ_BGL = False
|
10235 |
|
10236 |
def CheckArguments(self): |
10237 |
_CheckIAllocatorOrNode(self, "iallocator", "remote_node") |
10238 |
|
10239 |
def ExpandNames(self): |
10240 |
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    if self.op.remote_node is not None:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      assert self.op.remote_node

      if self.op.remote_node == self.op.node_name:
        raise errors.OpPrereqError("Can not use evacuated node as a new"
                                   " secondary node", errors.ECODE_INVAL)

      if self.op.mode != constants.IALLOCATOR_NEVAC_SEC:
        raise errors.OpPrereqError("Without the use of an iallocator only"
                                   " secondary instances can be evacuated",
                                   errors.ECODE_INVAL)

    # Declare locks
    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

    if self.op.remote_node is None:
      # Iallocator will choose any node(s) in the same group
      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
    else:
      group_nodes = frozenset([self.op.remote_node])

    # Determine nodes to be locked
    self.lock_nodes = set([self.op.node_name]) | group_nodes

  def _DetermineInstances(self):
    """Builds list of instances to operate on.

    """
    assert self.op.mode in constants.IALLOCATOR_NEVAC_MODES

    if self.op.mode == constants.IALLOCATOR_NEVAC_PRI:
      # Primary instances only
      inst_fn = _GetNodePrimaryInstances
      assert self.op.remote_node is None, \
        "Evacuating primary instances requires iallocator"
    elif self.op.mode == constants.IALLOCATOR_NEVAC_SEC:
      # Secondary instances only
      inst_fn = _GetNodeSecondaryInstances
    else:
      # All instances
      assert self.op.mode == constants.IALLOCATOR_NEVAC_ALL
      inst_fn = _GetNodeInstances

    return inst_fn(self.cfg, self.op.node_name)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        set(i.name for i in self._DetermineInstances())

    elif level == locking.LEVEL_NODEGROUP:
      # Lock node groups optimistically, needs verification once nodes have
      # been acquired
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)

    elif level == locking.LEVEL_NODE:
      self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes

  def CheckPrereq(self):
    # Verify locks
    owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
    owned_nodes = self.owned_locks(locking.LEVEL_NODE)
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)

    assert owned_nodes == self.lock_nodes

    wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
    if owned_groups != wanted_groups:
      raise errors.OpExecError("Node groups changed since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(wanted_groups),
                                utils.CommaJoin(owned_groups)))

    # Determine affected instances
    self.instances = self._DetermineInstances()
    self.instance_names = [i.name for i in self.instances]

    if set(self.instance_names) != owned_instances:
      raise errors.OpExecError("Instances on node '%s' changed since locks"
                               " were acquired, current instances are '%s',"
                               " used to be '%s'" %
                               (self.op.node_name,
                                utils.CommaJoin(self.instance_names),
                                utils.CommaJoin(owned_instances)))

    if self.instance_names:
      self.LogInfo("Evacuating instances from node '%s': %s",
                   self.op.node_name,
                   utils.CommaJoin(utils.NiceSort(self.instance_names)))
    else:
      self.LogInfo("No instances to evacuate from node '%s'",
                   self.op.node_name)

    if self.op.remote_node is not None:
      for i in self.instances:
        if i.primary_node == self.op.remote_node:
          raise errors.OpPrereqError("Node %s is the primary node of"
                                     " instance %s, cannot use it as"
                                     " secondary" %
                                     (self.op.remote_node, i.name),
                                     errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)

    if not self.instance_names:
      # No instances to evacuate
      jobs = []

    elif self.op.iallocator is not None:
      # TODO: Implement relocation to other group
      ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
                       evac_mode=self.op.mode,
                       instances=list(self.instance_names))

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute node evacuation using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)

    elif self.op.remote_node is not None:
      assert self.op.mode == constants.IALLOCATOR_NEVAC_SEC
      jobs = [
        [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
                                        remote_node=self.op.remote_node,
                                        disks=[],
                                        mode=constants.REPLACE_DISK_CHG,
                                        early_release=self.op.early_release)]
        for instance_name in self.instance_names
        ]

    else:
      raise errors.ProgrammerError("No iallocator or remote node")

    return ResultWithJobs(jobs)
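
# Illustrative sketch (editor's addition, not part of the original module):
# LUNodeEvacuate is normally driven through the opcode layer.  Assuming the
# opcode fields mirror the self.op attributes used above (node_name, mode,
# iallocator, remote_node -- names inferred, not a verified signature), a
# secondary-only evacuation via an iallocator would be submitted roughly as:
#
#   op = opcodes.OpNodeEvacuate(node_name="node2.example.com",
#                               mode=constants.IALLOCATOR_NEVAC_SEC,
#                               iallocator="hail")
#
# Exec() returns a ResultWithJobs, so the generated replace-disks jobs are
# submitted separately and their job IDs reported back to the caller.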


def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op


def _NodeEvacDest(use_nodes, group, nodes):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(nodes)
  else:
    return group


def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
  """Unpacks the result of change-group and node-evacuate iallocator requests.

  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.

  @type lu: L{LogicalUnit}
  @param lu: Logical unit instance
  @type alloc_result: tuple/list
  @param alloc_result: Result from iallocator
  @type early_release: bool
  @param early_release: Whether to release locks early if possible
  @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
  (moved, failed, jobs) = alloc_result

  if failed:
    lu.LogWarning("Unable to evacuate instances %s",
                  utils.CommaJoin("%s (%s)" % (name, reason)
                                  for (name, reason) in failed))

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin("%s (to %s)" %
                               (name, _NodeEvacDest(use_nodes, group, nodes))
                               for (name, group, nodes) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]
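
# For reference (editor's addition): as used by _LoadNodeEvacResult above,
# ``alloc_result`` is a three-element sequence::
#
#   (moved, failed, jobs)
#     moved:  list of (instance_name, target_group, target_nodes) tuples
#     failed: list of (instance_name, reason) tuples
#     jobs:   list of jobs, each a list of serialized opcodes that are
#             re-instantiated via opcodes.OpCode.LoadOpCode
#
# This structure is inferred from the unpacking code above, not from the
# iallocator protocol documentation.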


class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if instance.disk_template not in (constants.DT_FILE,
                                      constants.DT_SHARED_FILE):
      # TODO: check the free disk space for file, when that feature will be
      # supported
      _CheckNodesFreeDiskPerVG(self, nodenames,
                               self.disk.ComputeGrowth(self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    # First run all grow ops in dry-run mode
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, True)
      result.Raise("Grow request failed to node %s" % node)

    # We know that (as far as we can test) operations across different
    # nodes will succeed, time to run it for real
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, False)
      result.Raise("Grow request failed to node %s" % node)

    # TODO: Rewrite code to work properly
    # DRBD goes into sync mode for a short amount of time after executing the
    # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
    # calling "resize" in sync mode fails. Sleeping for a short amount of
    # time is a work-around.
    time.sleep(5)

    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance, feedback_fn)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Disk sync-ing has not returned a good"
                             " status; please check the instance")
      if not instance.admin_up:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif not instance.admin_up:
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
                           " not supposed to be running because no wait for"
                           " sync mode was requested")
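
# Illustrative sketch (editor's addition): growing disk 0 of an instance by
# 2 GiB through the opcode layer.  The field names (instance_name, disk,
# amount, wait_for_sync) mirror the self.op attributes used by
# LUInstanceGrowDisk above; treat them as assumptions rather than a verified
# opcode signature.
#
#   op = opcodes.OpInstanceGrowDisk(instance_name="web1.example.com",
#                                   disk=0, amount=2048,
#                                   wait_for_sync=True)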


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = _ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking and level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)

    self.wanted_instances = \
        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))

  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance.name, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatus,
                                        instance, snode),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    pri_nodes = self.cfg.GetMultiNodeInfo(i.primary_node
                                          for i in self.wanted_instances)
    for instance, (_, pnode) in zip(self.wanted_instances, pri_nodes):
      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"

      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
                  instance.disks)

      result[instance.name] = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result
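
# Illustrative sketch (editor's addition): LUInstanceQueryData returns a dict
# keyed by instance name; each value is the per-instance dict built in Exec()
# above ("config_state", "run_state", "disks", "hv_actual", ...).  Assuming
# the opcode fields mirror self.op (instances, static, use_locking -- names
# inferred, not verified), a live query for one instance would look roughly
# like:
#
#   op = opcodes.OpInstanceQueryData(instances=["web1.example.com"],
#                                    static=False, use_locking=True)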


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      _CheckGlobalHvParams(self.op.hvparams)

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
        if not isinstance(disk_dict, dict):
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                     errors.ECODE_INVAL)
        size = disk_dict.get(constants.IDISK_SIZE, None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing",
                                     errors.ECODE_INVAL)
        try:
          size = int(size)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err), errors.ECODE_INVAL)
        disk_dict[constants.IDISK_SIZE] = size
      else:
        # modification of disk
        if constants.IDISK_SIZE in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk", errors.ECODE_INVAL)

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
        if not isinstance(nic_dict, dict):
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      # nic_dict should be a dict
      nic_ip = nic_dict.get(constants.INIC_IP, None)
      if nic_ip is not None:
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict[constants.INIC_IP] = None
        else:
          if not netutils.IPAddress.IsValid(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
                                       errors.ECODE_INVAL)

      nic_bridge = nic_dict.get("bridge", None)
      nic_link = nic_dict.get(constants.INIC_LINK, None)
      if nic_bridge and nic_link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time", errors.ECODE_INVAL)
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
        nic_dict["bridge"] = None
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
        nic_dict[constants.INIC_LINK] = None

      if nic_op == constants.DDM_ADD:
        nic_mac = nic_dict.get(constants.INIC_MAC, None)
        if nic_mac is None:
          nic_dict[constants.INIC_MAC] = constants.VALUE_AUTO

      if constants.INIC_MAC in nic_dict:
        nic_mac = nic_dict[constants.INIC_MAC]
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)

        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic",
                                     errors.ECODE_INVAL)

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)
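
  # For reference (editor's addition): as validated above, self.op.disks and
  # self.op.nics are lists of (operation, parameters) pairs, where the
  # operation is constants.DDM_ADD, constants.DDM_REMOVE or the integer index
  # of an existing device, e.g.:
  #
  #   disks=[(constants.DDM_ADD, {constants.IDISK_SIZE: 1024})]
  #   nics=[(0, {constants.INIC_IP: "192.0.2.10"})]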

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args["memory"] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args["nics"] = []
      nic_override = dict(self.op.nics)
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if constants.INIC_IP in this_nic_override:
          ip = this_nic_override[constants.INIC_IP]
        else:
          ip = nic.ip
        if constants.INIC_MAC in this_nic_override:
          mac = this_nic_override[constants.INIC_MAC]
        else:
          mac = nic.mac
        if idx in self.nic_pnew:
          nicparams = self.nic_pnew[idx]
        else:
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args["nics"].append((ip, mac, mode, link))
      if constants.DDM_ADD in nic_override:
        ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None)
        mac = nic_override[constants.DDM_ADD][constants.INIC_MAC]
        nicparams = self.nic_pnew[constants.DDM_ADD]
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args["nics"].append((ip, mac, mode, link))
      elif constants.DDM_REMOVE in nic_override:
        del args["nics"][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # OS change
    if self.op.os_name and not self.op.force:
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                      self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    if self.op.disk_template:
      if instance.disk_template == self.op.disk_template:
        raise errors.OpPrereqError("Instance already has disk template %s" %
                                   instance.disk_template, errors.ECODE_INVAL)

      if (instance.disk_template,
          self.op.disk_template) not in self._DISK_CONVERSIONS:
        raise errors.OpPrereqError("Unsupported disk template conversion from"
                                   " %s to %s" % (instance.disk_template,
                                                  self.op.disk_template),
                                   errors.ECODE_INVAL)
      _CheckInstanceDown(self, instance, "cannot change disk template")
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.remote_node == pnode:
          raise errors.OpPrereqError("Given new secondary node %s is the same"
                                     " as the primary node of the instance" %
                                     self.op.remote_node, errors.ECODE_STATE)
        _CheckNodeOnline(self, self.op.remote_node)
        _CheckNodeNotDrained(self, self.op.remote_node)
        # FIXME: here we assume that the old instance type is DT_PLAIN
        assert instance.disk_template == constants.DT_PLAIN
        disks = [{constants.IDISK_SIZE: d.size,
                  constants.IDISK_VG: d.logical_id[0]}
                 for d in instance.disks]
        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
                                   use_none=True)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1, instance.hypervisor)

    # osparams processing
    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    self.warn = []

    if (constants.BE_MEMORY in self.op.beparams and not self.op.force and
        be_new[constants.BE_MEMORY] > be_old[constants.BE_MEMORY]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         instance.hypervisor)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      elif not isinstance(pninfo.payload.get("memory_free", None), int):
        self.warn.append("Node data from primary node %s doesn't contain"
                         " free memory information" % pnode)
      elif instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      else:
        if instance_info.payload:
          current_mem = int(instance_info.payload["memory"])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    pninfo.payload["memory_free"])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem,
                                     errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          if not isinstance(nres.payload.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          elif be_new[constants.BE_MEMORY] > nres.payload["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)

    # NIC processing
    self.nic_pnew = {}
    self.nic_pinst = {}
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
                                     errors.ECODE_INVAL)
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if not instance.nics:
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
                                     " no NICs" % nic_op,
                                     errors.ECODE_INVAL)
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics) - 1),
                                     errors.ECODE_INVAL)
        old_nic_params = instance.nics[nic_op].nicparams
        old_nic_ip = instance.nics[nic_op].ip
      else:
        old_nic_params = {}
        old_nic_ip = None

      update_params_dict = dict([(key, nic_dict[key])
                                 for key in constants.NICS_PARAMETERS
                                 if key in nic_dict])

      if "bridge" in nic_dict:
        update_params_dict[constants.NIC_LINK] = nic_dict["bridge"]

      new_nic_params = _GetUpdatedParams(old_nic_params,
                                         update_params_dict)
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
      self.nic_pinst[nic_op] = new_nic_params
      self.nic_pnew[nic_op] = new_filled_nic_params
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]

      if new_nic_mode == constants.NIC_MODE_BRIDGED:
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
        if msg:
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
          if self.op.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
      if new_nic_mode == constants.NIC_MODE_ROUTED:
        if constants.INIC_IP in nic_dict:
          nic_ip = nic_dict[constants.INIC_IP]
        else:
          nic_ip = old_nic_ip
        if nic_ip is None:
          raise errors.OpPrereqError("Cannot set the nic ip to None"
                                     " on a routed nic", errors.ECODE_INVAL)
      if constants.INIC_MAC in nic_dict:
        nic_mac = nic_dict[constants.INIC_MAC]
        if nic_mac is None:
          raise errors.OpPrereqError("Cannot set the nic mac to None",
                                     errors.ECODE_INVAL)
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict[constants.INIC_MAC] = \
            self.cfg.GenerateMAC(self.proc.GetECId())
        else:
          # or validate/reserve the current one
          try:
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
          except errors.ReservationError:
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac,
                                       errors.ECODE_NOTUNIQUE)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances",
                                 errors.ECODE_INVAL)
    for disk_op, _ in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance", errors.ECODE_INVAL)
        _CheckInstanceDown(self, instance, "cannot remove disks")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS,
                                   errors.ECODE_STATE)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)),
                                     errors.ECODE_INVAL)

    return

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0]}
                 for d in instance.disks]
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                      instance.name, pnode, [snode],
                                      disk_info, None, None, 0, feedback_fn)
    info = _GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in new_disks:
      # unfortunately this is... not too nice
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                            info, True)
      for child in disk.children:
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    for disk in new_disks:
      for node in [pnode, snode]:
        f_create = node == pnode
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # disks are created, waiting for sync
    disk_abort = not _WaitForSync(self, instance,
                                  oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance
    assert len(instance.secondary_nodes) == 1
    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = instance.disks
    new_disks = [d.children[0] for d in old_disks]

    # copy over size and mode
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    self.cfg.Update(instance, feedback_fn)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template in (constants.DT_FILE,
                                      constants.DT_SHARED_FILE):
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base, feedback_fn)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict[constants.IDISK_MODE]
        result.append(("disk.mode/%d" % disk_op,
                       disk_dict[constants.IDISK_MODE]))

    if self.op.disk_template:
      r_shut = _ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict[constants.INIC_MAC]
        ip = nic_dict.get(constants.INIC_IP, None)
        nicparams = self.nic_pinst[constants.DDM_ADD]
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
                       (new_nic.mac, new_nic.ip,
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
                       )))
      else:
        for key in (constants.INIC_MAC, constants.INIC_IP):
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
        if nic_op in self.nic_pinst:
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
        for key, val in nic_dict.iteritems():
          result.append(("nic.%s/%d" % (key, nic_op), val))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    self.cfg.Update(instance, feedback_fn)

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }
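
# Illustrative sketch (editor's addition): a typical modification submitted
# through the opcode layer, adding a 1 GiB disk and raising the memory limit.
# Field names mirror the self.op attributes used by LUInstanceSetParams above
# and are assumptions, not a verified opcode signature.
#
#   op = opcodes.OpInstanceSetParams(instance_name="web1.example.com",
#                                    disks=[(constants.DDM_ADD,
#                                            {constants.IDISK_SIZE: 1024})],
#                                    beparams={constants.BE_MEMORY: 2048})
#
# Exec() reports the applied changes as a list of (parameter, value) pairs,
# which take effect at the next restart of the instance.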


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                           owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
                     instances=instances, target_groups=list(self.target_uuids))

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info),
                                 errors.ECODE_NORES)

    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)
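
# Illustrative sketch (editor's addition): like LUNodeEvacuate, this LU only
# computes a plan via the iallocator and returns the resulting jobs for
# separate execution.  Assuming the opcode fields mirror self.op
# (instance_name, target_groups, iallocator -- names inferred, not verified):
#
#   op = opcodes.OpInstanceChangeGroup(instance_name="web1.example.com",
#                                      target_groups=["group2"],
#                                      iallocator="hail")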


class LUBackupQuery(NoHooksLU):
  """Query the exports list

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    self.nodes = self.owned_locks(locking.LEVEL_NODE)
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      if rpcresult[node].fail_msg:
        result[node] = False
      else:
        result[node] = rpcresult[node].payload

    return result
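
# For reference (editor's addition): the result of LUBackupQuery is a dict
# mapping node name to either the export list returned by that node's RPC
# payload or False when the list could not be fetched, e.g.:
#
#   {"node1.example.com": ["web1.example.com"], "node2.example.com": False}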


class LUBackupPrepare(NoHooksLU):
  """Prepares an instance for an export and returns useful information.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    instance_name = self.op.instance_name

    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self._cds = _GetClusterDomainSecret()

  def Exec(self, feedback_fn):
    """Prepares an instance for an export.

    """
    instance = self.instance

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      salt = utils.GenerateSecret(8)

      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
      result = self.rpc.call_x509_cert_create(instance.primary_node,
                                              constants.RIE_CERT_VALIDITY)
      result.Raise("Can't create X509 key and certificate on %s" % result.node)

      (name, cert_pem) = result.payload

      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             cert_pem)

      return {
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
                          salt),
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
        }

    return None
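
# For reference (editor's addition): for remote exports LUBackupPrepare
# returns the handshake material built in Exec() above -- a dict with the
# keys "handshake", "x509_key_name" and "x509_ca" -- which the importing
# side needs to verify the transfer; for local exports it returns None.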
class LUBackupExport(LogicalUnit): |
11659 |
"""Export an instance to an image in the cluster.
|
11660 |
|
11661 |
"""
|
11662 |
HPATH = "instance-export"
|
11663 |
HTYPE = constants.HTYPE_INSTANCE |
11664 |
REQ_BGL = False
|
11665 |
|
11666 |
def CheckArguments(self): |
11667 |
"""Check the arguments.
|
11668 |
|
11669 |
"""
|
11670 |
self.x509_key_name = self.op.x509_key_name |
11671 |
self.dest_x509_ca_pem = self.op.destination_x509_ca |
11672 |
|
11673 |
if self.op.mode == constants.EXPORT_MODE_REMOTE: |
11674 |
if not self.x509_key_name: |
11675 |
raise errors.OpPrereqError("Missing X509 key name for encryption", |
11676 |
errors.ECODE_INVAL) |
11677 |
|
11678 |
if not self.dest_x509_ca_pem: |
11679 |
raise errors.OpPrereqError("Missing destination X509 CA", |
11680 |
errors.ECODE_INVAL) |
11681 |
|
11682 |
def ExpandNames(self): |
11683 |
self._ExpandAndLockInstance()
|
11684 |
|
11685 |
# Lock all nodes for local exports
|
11686 |
if self.op.mode == constants.EXPORT_MODE_LOCAL: |
11687 |
# FIXME: lock only instance primary and destination node
|
11688 |
#
|
11689 |
# Sad but true, for now we have do lock all nodes, as we don't know where
|
11690 |
# the previous export might be, and in this LU we search for it and
|
11691 |
# remove it from its current node. In the future we could fix this by:
|
11692 |
# - making a tasklet to search (share-lock all), then create the
|
11693 |
# new one, then one to remove, after
|
11694 |
# - removing the removal operation altogether
|
11695 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
11696 |
|
11697 |
def DeclareLocks(self, level): |
11698 |
"""Last minute lock declaration."""
|
11699 |
# All nodes are locked anyway, so nothing to do here.
|
11700 |
|
11701 |
def BuildHooksEnv(self): |
11702 |
"""Build hooks env.
|
11703 |
|
11704 |
This will run on the master, primary node and target node.
|
11705 |
|
11706 |
"""
|
11707 |
env = { |
11708 |
"EXPORT_MODE": self.op.mode, |
11709 |
"EXPORT_NODE": self.op.target_node, |
11710 |
"EXPORT_DO_SHUTDOWN": self.op.shutdown, |
11711 |
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout, |
11712 |
# TODO: Generic function for boolean env variables
|
11713 |
"REMOVE_INSTANCE": str(bool(self.op.remove_instance)), |
11714 |
} |
11715 |
|
11716 |
env.update(_BuildInstanceHookEnvByObject(self, self.instance)) |
11717 |
|
11718 |
return env
|
11719 |
|
11720 |
def BuildHooksNodes(self): |
11721 |
"""Build hooks nodes.
|
11722 |
|
11723 |
"""
|
11724 |
nl = [self.cfg.GetMasterNode(), self.instance.primary_node] |
11725 |
|
11726 |
if self.op.mode == constants.EXPORT_MODE_LOCAL: |
11727 |
nl.append(self.op.target_node)
|
11728 |
|
11729 |
return (nl, nl)
|
11730 |
|
11731 |
def CheckPrereq(self): |
11732 |
"""Check prerequisites.
|
11733 |
|
11734 |
This checks that the instance and node names are valid.
|
11735 |
|
11736 |
"""
|
11737 |
instance_name = self.op.instance_name
|
11738 |
|
11739 |
self.instance = self.cfg.GetInstanceInfo(instance_name) |
11740 |
assert self.instance is not None, \ |
11741 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
11742 |
_CheckNodeOnline(self, self.instance.primary_node) |
11743 |
|
11744 |
    if (self.op.remove_instance and self.instance.admin_up and
        not self.op.shutdown):
      raise errors.OpPrereqError("Can not remove instance without shutting it"
                                 " down first", errors.ECODE_STATE)
|
11748 |
|
11749 |
if self.op.mode == constants.EXPORT_MODE_LOCAL: |
11750 |
self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node) |
11751 |
self.dst_node = self.cfg.GetNodeInfo(self.op.target_node) |
11752 |
assert self.dst_node is not None |
11753 |
|
11754 |
_CheckNodeOnline(self, self.dst_node.name) |
11755 |
_CheckNodeNotDrained(self, self.dst_node.name) |
11756 |
|
11757 |
self._cds = None |
11758 |
self.dest_disk_info = None |
11759 |
self.dest_x509_ca = None |
11760 |
|
11761 |
elif self.op.mode == constants.EXPORT_MODE_REMOTE: |
11762 |
self.dst_node = None |
11763 |
|
11764 |
if len(self.op.target_node) != len(self.instance.disks): |
11765 |
raise errors.OpPrereqError(("Received destination information for %s" |
11766 |
" disks, but instance %s has %s disks") %
|
11767 |
(len(self.op.target_node), instance_name, |
11768 |
len(self.instance.disks)), |
11769 |
errors.ECODE_INVAL) |
11770 |
|
11771 |
cds = _GetClusterDomainSecret() |
11772 |
|
11773 |
# Check X509 key name
|
11774 |
try:
|
11775 |
(key_name, hmac_digest, hmac_salt) = self.x509_key_name
|
11776 |
except (TypeError, ValueError), err: |
11777 |
raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err) |
11778 |
|
11779 |
if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt): |
11780 |
raise errors.OpPrereqError("HMAC for X509 key name is wrong", |
11781 |
errors.ECODE_INVAL) |
11782 |
|
11783 |
# Load and verify CA
|
11784 |
try:
|
11785 |
(cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
|
11786 |
except OpenSSL.crypto.Error, err:
|
11787 |
raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" % |
11788 |
(err, ), errors.ECODE_INVAL) |
11789 |
|
11790 |
(errcode, msg) = utils.VerifyX509Certificate(cert, None, None) |
11791 |
if errcode is not None: |
11792 |
raise errors.OpPrereqError("Invalid destination X509 CA (%s)" % |
11793 |
(msg, ), errors.ECODE_INVAL) |
11794 |
|
11795 |
self.dest_x509_ca = cert
|
11796 |
|
11797 |
# Verify target information
|
11798 |
disk_info = [] |
11799 |
for idx, disk_data in enumerate(self.op.target_node): |
11800 |
try:
|
11801 |
(host, port, magic) = \ |
11802 |
masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data) |
11803 |
except errors.GenericError, err:
|
11804 |
raise errors.OpPrereqError("Target info for disk %s: %s" % |
11805 |
(idx, err), errors.ECODE_INVAL) |
11806 |
|
11807 |
disk_info.append((host, port, magic)) |
11808 |
|
11809 |
assert len(disk_info) == len(self.op.target_node) |
11810 |
self.dest_disk_info = disk_info
|
11811 |
|
11812 |
else:
|
11813 |
raise errors.ProgrammerError("Unhandled export mode %r" % |
11814 |
self.op.mode)
|
11815 |
|
11816 |
# instance disk type verification
|
11817 |
# TODO: Implement export support for file-based disks
|
11818 |
for disk in self.instance.disks: |
11819 |
if disk.dev_type == constants.LD_FILE:
|
11820 |
raise errors.OpPrereqError("Export not supported for instances with" |
11821 |
" file-based disks", errors.ECODE_INVAL)
|
11822 |
|
11823 |
def _CleanupExports(self, feedback_fn): |
11824 |
"""Removes exports of current instance from all other nodes.
|
11825 |
|
11826 |
If an instance in a cluster with nodes A..D was exported to node C, its
|
11827 |
exports will be removed from the nodes A, B and D.
|
11828 |
|
11829 |
"""
|
11830 |
assert self.op.mode != constants.EXPORT_MODE_REMOTE |
11831 |
|
11832 |
nodelist = self.cfg.GetNodeList()
|
11833 |
nodelist.remove(self.dst_node.name)
|
11834 |
|
11835 |
# on one-node clusters nodelist will be empty after the removal
|
11836 |
# if we proceed the backup would be removed because OpBackupQuery
|
11837 |
# substitutes an empty list with the full cluster node list.
|
11838 |
iname = self.instance.name
|
11839 |
if nodelist:
|
11840 |
feedback_fn("Removing old exports for instance %s" % iname)
|
11841 |
exportlist = self.rpc.call_export_list(nodelist)
|
11842 |
for node in exportlist: |
11843 |
if exportlist[node].fail_msg:
|
11844 |
continue
|
11845 |
if iname in exportlist[node].payload: |
11846 |
msg = self.rpc.call_export_remove(node, iname).fail_msg
|
11847 |
if msg:
|
11848 |
self.LogWarning("Could not remove older export for instance %s" |
11849 |
" on node %s: %s", iname, node, msg)
|
11850 |
|
11851 |
def Exec(self, feedback_fn): |
11852 |
"""Export an instance to an image in the cluster.
|
11853 |
|
11854 |
"""
|
11855 |
assert self.op.mode in constants.EXPORT_MODES |
11856 |
|
11857 |
instance = self.instance
|
11858 |
src_node = instance.primary_node |
11859 |
|
11860 |
if self.op.shutdown: |
11861 |
# shutdown the instance, but not the disks
|
11862 |
feedback_fn("Shutting down instance %s" % instance.name)
|
11863 |
result = self.rpc.call_instance_shutdown(src_node, instance,
|
11864 |
self.op.shutdown_timeout)
|
11865 |
# TODO: Maybe ignore failures if ignore_remove_failures is set
|
11866 |
result.Raise("Could not shutdown instance %s on"
|
11867 |
" node %s" % (instance.name, src_node))
|
11868 |
|
11869 |
# set the disks ID correctly since call_instance_start needs the
|
11870 |
# correct drbd minor to create the symlinks
|
11871 |
for disk in instance.disks: |
11872 |
self.cfg.SetDiskID(disk, src_node)
|
11873 |
|
11874 |
activate_disks = (not instance.admin_up)
|
11875 |
|
11876 |
if activate_disks:
|
11877 |
      # Activate the instance disks if we're exporting a stopped instance
|
11878 |
feedback_fn("Activating disks for %s" % instance.name)
|
11879 |
_StartInstanceDisks(self, instance, None) |
11880 |
|
11881 |
try:
|
11882 |
helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
|
11883 |
instance) |
11884 |
|
11885 |
helper.CreateSnapshots() |
11886 |
try:
|
11887 |
if (self.op.shutdown and instance.admin_up and |
11888 |
not self.op.remove_instance): |
11889 |
assert not activate_disks |
11890 |
feedback_fn("Starting instance %s" % instance.name)
|
11891 |
result = self.rpc.call_instance_start(src_node, instance,
|
11892 |
None, None, False) |
11893 |
msg = result.fail_msg |
11894 |
if msg:
|
11895 |
feedback_fn("Failed to start instance: %s" % msg)
|
11896 |
_ShutdownInstanceDisks(self, instance)
|
11897 |
raise errors.OpExecError("Could not start instance: %s" % msg) |
11898 |
|
11899 |
if self.op.mode == constants.EXPORT_MODE_LOCAL: |
11900 |
(fin_resu, dresults) = helper.LocalExport(self.dst_node)
|
11901 |
elif self.op.mode == constants.EXPORT_MODE_REMOTE: |
11902 |
connect_timeout = constants.RIE_CONNECT_TIMEOUT |
11903 |
timeouts = masterd.instance.ImportExportTimeouts(connect_timeout) |
11904 |
|
11905 |
(key_name, _, _) = self.x509_key_name
|
11906 |
|
11907 |
dest_ca_pem = \ |
11908 |
OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, |
11909 |
self.dest_x509_ca)
|
11910 |
|
11911 |
(fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
|
11912 |
key_name, dest_ca_pem, |
11913 |
timeouts) |
11914 |
finally:
|
11915 |
helper.Cleanup() |
11916 |
|
11917 |
# Check for backwards compatibility
|
11918 |
assert len(dresults) == len(instance.disks) |
11919 |
assert compat.all(isinstance(i, bool) for i in dresults), \ |
11920 |
"Not all results are boolean: %r" % dresults
|
11921 |
|
11922 |
finally:
|
11923 |
if activate_disks:
|
11924 |
feedback_fn("Deactivating disks for %s" % instance.name)
|
11925 |
_ShutdownInstanceDisks(self, instance)
|
11926 |
|
11927 |
if not (compat.all(dresults) and fin_resu): |
11928 |
failures = [] |
11929 |
if not fin_resu: |
11930 |
failures.append("export finalization")
|
11931 |
if not compat.all(dresults): |
11932 |
fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults) |
11933 |
if not dsk) |
11934 |
failures.append("disk export: disk(s) %s" % fdsk)
|
11935 |
|
11936 |
raise errors.OpExecError("Export failed, errors in %s" % |
11937 |
utils.CommaJoin(failures)) |
11938 |
|
11939 |
# At this point, the export was successful, we can cleanup/finish
|
11940 |
|
11941 |
# Remove instance if requested
|
11942 |
if self.op.remove_instance: |
11943 |
feedback_fn("Removing instance %s" % instance.name)
|
11944 |
_RemoveInstance(self, feedback_fn, instance,
|
11945 |
self.op.ignore_remove_failures)
|
11946 |
|
11947 |
if self.op.mode == constants.EXPORT_MODE_LOCAL: |
11948 |
self._CleanupExports(feedback_fn)
|
11949 |
|
11950 |
return fin_resu, dresults
|
11951 |
|
11952 |
|
11953 |
class LUBackupRemove(NoHooksLU):
  """Remove exports related to the named instance.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
11965 |
|
11966 |
def Exec(self, feedback_fn): |
11967 |
"""Remove any export.
|
11968 |
|
11969 |
"""
|
11970 |
instance_name = self.cfg.ExpandInstanceName(self.op.instance_name) |
11971 |
# If the instance was not found we'll try with the name that was passed in.
|
11972 |
# This will only work if it was an FQDN, though.
|
11973 |
fqdn_warn = False
|
11974 |
if not instance_name: |
11975 |
fqdn_warn = True
|
11976 |
instance_name = self.op.instance_name
|
11977 |
|
11978 |
locked_nodes = self.owned_locks(locking.LEVEL_NODE)
|
11979 |
exportlist = self.rpc.call_export_list(locked_nodes)
|
11980 |
found = False
|
11981 |
for node in exportlist: |
11982 |
msg = exportlist[node].fail_msg |
11983 |
if msg:
|
11984 |
self.LogWarning("Failed to query node %s (continuing): %s", node, msg) |
11985 |
continue
|
11986 |
if instance_name in exportlist[node].payload: |
11987 |
found = True
|
11988 |
result = self.rpc.call_export_remove(node, instance_name)
|
11989 |
msg = result.fail_msg |
11990 |
if msg:
|
11991 |
logging.error("Could not remove export for instance %s"
|
11992 |
" on node %s: %s", instance_name, node, msg)
|
11993 |
|
11994 |
if fqdn_warn and not found: |
11995 |
feedback_fn("Export not found. If trying to remove an export belonging"
|
11996 |
" to a deleted instance please use its Fully Qualified"
|
11997 |
" Domain Name.")
|
11998 |
|
11999 |
|
12000 |
class LUGroupAdd(LogicalUnit): |
12001 |
"""Logical unit for creating node groups.
|
12002 |
|
12003 |
"""
|
12004 |
HPATH = "group-add"
|
12005 |
HTYPE = constants.HTYPE_GROUP |
12006 |
REQ_BGL = False
|
12007 |
|
12008 |
def ExpandNames(self): |
12009 |
# We need the new group's UUID here so that we can create and acquire the
|
12010 |
# corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
|
12011 |
# that it should not check whether the UUID exists in the configuration.
|
12012 |
self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId()) |
12013 |
self.needed_locks = {}
|
12014 |
self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid |
12015 |
|
12016 |
def CheckPrereq(self): |
12017 |
"""Check prerequisites.
|
12018 |
|
12019 |
This checks that the given group name is not an existing node group
|
12020 |
already.
|
12021 |
|
12022 |
"""
|
12023 |
try:
|
12024 |
existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name) |
12025 |
except errors.OpPrereqError:
|
12026 |
pass
|
12027 |
else:
|
12028 |
raise errors.OpPrereqError("Desired group name '%s' already exists as a" |
12029 |
" node group (UUID: %s)" %
|
12030 |
(self.op.group_name, existing_uuid),
|
12031 |
errors.ECODE_EXISTS) |
12032 |
|
12033 |
if self.op.ndparams: |
12034 |
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
|
12035 |
|
12036 |
def BuildHooksEnv(self): |
12037 |
"""Build hooks env.
|
12038 |
|
12039 |
"""
|
12040 |
return {
|
12041 |
"GROUP_NAME": self.op.group_name, |
12042 |
} |
12043 |
|
12044 |
def BuildHooksNodes(self): |
12045 |
"""Build hooks nodes.
|
12046 |
|
12047 |
"""
|
12048 |
mn = self.cfg.GetMasterNode()
|
12049 |
return ([mn], [mn])
|
12050 |
|
12051 |
def Exec(self, feedback_fn): |
12052 |
"""Add the node group to the cluster.
|
12053 |
|
12054 |
"""
|
12055 |
group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
|
12056 |
uuid=self.group_uuid,
|
12057 |
alloc_policy=self.op.alloc_policy,
|
12058 |
ndparams=self.op.ndparams)
|
12059 |
|
12060 |
self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False) |
12061 |
del self.remove_locks[locking.LEVEL_NODEGROUP] |
12062 |
|
12063 |
|
12064 |
class LUGroupAssignNodes(NoHooksLU): |
12065 |
"""Logical unit for assigning nodes to groups.
|
12066 |
|
12067 |
"""
|
12068 |
REQ_BGL = False
|
12069 |
|
12070 |
def ExpandNames(self): |
12071 |
# These raise errors.OpPrereqError on their own:
|
12072 |
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name) |
12073 |
self.op.nodes = _GetWantedNodes(self, self.op.nodes) |
12074 |
|
12075 |
# We want to lock all the affected nodes and groups. We have readily
|
12076 |
# available the list of nodes, and the *destination* group. To gather the
|
12077 |
# list of "source" groups, we need to fetch node information later on.
|
12078 |
self.needed_locks = {
|
12079 |
locking.LEVEL_NODEGROUP: set([self.group_uuid]), |
12080 |
locking.LEVEL_NODE: self.op.nodes,
|
12081 |
} |
12082 |
|
12083 |
def DeclareLocks(self, level): |
12084 |
if level == locking.LEVEL_NODEGROUP:
|
12085 |
assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1 |
12086 |
|
12087 |
# Try to get all affected nodes' groups without having the group or node
|
12088 |
# lock yet. Needs verification later in the code flow.
|
12089 |
groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes) |
12090 |
|
12091 |
self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
|
12092 |
|
12093 |
def CheckPrereq(self): |
12094 |
"""Check prerequisites.
|
12095 |
|
12096 |
"""
|
12097 |
assert self.needed_locks[locking.LEVEL_NODEGROUP] |
12098 |
assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) == |
12099 |
frozenset(self.op.nodes)) |
12100 |
|
12101 |
expected_locks = (set([self.group_uuid]) | |
12102 |
self.cfg.GetNodeGroupsFromNodes(self.op.nodes)) |
12103 |
actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
|
12104 |
if actual_locks != expected_locks:
|
12105 |
raise errors.OpExecError("Nodes changed groups since locks were acquired," |
12106 |
" current groups are '%s', used to be '%s'" %
|
12107 |
(utils.CommaJoin(expected_locks), |
12108 |
utils.CommaJoin(actual_locks))) |
12109 |
|
12110 |
self.node_data = self.cfg.GetAllNodesInfo() |
12111 |
self.group = self.cfg.GetNodeGroup(self.group_uuid) |
12112 |
instance_data = self.cfg.GetAllInstancesInfo()
|
12113 |
|
12114 |
if self.group is None: |
12115 |
raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" % |
12116 |
(self.op.group_name, self.group_uuid)) |
12117 |
|
12118 |
(new_splits, previous_splits) = \ |
12119 |
self.CheckAssignmentForSplitInstances([(node, self.group_uuid) |
12120 |
for node in self.op.nodes], |
12121 |
self.node_data, instance_data)
|
12122 |
|
12123 |
if new_splits:
|
12124 |
fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits)) |
12125 |
|
12126 |
if not self.op.force: |
12127 |
raise errors.OpExecError("The following instances get split by this" |
12128 |
" change and --force was not given: %s" %
|
12129 |
fmt_new_splits) |
12130 |
else:
|
12131 |
self.LogWarning("This operation will split the following instances: %s", |
12132 |
fmt_new_splits) |
12133 |
|
12134 |
if previous_splits:
|
12135 |
self.LogWarning("In addition, these already-split instances continue" |
12136 |
" to be split across groups: %s",
|
12137 |
utils.CommaJoin(utils.NiceSort(previous_splits))) |
12138 |
|
12139 |
def Exec(self, feedback_fn): |
12140 |
"""Assign nodes to a new group.
|
12141 |
|
12142 |
"""
|
12143 |
for node in self.op.nodes: |
12144 |
self.node_data[node].group = self.group_uuid |
12145 |
|
12146 |
# FIXME: Depends on side-effects of modifying the result of
|
12147 |
# C{cfg.GetAllNodesInfo}
|
12148 |
|
12149 |
self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes. |
12150 |
|
12151 |
@staticmethod
|
12152 |
def CheckAssignmentForSplitInstances(changes, node_data, instance_data): |
12153 |
"""Check for split instances after a node assignment.
|
12154 |
|
12155 |
This method considers a series of node assignments as an atomic operation,
|
12156 |
and returns information about split instances after applying the set of
|
12157 |
changes.
|
12158 |
|
12159 |
In particular, it returns information about newly split instances, and
|
12160 |
instances that were already split, and remain so after the change.
|
12161 |
|
12162 |
Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
|
12163 |
considered.
|
12164 |
|
12165 |
@type changes: list of (node_name, new_group_uuid) pairs.
|
12166 |
@param changes: list of node assignments to consider.
|
12167 |
@param node_data: a dict with data for all nodes
|
12168 |
@param instance_data: a dict with all instances to consider
|
12169 |
@rtype: a two-tuple
|
12170 |
    @return: a list of instances that were previously okay and become split as a
      consequence of this change, and a list of instances that were previously
      split and this change does not fix.
|
12173 |
|
12174 |
"""
|
12175 |
changed_nodes = dict((node, group) for node, group in changes |
12176 |
if node_data[node].group != group)
|
12177 |
|
12178 |
all_split_instances = set()
|
12179 |
previously_split_instances = set()
|
12180 |
|
12181 |
def InstanceNodes(instance): |
12182 |
return [instance.primary_node] + list(instance.secondary_nodes) |
12183 |
|
12184 |
for inst in instance_data.values(): |
12185 |
if inst.disk_template not in constants.DTS_INT_MIRROR: |
12186 |
continue
|
12187 |
|
12188 |
instance_nodes = InstanceNodes(inst) |
12189 |
|
12190 |
if len(set(node_data[node].group for node in instance_nodes)) > 1: |
12191 |
previously_split_instances.add(inst.name) |
12192 |
|
12193 |
if len(set(changed_nodes.get(node, node_data[node].group) |
12194 |
for node in instance_nodes)) > 1: |
12195 |
all_split_instances.add(inst.name) |
12196 |
|
12197 |
return (list(all_split_instances - previously_split_instances), |
12198 |
list(previously_split_instances & all_split_instances))
|
12199 |
|
12200 |
|
12201 |
class _GroupQuery(_QueryBase): |
12202 |
FIELDS = query.GROUP_FIELDS |
12203 |
|
12204 |
def ExpandNames(self, lu): |
12205 |
lu.needed_locks = {} |
12206 |
|
12207 |
self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
|
12208 |
name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values()) |
12209 |
|
12210 |
if not self.names: |
12211 |
self.wanted = [name_to_uuid[name]
|
12212 |
for name in utils.NiceSort(name_to_uuid.keys())] |
12213 |
else:
|
12214 |
# Accept names to be either names or UUIDs.
|
12215 |
missing = [] |
12216 |
self.wanted = []
|
12217 |
all_uuid = frozenset(self._all_groups.keys()) |
12218 |
|
12219 |
for name in self.names: |
12220 |
if name in all_uuid: |
12221 |
self.wanted.append(name)
|
12222 |
elif name in name_to_uuid: |
12223 |
self.wanted.append(name_to_uuid[name])
|
12224 |
else:
|
12225 |
missing.append(name) |
12226 |
|
12227 |
if missing:
|
12228 |
raise errors.OpPrereqError("Some groups do not exist: %s" % |
12229 |
utils.CommaJoin(missing), |
12230 |
errors.ECODE_NOENT) |
12231 |
|
12232 |
def DeclareLocks(self, lu, level): |
12233 |
pass
|
12234 |
|
12235 |
def _GetQueryData(self, lu): |
12236 |
"""Computes the list of node groups and their attributes.
|
12237 |
|
12238 |
"""
|
12239 |
do_nodes = query.GQ_NODE in self.requested_data |
12240 |
do_instances = query.GQ_INST in self.requested_data |
12241 |
|
12242 |
group_to_nodes = None
|
12243 |
group_to_instances = None
|
12244 |
|
12245 |
# For GQ_NODE, we need to map group->[nodes], and group->[instances] for
|
12246 |
# GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
|
12247 |
# latter GetAllInstancesInfo() is not enough, for we have to go through
|
12248 |
# instance->node. Hence, we will need to process nodes even if we only need
|
12249 |
# instance information.
|
12250 |
if do_nodes or do_instances: |
12251 |
all_nodes = lu.cfg.GetAllNodesInfo() |
12252 |
group_to_nodes = dict((uuid, []) for uuid in self.wanted) |
12253 |
node_to_group = {} |
12254 |
|
12255 |
for node in all_nodes.values(): |
12256 |
if node.group in group_to_nodes: |
12257 |
group_to_nodes[node.group].append(node.name) |
12258 |
node_to_group[node.name] = node.group |
12259 |
|
12260 |
if do_instances:
|
12261 |
all_instances = lu.cfg.GetAllInstancesInfo() |
12262 |
group_to_instances = dict((uuid, []) for uuid in self.wanted) |
12263 |
|
12264 |
for instance in all_instances.values(): |
12265 |
node = instance.primary_node |
12266 |
if node in node_to_group: |
12267 |
group_to_instances[node_to_group[node]].append(instance.name) |
12268 |
|
12269 |
if not do_nodes: |
12270 |
# Do not pass on node information if it was not requested.
|
12271 |
group_to_nodes = None
|
12272 |
|
12273 |
return query.GroupQueryData([self._all_groups[uuid] |
12274 |
for uuid in self.wanted], |
12275 |
group_to_nodes, group_to_instances) |
12276 |
|
12277 |
|
12278 |
class LUGroupQuery(NoHooksLU): |
12279 |
"""Logical unit for querying node groups.
|
12280 |
|
12281 |
"""
|
12282 |
REQ_BGL = False
|
12283 |
|
12284 |
def CheckArguments(self): |
12285 |
self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names), |
12286 |
self.op.output_fields, False) |
12287 |
|
12288 |
def ExpandNames(self): |
12289 |
self.gq.ExpandNames(self) |
12290 |
|
12291 |
def DeclareLocks(self, level): |
12292 |
self.gq.DeclareLocks(self, level) |
12293 |
|
12294 |
def Exec(self, feedback_fn): |
12295 |
return self.gq.OldStyleQuery(self) |
12296 |
|
12297 |
|
12298 |
class LUGroupSetParams(LogicalUnit): |
12299 |
"""Modifies the parameters of a node group.
|
12300 |
|
12301 |
"""
|
12302 |
HPATH = "group-modify"
|
12303 |
HTYPE = constants.HTYPE_GROUP |
12304 |
REQ_BGL = False
|
12305 |
|
12306 |
def CheckArguments(self): |
12307 |
all_changes = [ |
12308 |
self.op.ndparams,
|
12309 |
self.op.alloc_policy,
|
12310 |
] |
12311 |
|
12312 |
if all_changes.count(None) == len(all_changes): |
12313 |
raise errors.OpPrereqError("Please pass at least one modification", |
12314 |
errors.ECODE_INVAL) |
12315 |
|
12316 |
def ExpandNames(self): |
12317 |
# This raises errors.OpPrereqError on its own:
|
12318 |
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name) |
12319 |
|
12320 |
self.needed_locks = {
|
12321 |
locking.LEVEL_NODEGROUP: [self.group_uuid],
|
12322 |
} |
12323 |
|
12324 |
def CheckPrereq(self): |
12325 |
"""Check prerequisites.
|
12326 |
|
12327 |
"""
|
12328 |
self.group = self.cfg.GetNodeGroup(self.group_uuid) |
12329 |
|
12330 |
if self.group is None: |
12331 |
raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" % |
12332 |
(self.op.group_name, self.group_uuid)) |
12333 |
|
12334 |
if self.op.ndparams: |
12335 |
new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams) |
12336 |
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
|
12337 |
self.new_ndparams = new_ndparams
|
12338 |
|
12339 |
def BuildHooksEnv(self): |
12340 |
"""Build hooks env.
|
12341 |
|
12342 |
"""
|
12343 |
return {
|
12344 |
"GROUP_NAME": self.op.group_name, |
12345 |
"NEW_ALLOC_POLICY": self.op.alloc_policy, |
12346 |
} |
12347 |
|
12348 |
def BuildHooksNodes(self): |
12349 |
"""Build hooks nodes.
|
12350 |
|
12351 |
"""
|
12352 |
mn = self.cfg.GetMasterNode()
|
12353 |
return ([mn], [mn])
|
12354 |
|
12355 |
def Exec(self, feedback_fn): |
12356 |
"""Modifies the node group.
|
12357 |
|
12358 |
"""
|
12359 |
result = [] |
12360 |
|
12361 |
if self.op.ndparams: |
12362 |
self.group.ndparams = self.new_ndparams |
12363 |
result.append(("ndparams", str(self.group.ndparams))) |
12364 |
|
12365 |
if self.op.alloc_policy: |
12366 |
self.group.alloc_policy = self.op.alloc_policy |
12367 |
|
12368 |
self.cfg.Update(self.group, feedback_fn) |
12369 |
return result
|
12370 |
|
12371 |
|
12372 |
class LUGroupRemove(LogicalUnit): |
12373 |
HPATH = "group-remove"
|
12374 |
HTYPE = constants.HTYPE_GROUP |
12375 |
REQ_BGL = False
|
12376 |
|
12377 |
def ExpandNames(self): |
12378 |
    # This raises errors.OpPrereqError on its own:
|
12379 |
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name) |
12380 |
self.needed_locks = {
|
12381 |
locking.LEVEL_NODEGROUP: [self.group_uuid],
|
12382 |
} |
12383 |
|
12384 |
def CheckPrereq(self): |
12385 |
"""Check prerequisites.
|
12386 |
|
12387 |
    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.
|
12390 |
|
12391 |
"""
|
12392 |
# Verify that the group is empty.
|
12393 |
group_nodes = [node.name |
12394 |
for node in self.cfg.GetAllNodesInfo().values() |
12395 |
if node.group == self.group_uuid] |
12396 |
|
12397 |
if group_nodes:
|
12398 |
raise errors.OpPrereqError("Group '%s' not empty, has the following" |
12399 |
" nodes: %s" %
|
12400 |
(self.op.group_name,
|
12401 |
utils.CommaJoin(utils.NiceSort(group_nodes))), |
12402 |
errors.ECODE_STATE) |
12403 |
|
12404 |
# Verify the cluster would not be left group-less.
|
12405 |
if len(self.cfg.GetNodeGroupList()) == 1: |
12406 |
raise errors.OpPrereqError("Group '%s' is the only group," |
12407 |
" cannot be removed" %
|
12408 |
self.op.group_name,
|
12409 |
errors.ECODE_STATE) |
12410 |
|
12411 |
def BuildHooksEnv(self): |
12412 |
"""Build hooks env.
|
12413 |
|
12414 |
"""
|
12415 |
return {
|
12416 |
"GROUP_NAME": self.op.group_name, |
12417 |
} |
12418 |
|
12419 |
def BuildHooksNodes(self): |
12420 |
"""Build hooks nodes.
|
12421 |
|
12422 |
"""
|
12423 |
mn = self.cfg.GetMasterNode()
|
12424 |
return ([mn], [mn])
|
12425 |
|
12426 |
def Exec(self, feedback_fn): |
12427 |
"""Remove the node group.
|
12428 |
|
12429 |
"""
|
12430 |
try:
|
12431 |
self.cfg.RemoveNodeGroup(self.group_uuid) |
12432 |
except errors.ConfigurationError:
|
12433 |
raise errors.OpExecError("Group '%s' with UUID %s disappeared" % |
12434 |
(self.op.group_name, self.group_uuid)) |
12435 |
|
12436 |
self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid |
12437 |
|
12438 |
|
12439 |
class LUGroupRename(LogicalUnit): |
12440 |
HPATH = "group-rename"
|
12441 |
HTYPE = constants.HTYPE_GROUP |
12442 |
REQ_BGL = False
|
12443 |
|
12444 |
def ExpandNames(self): |
12445 |
# This raises errors.OpPrereqError on its own:
|
12446 |
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name) |
12447 |
|
12448 |
self.needed_locks = {
|
12449 |
locking.LEVEL_NODEGROUP: [self.group_uuid],
|
12450 |
} |
12451 |
|
12452 |
def CheckPrereq(self): |
12453 |
"""Check prerequisites.
|
12454 |
|
12455 |
Ensures requested new name is not yet used.
|
12456 |
|
12457 |
"""
|
12458 |
try:
|
12459 |
new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name) |
12460 |
except errors.OpPrereqError:
|
12461 |
pass
|
12462 |
else:
|
12463 |
raise errors.OpPrereqError("Desired new name '%s' clashes with existing" |
12464 |
" node group (UUID: %s)" %
|
12465 |
(self.op.new_name, new_name_uuid),
|
12466 |
errors.ECODE_EXISTS) |
12467 |
|
12468 |
def BuildHooksEnv(self): |
12469 |
"""Build hooks env.
|
12470 |
|
12471 |
"""
|
12472 |
return {
|
12473 |
"OLD_NAME": self.op.group_name, |
12474 |
"NEW_NAME": self.op.new_name, |
12475 |
} |
12476 |
|
12477 |
def BuildHooksNodes(self): |
12478 |
"""Build hooks nodes.
|
12479 |
|
12480 |
"""
|
12481 |
mn = self.cfg.GetMasterNode()
|
12482 |
|
12483 |
all_nodes = self.cfg.GetAllNodesInfo()
|
12484 |
all_nodes.pop(mn, None)
|
12485 |
|
12486 |
run_nodes = [mn] |
12487 |
run_nodes.extend(node.name for node in all_nodes.values() |
12488 |
if node.group == self.group_uuid) |
12489 |
|
12490 |
return (run_nodes, run_nodes)
|
12491 |
|
12492 |
def Exec(self, feedback_fn): |
12493 |
"""Rename the node group.
|
12494 |
|
12495 |
"""
|
12496 |
group = self.cfg.GetNodeGroup(self.group_uuid) |
12497 |
|
12498 |
if group is None: |
12499 |
raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" % |
12500 |
(self.op.group_name, self.group_uuid)) |
12501 |
|
12502 |
group.name = self.op.new_name
|
12503 |
self.cfg.Update(group, feedback_fn)
|
12504 |
|
12505 |
return self.op.new_name |
12506 |
|
12507 |
|
12508 |
class LUGroupEvacuate(LogicalUnit): |
12509 |
HPATH = "group-evacuate"
|
12510 |
HTYPE = constants.HTYPE_GROUP |
12511 |
REQ_BGL = False
|
12512 |
|
12513 |
def ExpandNames(self): |
12514 |
# This raises errors.OpPrereqError on its own:
|
12515 |
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name) |
12516 |
|
12517 |
if self.op.target_groups: |
12518 |
self.req_target_uuids = map(self.cfg.LookupNodeGroup, |
12519 |
self.op.target_groups)
|
12520 |
else:
|
12521 |
self.req_target_uuids = []
|
12522 |
|
12523 |
if self.group_uuid in self.req_target_uuids: |
12524 |
raise errors.OpPrereqError("Group to be evacuated (%s) can not be used" |
12525 |
" as a target group (targets are %s)" %
|
12526 |
(self.group_uuid,
|
12527 |
utils.CommaJoin(self.req_target_uuids)),
|
12528 |
errors.ECODE_INVAL) |
12529 |
|
12530 |
self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator) |
12531 |
|
12532 |
self.share_locks = _ShareAll()
|
12533 |
self.needed_locks = {
|
12534 |
locking.LEVEL_INSTANCE: [], |
12535 |
locking.LEVEL_NODEGROUP: [], |
12536 |
locking.LEVEL_NODE: [], |
12537 |
} |
12538 |
|
12539 |
def DeclareLocks(self, level): |
12540 |
if level == locking.LEVEL_INSTANCE:
|
12541 |
assert not self.needed_locks[locking.LEVEL_INSTANCE] |
12542 |
|
12543 |
# Lock instances optimistically, needs verification once node and group
|
12544 |
# locks have been acquired
|
12545 |
self.needed_locks[locking.LEVEL_INSTANCE] = \
|
12546 |
self.cfg.GetNodeGroupInstances(self.group_uuid) |
12547 |
|
12548 |
elif level == locking.LEVEL_NODEGROUP:
|
12549 |
assert not self.needed_locks[locking.LEVEL_NODEGROUP] |
12550 |
|
12551 |
if self.req_target_uuids: |
12552 |
lock_groups = set([self.group_uuid] + self.req_target_uuids) |
12553 |
|
12554 |
# Lock all groups used by instances optimistically; this requires going
|
12555 |
# via the node before it's locked, requiring verification later on
|
12556 |
lock_groups.update(group_uuid |
12557 |
for instance_name in |
12558 |
self.owned_locks(locking.LEVEL_INSTANCE)
|
12559 |
for group_uuid in |
12560 |
self.cfg.GetInstanceNodeGroups(instance_name))
|
12561 |
else:
|
12562 |
# No target groups, need to lock all of them
|
12563 |
lock_groups = locking.ALL_SET |
12564 |
|
12565 |
self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
|
12566 |
|
12567 |
elif level == locking.LEVEL_NODE:
|
12568 |
# This will only lock the nodes in the group to be evacuated which
|
12569 |
# contain actual instances
|
12570 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
|
12571 |
self._LockInstancesNodes()
|
12572 |
|
12573 |
# Lock all nodes in group to be evacuated and target groups
|
12574 |
owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) |
12575 |
assert self.group_uuid in owned_groups |
12576 |
member_nodes = [node_name |
12577 |
for group in owned_groups |
12578 |
for node_name in self.cfg.GetNodeGroup(group).members] |
12579 |
self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
|
12580 |
|
12581 |
def CheckPrereq(self): |
12582 |
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE)) |
12583 |
owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) |
12584 |
owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE)) |
12585 |
|
12586 |
assert owned_groups.issuperset(self.req_target_uuids) |
12587 |
assert self.group_uuid in owned_groups |
12588 |
|
12589 |
# Check if locked instances are still correct
|
12590 |
_CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances) |
12591 |
|
12592 |
# Get instance information
|
12593 |
self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances)) |
12594 |
|
12595 |
# Check if node groups for locked instances are still correct
|
12596 |
for instance_name in owned_instances: |
12597 |
inst = self.instances[instance_name]
|
12598 |
assert owned_nodes.issuperset(inst.all_nodes), \
|
12599 |
"Instance %s's nodes changed while we kept the lock" % instance_name
|
12600 |
|
12601 |
inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
|
12602 |
owned_groups) |
12603 |
|
12604 |
assert self.group_uuid in inst_groups, \ |
12605 |
"Instance %s has no node in group %s" % (instance_name, self.group_uuid) |
12606 |
|
12607 |
if self.req_target_uuids: |
12608 |
# User requested specific target groups
|
12609 |
self.target_uuids = self.req_target_uuids |
12610 |
else:
|
12611 |
# All groups except the one to be evacuated are potential targets
|
12612 |
self.target_uuids = [group_uuid for group_uuid in owned_groups |
12613 |
if group_uuid != self.group_uuid] |
12614 |
|
12615 |
if not self.target_uuids: |
12616 |
raise errors.OpPrereqError("There are no possible target groups", |
12617 |
errors.ECODE_INVAL) |
12618 |
|
12619 |
def BuildHooksEnv(self): |
12620 |
"""Build hooks env.
|
12621 |
|
12622 |
"""
|
12623 |
return {
|
12624 |
"GROUP_NAME": self.op.group_name, |
12625 |
"TARGET_GROUPS": " ".join(self.target_uuids), |
12626 |
} |
12627 |
|
12628 |
def BuildHooksNodes(self): |
12629 |
"""Build hooks nodes.
|
12630 |
|
12631 |
"""
|
12632 |
mn = self.cfg.GetMasterNode()
|
12633 |
|
12634 |
assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP) |
12635 |
|
12636 |
run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members |
12637 |
|
12638 |
return (run_nodes, run_nodes)
|
12639 |
|
12640 |
def Exec(self, feedback_fn): |
12641 |
instances = list(self.owned_locks(locking.LEVEL_INSTANCE)) |
12642 |
|
12643 |
assert self.group_uuid not in self.target_uuids |
12644 |
|
12645 |
ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP, |
12646 |
instances=instances, target_groups=self.target_uuids)
|
12647 |
|
12648 |
ial.Run(self.op.iallocator)
|
12649 |
|
12650 |
if not ial.success: |
12651 |
raise errors.OpPrereqError("Can't compute group evacuation using" |
12652 |
" iallocator '%s': %s" %
|
12653 |
(self.op.iallocator, ial.info),
|
12654 |
errors.ECODE_NORES) |
12655 |
|
12656 |
jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False) |
12657 |
|
12658 |
self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s", |
12659 |
len(jobs), self.op.group_name) |
12660 |
|
12661 |
return ResultWithJobs(jobs)
|
12662 |
|
12663 |
|
12664 |
class TagsLU(NoHooksLU): # pylint: disable=W0223 |
12665 |
"""Generic tags LU.
|
12666 |
|
12667 |
This is an abstract class which is the parent of all the other tags LUs.
|
12668 |
|
12669 |
"""
|
12670 |
def ExpandNames(self): |
12671 |
self.group_uuid = None |
12672 |
self.needed_locks = {}
|
12673 |
if self.op.kind == constants.TAG_NODE: |
12674 |
self.op.name = _ExpandNodeName(self.cfg, self.op.name) |
12675 |
self.needed_locks[locking.LEVEL_NODE] = self.op.name |
12676 |
elif self.op.kind == constants.TAG_INSTANCE: |
12677 |
self.op.name = _ExpandInstanceName(self.cfg, self.op.name) |
12678 |
self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name |
12679 |
elif self.op.kind == constants.TAG_NODEGROUP: |
12680 |
self.group_uuid = self.cfg.LookupNodeGroup(self.op.name) |
12681 |
|
12682 |
# FIXME: Acquire BGL for cluster tag operations (as of this writing it's
|
12683 |
# not possible to acquire the BGL based on opcode parameters)
|
12684 |
|
12685 |
def CheckPrereq(self): |
12686 |
"""Check prerequisites.
|
12687 |
|
12688 |
"""
|
12689 |
if self.op.kind == constants.TAG_CLUSTER: |
12690 |
self.target = self.cfg.GetClusterInfo() |
12691 |
elif self.op.kind == constants.TAG_NODE: |
12692 |
self.target = self.cfg.GetNodeInfo(self.op.name) |
12693 |
elif self.op.kind == constants.TAG_INSTANCE: |
12694 |
self.target = self.cfg.GetInstanceInfo(self.op.name) |
12695 |
elif self.op.kind == constants.TAG_NODEGROUP: |
12696 |
self.target = self.cfg.GetNodeGroup(self.group_uuid) |
12697 |
else:
|
12698 |
raise errors.OpPrereqError("Wrong tag type requested (%s)" % |
12699 |
str(self.op.kind), errors.ECODE_INVAL) |
12700 |
|
12701 |
|
12702 |
class LUTagsGet(TagsLU): |
12703 |
"""Returns the tags of a given object.
|
12704 |
|
12705 |
"""
|
12706 |
REQ_BGL = False
|
12707 |
|
12708 |
def ExpandNames(self): |
12709 |
TagsLU.ExpandNames(self)
|
12710 |
|
12711 |
# Share locks as this is only a read operation
|
12712 |
self.share_locks = _ShareAll()
|
12713 |
|
12714 |
def Exec(self, feedback_fn): |
12715 |
"""Returns the tag list.
|
12716 |
|
12717 |
"""
|
12718 |
return list(self.target.GetTags()) |
12719 |
|
12720 |
|
12721 |
class LUTagsSearch(NoHooksLU): |
12722 |
"""Searches the tags for a given pattern.
|
12723 |
|
12724 |
"""
|
12725 |
REQ_BGL = False
|
12726 |
|
12727 |
def ExpandNames(self): |
12728 |
self.needed_locks = {}
|
12729 |
|
12730 |
def CheckPrereq(self): |
12731 |
"""Check prerequisites.
|
12732 |
|
12733 |
This checks the pattern passed for validity by compiling it.
|
12734 |
|
12735 |
"""
|
12736 |
try:
|
12737 |
self.re = re.compile(self.op.pattern) |
12738 |
except re.error, err:
|
12739 |
raise errors.OpPrereqError("Invalid search pattern '%s': %s" % |
12740 |
(self.op.pattern, err), errors.ECODE_INVAL)
|
12741 |
|
12742 |
def Exec(self, feedback_fn): |
12743 |
"""Returns the tag list.
|
12744 |
|
12745 |
"""
|
12746 |
cfg = self.cfg
|
12747 |
tgts = [("/cluster", cfg.GetClusterInfo())]
|
12748 |
ilist = cfg.GetAllInstancesInfo().values() |
12749 |
tgts.extend([("/instances/%s" % i.name, i) for i in ilist]) |
12750 |
nlist = cfg.GetAllNodesInfo().values() |
12751 |
tgts.extend([("/nodes/%s" % n.name, n) for n in nlist]) |
12752 |
tgts.extend(("/nodegroup/%s" % n.name, n)
|
12753 |
for n in cfg.GetAllNodeGroupsInfo().values()) |
12754 |
results = [] |
12755 |
for path, target in tgts: |
12756 |
for tag in target.GetTags(): |
12757 |
if self.re.search(tag): |
12758 |
results.append((path, tag)) |
12759 |
return results
|
12760 |
|
12761 |
|
12762 |
class LUTagsSet(TagsLU): |
12763 |
"""Sets a tag on a given object.
|
12764 |
|
12765 |
"""
|
12766 |
REQ_BGL = False
|
12767 |
|
12768 |
def CheckPrereq(self): |
12769 |
"""Check prerequisites.
|
12770 |
|
12771 |
This checks the type and length of the tag name and value.
|
12772 |
|
12773 |
"""
|
12774 |
TagsLU.CheckPrereq(self)
|
12775 |
for tag in self.op.tags: |
12776 |
objects.TaggableObject.ValidateTag(tag) |
12777 |
|
12778 |
def Exec(self, feedback_fn): |
12779 |
"""Sets the tag.
|
12780 |
|
12781 |
"""
|
12782 |
try:
|
12783 |
for tag in self.op.tags: |
12784 |
self.target.AddTag(tag)
|
12785 |
except errors.TagError, err:
|
12786 |
raise errors.OpExecError("Error while setting tag: %s" % str(err)) |
12787 |
self.cfg.Update(self.target, feedback_fn) |
12788 |
|
12789 |
|
12790 |
class LUTagsDel(TagsLU): |
12791 |
"""Delete a list of tags from a given object.
|
12792 |
|
12793 |
"""
|
12794 |
REQ_BGL = False
|
12795 |
|
12796 |
def CheckPrereq(self): |
12797 |
"""Check prerequisites.
|
12798 |
|
12799 |
This checks that we have the given tag.
|
12800 |
|
12801 |
"""
|
12802 |
TagsLU.CheckPrereq(self)
|
12803 |
for tag in self.op.tags: |
12804 |
objects.TaggableObject.ValidateTag(tag) |
12805 |
del_tags = frozenset(self.op.tags) |
12806 |
cur_tags = self.target.GetTags()
|
12807 |
|
12808 |
diff_tags = del_tags - cur_tags |
12809 |
if diff_tags:
|
12810 |
diff_names = ("'%s'" % i for i in sorted(diff_tags)) |
12811 |
raise errors.OpPrereqError("Tag(s) %s not found" % |
12812 |
(utils.CommaJoin(diff_names), ), |
12813 |
errors.ECODE_NOENT) |
12814 |
|
12815 |
def Exec(self, feedback_fn): |
12816 |
"""Remove the tag from the object.
|
12817 |
|
12818 |
"""
|
12819 |
for tag in self.op.tags: |
12820 |
self.target.RemoveTag(tag)
|
12821 |
self.cfg.Update(self.target, feedback_fn) |
12822 |
|
12823 |
|
12824 |
class LUTestDelay(NoHooksLU): |
12825 |
"""Sleep for a specified amount of time.
|
12826 |
|
12827 |
This LU sleeps on the master and/or nodes for a specified amount of
|
12828 |
time.
|
12829 |
|
12830 |
"""
|
12831 |
REQ_BGL = False
|
12832 |
|
12833 |
def ExpandNames(self): |
12834 |
"""Expand names and set required locks.
|
12835 |
|
12836 |
This expands the node list, if any.
|
12837 |
|
12838 |
"""
|
12839 |
self.needed_locks = {}
|
12840 |
if self.op.on_nodes: |
12841 |
# _GetWantedNodes can be used here, but is not always appropriate to use
|
12842 |
# this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
|
12843 |
# more information.
|
12844 |
self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes) |
12845 |
self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes |
12846 |
|
12847 |
def _TestDelay(self): |
12848 |
"""Do the actual sleep.
|
12849 |
|
12850 |
"""
|
12851 |
if self.op.on_master: |
12852 |
if not utils.TestDelay(self.op.duration): |
12853 |
raise errors.OpExecError("Error during master delay test") |
12854 |
if self.op.on_nodes: |
12855 |
result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration) |
12856 |
for node, node_result in result.items(): |
12857 |
node_result.Raise("Failure during rpc call to node %s" % node)
|
12858 |
|
12859 |
def Exec(self, feedback_fn): |
12860 |
"""Execute the test delay opcode, with the wanted repetitions.
|
12861 |
|
12862 |
"""
|
12863 |
if self.op.repeat == 0: |
12864 |
self._TestDelay()
|
12865 |
else:
|
12866 |
top_value = self.op.repeat - 1 |
12867 |
for i in range(self.op.repeat): |
12868 |
self.LogInfo("Test delay iteration %d/%d" % (i, top_value)) |
12869 |
self._TestDelay()
|
12870 |
|
12871 |
|
12872 |
class LUTestJqueue(NoHooksLU): |
12873 |
"""Utility LU to test some aspects of the job queue.
|
12874 |
|
12875 |
"""
|
12876 |
REQ_BGL = False
|
12877 |
|
12878 |
# Must be lower than default timeout for WaitForJobChange to see whether it
|
12879 |
# notices changed jobs
|
12880 |
_CLIENT_CONNECT_TIMEOUT = 20.0
|
12881 |
_CLIENT_CONFIRM_TIMEOUT = 60.0
|
12882 |
|
12883 |
@classmethod
|
12884 |
def _NotifyUsingSocket(cls, cb, errcls): |
12885 |
"""Opens a Unix socket and waits for another program to connect.
|
12886 |
|
12887 |
@type cb: callable
|
12888 |
@param cb: Callback to send socket name to client
|
12889 |
@type errcls: class
|
12890 |
@param errcls: Exception class to use for errors
|
12891 |
|
12892 |
"""
|
12893 |
# Using a temporary directory as there's no easy way to create temporary
|
12894 |
# sockets without writing a custom loop around tempfile.mktemp and
|
12895 |
# socket.bind
|
12896 |
tmpdir = tempfile.mkdtemp() |
12897 |
try:
|
12898 |
tmpsock = utils.PathJoin(tmpdir, "sock")
|
12899 |
|
12900 |
logging.debug("Creating temporary socket at %s", tmpsock)
|
12901 |
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) |
12902 |
try:
|
12903 |
sock.bind(tmpsock) |
12904 |
sock.listen(1)
|
12905 |
|
12906 |
# Send details to client
|
12907 |
cb(tmpsock) |
12908 |
|
12909 |
# Wait for client to connect before continuing
|
12910 |
sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT) |
12911 |
try:
|
12912 |
(conn, _) = sock.accept() |
12913 |
except socket.error, err:
|
12914 |
raise errcls("Client didn't connect in time (%s)" % err) |
12915 |
finally:
|
12916 |
sock.close() |
12917 |
finally:
|
12918 |
# Remove as soon as client is connected
|
12919 |
shutil.rmtree(tmpdir) |
12920 |
|
12921 |
# Wait for client to close
|
12922 |
try:
|
12923 |
try:
|
12924 |
# pylint: disable=E1101
|
12925 |
# Instance of '_socketobject' has no ... member
|
12926 |
conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT) |
12927 |
conn.recv(1)
|
12928 |
except socket.error, err:
|
12929 |
raise errcls("Client failed to confirm notification (%s)" % err) |
12930 |
finally:
|
12931 |
conn.close() |
12932 |
|
12933 |
def _SendNotification(self, test, arg, sockname): |
12934 |
"""Sends a notification to the client.
|
12935 |
|
12936 |
@type test: string
|
12937 |
@param test: Test name
|
12938 |
@param arg: Test argument (depends on test)
|
12939 |
@type sockname: string
|
12940 |
@param sockname: Socket path
|
12941 |
|
12942 |
"""
|
12943 |
self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
|
12944 |
|
12945 |
def _Notify(self, prereq, test, arg): |
12946 |
"""Notifies the client of a test.
|
12947 |
|
12948 |
@type prereq: bool
|
12949 |
@param prereq: Whether this is a prereq-phase test
|
12950 |
@type test: string
|
12951 |
@param test: Test name
|
12952 |
@param arg: Test argument (depends on test)
|
12953 |
|
12954 |
"""
|
12955 |
if prereq:
|
12956 |
errcls = errors.OpPrereqError |
12957 |
else:
|
12958 |
errcls = errors.OpExecError |
12959 |
|
12960 |
return self._NotifyUsingSocket(compat.partial(self._SendNotification, |
12961 |
test, arg), |
12962 |
errcls) |
12963 |
|
12964 |
def CheckArguments(self): |
12965 |
self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1 |
12966 |
self.expandnames_calls = 0 |
12967 |
|
12968 |
def ExpandNames(self): |
12969 |
checkargs_calls = getattr(self, "checkargs_calls", 0) |
12970 |
if checkargs_calls < 1: |
12971 |
raise errors.ProgrammerError("CheckArguments was not called") |
12972 |
|
12973 |
self.expandnames_calls += 1 |
12974 |
|
12975 |
if self.op.notify_waitlock: |
12976 |
self._Notify(True, constants.JQT_EXPANDNAMES, None) |
12977 |
|
12978 |
self.LogInfo("Expanding names") |
12979 |
|
12980 |
# Get lock on master node (just to get a lock, not for a particular reason)
|
12981 |
self.needed_locks = {
|
12982 |
locking.LEVEL_NODE: self.cfg.GetMasterNode(),
|
12983 |
} |
12984 |
|
12985 |
def Exec(self, feedback_fn): |
12986 |
if self.expandnames_calls < 1: |
12987 |
raise errors.ProgrammerError("ExpandNames was not called") |
12988 |
|
12989 |
if self.op.notify_exec: |
12990 |
self._Notify(False, constants.JQT_EXEC, None) |
12991 |
|
12992 |
self.LogInfo("Executing") |
12993 |
|
12994 |
if self.op.log_messages: |
12995 |
self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages)) |
12996 |
for idx, msg in enumerate(self.op.log_messages): |
12997 |
self.LogInfo("Sending log message %s", idx + 1) |
12998 |
feedback_fn(constants.JQT_MSGPREFIX + msg) |
12999 |
# Report how many test messages have been sent
|
13000 |
self._Notify(False, constants.JQT_LOGMSG, idx + 1) |
13001 |
|
13002 |
if self.op.fail: |
13003 |
raise errors.OpExecError("Opcode failure was requested") |
13004 |
|
13005 |
return True |
13006 |
|
13007 |
|
13008 |
class IAllocator(object): |
13009 |
"""IAllocator framework.
|
13010 |
|
13011 |
  An IAllocator instance has the following sets of attributes:
|
13012 |
- cfg that is needed to query the cluster
|
13013 |
- input data (all members of the _KEYS class attribute are required)
|
13014 |
- four buffer attributes (in|out_data|text), that represent the
|
13015 |
input (to the external script) in text and data structure format,
|
13016 |
and the output from it, again in two formats
|
13017 |
- the result variables from the script (success, info, nodes) for
|
13018 |
easy usage
|
13019 |
|
13020 |
"""
|
13021 |
# pylint: disable=R0902
|
13022 |
# lots of instance attributes
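  # Rough usage sketch, mirroring how this class is driven elsewhere in this
  # module (e.g. LUGroupEvacuate.Exec):
  #
  #   ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
  #                    instances=instances, target_groups=self.target_uuids)
  #   ial.Run(self.op.iallocator)
  #   if not ial.success:
  #     ...   # ial.info carries the error message from the allocator
  #   ...     # on success, ial.result holds the allocator's answer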
|
13023 |
|
13024 |
def __init__(self, cfg, rpc, mode, **kwargs): |
13025 |
self.cfg = cfg
|
13026 |
self.rpc = rpc
|
13027 |
# init buffer variables
|
13028 |
self.in_text = self.out_text = self.in_data = self.out_data = None |
13029 |
# init all input fields so that pylint is happy
|
13030 |
self.mode = mode
|
13031 |
self.memory = self.disks = self.disk_template = None |
13032 |
self.os = self.tags = self.nics = self.vcpus = None |
13033 |
self.hypervisor = None |
13034 |
self.relocate_from = None |
13035 |
self.name = None |
13036 |
self.instances = None |
13037 |
self.evac_mode = None |
13038 |
self.target_groups = []
|
13039 |
# computed fields
|
13040 |
self.required_nodes = None |
13041 |
# init result fields
|
13042 |
self.success = self.info = self.result = None |
13043 |
|
13044 |
try:
|
13045 |
(fn, keydata, self._result_check) = self._MODE_DATA[self.mode] |
13046 |
except KeyError: |
13047 |
raise errors.ProgrammerError("Unknown mode '%s' passed to the" |
13048 |
" IAllocator" % self.mode) |
13049 |
|
13050 |
keyset = [n for (n, _) in keydata] |
13051 |
|
13052 |
for key in kwargs: |
13053 |
if key not in keyset: |
13054 |
raise errors.ProgrammerError("Invalid input parameter '%s' to" |
13055 |
" IAllocator" % key)
|
13056 |
setattr(self, key, kwargs[key]) |
13057 |
|
13058 |
for key in keyset: |
13059 |
if key not in kwargs: |
13060 |
raise errors.ProgrammerError("Missing input parameter '%s' to" |
13061 |
" IAllocator" % key)
|
13062 |
self._BuildInputData(compat.partial(fn, self), keydata) |
13063 |
|
13064 |
def _ComputeClusterData(self): |
13065 |
"""Compute the generic allocator input data.
|
13066 |
|
13067 |
This is the data that is independent of the actual operation.
|
13068 |
|
13069 |
"""
|
13070 |
cfg = self.cfg
|
13071 |
cluster_info = cfg.GetClusterInfo() |
13072 |
# cluster data
|
13073 |
data = { |
13074 |
"version": constants.IALLOCATOR_VERSION,
|
13075 |
"cluster_name": cfg.GetClusterName(),
|
13076 |
"cluster_tags": list(cluster_info.GetTags()), |
13077 |
"enabled_hypervisors": list(cluster_info.enabled_hypervisors), |
13078 |
# we don't have job IDs
|
13079 |
} |
13080 |
ninfo = cfg.GetAllNodesInfo() |
13081 |
iinfo = cfg.GetAllInstancesInfo().values() |
13082 |
i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo] |
13083 |
|
13084 |
# node data
|
13085 |
node_list = [n.name for n in ninfo.values() if n.vm_capable] |
13086 |
|
13087 |
if self.mode == constants.IALLOCATOR_MODE_ALLOC: |
13088 |
hypervisor_name = self.hypervisor
|
13089 |
elif self.mode == constants.IALLOCATOR_MODE_RELOC: |
13090 |
hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
|
13091 |
else:
|
13092 |
hypervisor_name = cluster_info.enabled_hypervisors[0]
|
13093 |
|
13094 |
node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
|
13095 |
hypervisor_name) |
13096 |
node_iinfo = \ |
13097 |
self.rpc.call_all_instances_info(node_list,
|
13098 |
cluster_info.enabled_hypervisors) |
13099 |
|
13100 |
data["nodegroups"] = self._ComputeNodeGroupData(cfg) |
13101 |
|
13102 |
config_ndata = self._ComputeBasicNodeData(ninfo)
|
13103 |
data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo, |
13104 |
i_list, config_ndata) |
13105 |
assert len(data["nodes"]) == len(ninfo), \ |
13106 |
"Incomplete node data computed"
|
13107 |
|
13108 |
data["instances"] = self._ComputeInstanceData(cluster_info, i_list) |
13109 |
|
13110 |
self.in_data = data
|
13111 |
|
13112 |
@staticmethod
|
13113 |
def _ComputeNodeGroupData(cfg): |
13114 |
"""Compute node groups data.
|
13115 |
|
13116 |
"""
|
13117 |
ng = dict((guuid, {
|
13118 |
"name": gdata.name,
|
13119 |
"alloc_policy": gdata.alloc_policy,
|
13120 |
}) |
13121 |
for guuid, gdata in cfg.GetAllNodeGroupsInfo().items()) |
13122 |
|
13123 |
return ng
|
13124 |
|
13125 |
@staticmethod
|
13126 |
def _ComputeBasicNodeData(node_cfg): |
13127 |
"""Compute global node data.
|
13128 |
|
13129 |
@rtype: dict
|
13130 |
@returns: a dict of name: (node dict, node config)
|
13131 |
|
13132 |
"""
|
13133 |
# fill in static (config-based) values
|
13134 |
node_results = dict((ninfo.name, {
|
13135 |
"tags": list(ninfo.GetTags()), |
13136 |
"primary_ip": ninfo.primary_ip,
|
13137 |
"secondary_ip": ninfo.secondary_ip,
|
13138 |
"offline": ninfo.offline,
|
13139 |
"drained": ninfo.drained,
|
13140 |
"master_candidate": ninfo.master_candidate,
|
13141 |
"group": ninfo.group,
|
13142 |
"master_capable": ninfo.master_capable,
|
13143 |
"vm_capable": ninfo.vm_capable,
|
13144 |
}) |
13145 |
for ninfo in node_cfg.values()) |
13146 |
|
13147 |
return node_results
|
13148 |
|
13149 |
@staticmethod
|
13150 |
def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list, |
13151 |
node_results): |
13152 |
"""Compute global node data.
|
13153 |
|
13154 |
@param node_results: the basic node structures as filled from the config
|
13155 |
|
13156 |
"""
|
13157 |
# make a copy of the current dict
|
13158 |
node_results = dict(node_results)
|
13159 |
for nname, nresult in node_data.items(): |
13160 |
assert nname in node_results, "Missing basic data for node %s" % nname |
13161 |
ninfo = node_cfg[nname] |
13162 |
|
13163 |
if not (ninfo.offline or ninfo.drained): |
13164 |
nresult.Raise("Can't get data for node %s" % nname)
|
13165 |
node_iinfo[nname].Raise("Can't get node instance info from node %s" %
|
13166 |
nname) |
13167 |
remote_info = nresult.payload |
13168 |
|
13169 |
for attr in ["memory_total", "memory_free", "memory_dom0", |
13170 |
"vg_size", "vg_free", "cpu_total"]: |
13171 |
if attr not in remote_info: |
13172 |
raise errors.OpExecError("Node '%s' didn't return attribute" |
13173 |
" '%s'" % (nname, attr))
|
13174 |
if not isinstance(remote_info[attr], int): |
13175 |
raise errors.OpExecError("Node '%s' returned invalid value" |
13176 |
" for '%s': %s" %
|
13177 |
(nname, attr, remote_info[attr])) |
13178 |
# compute memory used by primary instances
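        # For every instance whose primary node is this node, reserve its full
        # configured memory (BE_MEMORY): if the instance is currently using
        # less than that, the difference is also subtracted from the reported
        # "memory_free", so the allocator does not over-commit the node.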
|
13179 |
i_p_mem = i_p_up_mem = 0
|
13180 |
for iinfo, beinfo in i_list: |
13181 |
if iinfo.primary_node == nname:
|
13182 |
i_p_mem += beinfo[constants.BE_MEMORY] |
13183 |
if iinfo.name not in node_iinfo[nname].payload: |
13184 |
i_used_mem = 0
|
13185 |
else:
|
13186 |
i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"]) |
13187 |
i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem |
13188 |
remote_info["memory_free"] -= max(0, i_mem_diff) |
13189 |
|
13190 |
if iinfo.admin_up:
|
13191 |
i_p_up_mem += beinfo[constants.BE_MEMORY] |
13192 |
|
13193 |
# compute memory used by instances
|
13194 |
pnr_dyn = { |
13195 |
"total_memory": remote_info["memory_total"], |
13196 |
"reserved_memory": remote_info["memory_dom0"], |
13197 |
"free_memory": remote_info["memory_free"], |
13198 |
"total_disk": remote_info["vg_size"], |
13199 |
"free_disk": remote_info["vg_free"], |
13200 |
"total_cpus": remote_info["cpu_total"], |
13201 |
"i_pri_memory": i_p_mem,
|
13202 |
"i_pri_up_memory": i_p_up_mem,
|
13203 |
} |
13204 |
pnr_dyn.update(node_results[nname]) |
13205 |
node_results[nname] = pnr_dyn |
13206 |
|
13207 |
return node_results
|
13208 |
|
13209 |
@staticmethod
|
13210 |
def _ComputeInstanceData(cluster_info, i_list): |
13211 |
"""Compute global instance data.
|
13212 |
|
13213 |
"""
|
13214 |
instance_data = {} |
13215 |
for iinfo, beinfo in i_list: |
13216 |
nic_data = [] |
13217 |
for nic in iinfo.nics: |
13218 |
filled_params = cluster_info.SimpleFillNIC(nic.nicparams) |
13219 |
nic_dict = { |
13220 |
"mac": nic.mac,
|
13221 |
"ip": nic.ip,
|
13222 |
"mode": filled_params[constants.NIC_MODE],
|
13223 |
"link": filled_params[constants.NIC_LINK],
|
13224 |
} |
13225 |
if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
|
13226 |
nic_dict["bridge"] = filled_params[constants.NIC_LINK]
|
13227 |
nic_data.append(nic_dict) |
13228 |
pir = { |
13229 |
"tags": list(iinfo.GetTags()), |
13230 |
"admin_up": iinfo.admin_up,
|
13231 |
"vcpus": beinfo[constants.BE_VCPUS],
|
13232 |
"memory": beinfo[constants.BE_MEMORY],
|
13233 |
"os": iinfo.os,
|
13234 |
"nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes), |
13235 |
"nics": nic_data,
|
13236 |
"disks": [{constants.IDISK_SIZE: dsk.size,
|
13237 |
constants.IDISK_MODE: dsk.mode} |
13238 |
for dsk in iinfo.disks], |
13239 |
"disk_template": iinfo.disk_template,
|
13240 |
"hypervisor": iinfo.hypervisor,
|
13241 |
} |
13242 |
pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
|
13243 |
pir["disks"])
|
13244 |
instance_data[iinfo.name] = pir |
13245 |
|
13246 |
return instance_data
|
13247 |
|
13248 |
def _AddNewInstance(self): |
13249 |
"""Add new instance data to allocator structure.
|
13250 |
|
13251 |
This in combination with _AllocatorGetClusterData will create the
|
13252 |
correct structure needed as input for the allocator.
|
13253 |
|
13254 |
The checks for the completeness of the opcode must have already been
|
13255 |
done.
|
13256 |
|
13257 |
"""
|
13258 |
disk_space = _ComputeDiskSize(self.disk_template, self.disks) |
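    # Internally mirrored disk templates (e.g. DRBD) need the allocator to
    # return a primary and a secondary node; all other templates need one.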
13259 |
|
13260 |
if self.disk_template in constants.DTS_INT_MIRROR: |
13261 |
self.required_nodes = 2 |
13262 |
else:
|
13263 |
self.required_nodes = 1 |
13264 |
|
13265 |
request = { |
13266 |
"name": self.name, |
13267 |
"disk_template": self.disk_template, |
13268 |
"tags": self.tags, |
13269 |
"os": self.os, |
13270 |
"vcpus": self.vcpus, |
13271 |
"memory": self.memory, |
13272 |
"disks": self.disks, |
13273 |
"disk_space_total": disk_space,
|
13274 |
"nics": self.nics, |
13275 |
"required_nodes": self.required_nodes, |
13276 |
"hypervisor": self.hypervisor, |
13277 |
} |
13278 |
|
13279 |
return request
|
13280 |
|
13281 |
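
  # A minimal sketch of an allocation request as assembled here (the "type"
  # key is added later by _BuildInputData; all values are made-up examples):
  #   {"type": "allocate", "name": "inst1.example.com",
  #    "disk_template": "drbd", "tags": [], "os": "debian-image",
  #    "vcpus": 1, "memory": 512, "disks": [{"size": 1024, "mode": "rw"}],
  #    "disk_space_total": 1152,
  #    "nics": [{"mac": "aa:00:00:dd:ee:ff", "ip": None,
  #              "mode": "bridged", "link": "xen-br0"}],
  #    "required_nodes": 2, "hypervisor": "xen-pvm"}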

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if instance.disk_template in constants.DTS_INT_MIRROR and \
        len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddNodeEvacuate(self):
    """Get data for node-evacuate requests.

    """
    return {
      "instances": self.instances,
      "evac_mode": self.evac_mode,
      }

  def _AddChangeGroup(self):
    """Get data for group change requests.

    """
    return {
      "instances": self.instances,
      "target_groups": self.target_groups,
      }

  def _BuildInputData(self, fn, keydata):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    for keyname, keytype in keydata:
      if keyname not in request:
        raise errors.ProgrammerError("Request parameter %s is missing" %
                                     keyname)
      val = request[keyname]
      if not keytype(val):
        raise errors.ProgrammerError("Request parameter %s doesn't pass"
                                     " validation, value %s, expected"
                                     " type %s" % (keyname, val, keytype))
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)
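
  # The ht-based type checks below describe, for each allocator mode, which
  # keys the request must carry and what shape the iallocator's answer must
  # have; _BuildInputData and _ValidateResult enforce them via _MODE_DATA.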

  _STRING_LIST = ht.TListOf(ht.TString)
  _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
    # pylint: disable=E1101
    # Class '...' has no 'OP_ID' member
    "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                         opcodes.OpInstanceMigrate.OP_ID,
                         opcodes.OpInstanceReplaceDisks.OP_ID])
    })))

  _NEVAC_MOVED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(3),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                  ])))
  _NEVAC_FAILED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TMaybeString,
                                  ])))
  _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                          ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
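
  # Maps each allocator mode to a 3-tuple: the method building the request
  # body, the (key, validator) pairs the request must contain, and the check
  # applied to the result returned by the iallocator script.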

  _MODE_DATA = {
    constants.IALLOCATOR_MODE_ALLOC:
      (_AddNewInstance, [
        ("name", ht.TString),
        ("memory", ht.TInt),
        ("disks", ht.TListOf(ht.TDict)),
        ("disk_template", ht.TString),
        ("os", ht.TString),
        ("tags", _STRING_LIST),
        ("nics", ht.TListOf(ht.TDict)),
        ("vcpus", ht.TInt),
        ("hypervisor", ht.TString),
        ], ht.TList),
    constants.IALLOCATOR_MODE_RELOC:
      (_AddRelocateInstance,
       [("name", ht.TString), ("relocate_from", _STRING_LIST)],
       ht.TList),
    constants.IALLOCATOR_MODE_NODE_EVAC:
      (_AddNodeEvacuate, [
        ("instances", _STRING_LIST),
        ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
        ], _NEVAC_RESULT),
    constants.IALLOCATOR_MODE_CHG_GROUP:
      (_AddChangeGroup, [
        ("instances", _STRING_LIST),
        ("target_groups", _STRING_LIST),
        ], _NEVAC_RESULT),
    }

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not self._result_check(self.result):
      raise errors.OpExecError("Iallocator returned invalid result,"
                               " expected %s, got %s" %
                               (self._result_check, self.result),
                               errors.ECODE_INVAL)

    if self.mode == constants.IALLOCATOR_MODE_RELOC:
      assert self.relocate_from is not None
      assert self.required_nodes == 1

      node2group = dict((name, ndata["group"])
                        for (name, ndata) in self.in_data["nodes"].items())

      fn = compat.partial(self._NodesToGroups, node2group,
                          self.in_data["nodegroups"])

      instance = self.cfg.GetInstanceInfo(self.name)
      request_groups = fn(self.relocate_from + [instance.primary_node])
      result_groups = fn(rdict["result"] + [instance.primary_node])

      if self.success and not set(result_groups).issubset(request_groups):
        raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
                                 " differ from original groups (%s)" %
                                 (utils.CommaJoin(result_groups),
                                  utils.CommaJoin(request_groups)))

    elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
      assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES

    self.out_data = rdict
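
  # For example (hypothetical data): with node2group = {"node1": "uuid-a"} and
  # groups = {"uuid-a": {"name": "default"}}, calling
  # _NodesToGroups(node2group, groups, ["node1", "unknown"]) returns
  # ["default"]; unknown nodes are silently skipped.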

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        pass
      else:
        try:
          group = groups[group_uuid]
        except KeyError:
          # Can't find group, let's use UUID
          group_name = group_uuid
        else:
          group_name = group["name"]

        result.add(group_name)

    return sorted(result)


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["memory", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            constants.IDISK_SIZE not in row or
            not isinstance(row[constants.IDISK_SIZE], int) or
            constants.IDISK_MODE not in row or
            row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = \
        list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
    elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
                          constants.IALLOCATOR_MODE_NODE_EVAC):
      if not self.op.instances:
        raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
      self.op.instances = _GetWantedInstances(self, self.op.instances)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       memory=self.op.memory,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       target_groups=self.op.target_groups)
    elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       evac_mode=self.op.evac_mode)
    else:
      raise errors.ProgrammerError("Unhandled mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result


#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  constants.QR_OS: _OsQuery,
  }

assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_VIA_OP}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
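
# For instance, _GetQueryImplementation(constants.QR_INSTANCE) returns the
# _InstanceQuery class registered in _QUERY_IMPL above.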