root / lib / cmdlib.py @ a7f5dc98
History | View | Annotate | Download (232.4 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2006, 2007, 2008 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Module implementing the master-side code."""
|
23 |
|
24 |
# pylint: disable-msg=W0613,W0201
|
25 |
|
26 |
import os |
27 |
import os.path |
28 |
import sha |
29 |
import time |
30 |
import tempfile |
31 |
import re |
32 |
import platform |
33 |
import logging |
34 |
import copy |
35 |
import random |
36 |
|
37 |
from ganeti import ssh |
38 |
from ganeti import utils |
39 |
from ganeti import errors |
40 |
from ganeti import hypervisor |
41 |
from ganeti import locking |
42 |
from ganeti import constants |
43 |
from ganeti import objects |
44 |
from ganeti import opcodes |
45 |
from ganeti import serializer |
46 |
from ganeti import ssconf |
47 |
|
48 |
|
49 |
class LogicalUnit(object): |
50 |
"""Logical Unit base class.
|
51 |
|
52 |
Subclasses must follow these rules:
|
53 |
- implement ExpandNames
|
54 |
- implement CheckPrereq
|
55 |
- implement Exec
|
56 |
- implement BuildHooksEnv
|
57 |
- redefine HPATH and HTYPE
|
58 |
- optionally redefine their run requirements:
|
59 |
REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
|
60 |
|
61 |
Note that all commands require root permissions.
|
62 |
|
63 |
"""
|
64 |
HPATH = None
|
65 |
HTYPE = None
|
66 |
_OP_REQP = [] |
67 |
REQ_BGL = True
|
68 |
|
69 |
def __init__(self, processor, op, context, rpc): |
70 |
"""Constructor for LogicalUnit.
|
71 |
|
72 |
This needs to be overriden in derived classes in order to check op
|
73 |
validity.
|
74 |
|
75 |
"""
|
76 |
self.proc = processor
|
77 |
self.op = op
|
78 |
self.cfg = context.cfg
|
79 |
self.context = context
|
80 |
self.rpc = rpc
|
81 |
# Dicts used to declare locking needs to mcpu
|
82 |
self.needed_locks = None |
83 |
self.acquired_locks = {}
|
84 |
self.share_locks = dict(((i, 0) for i in locking.LEVELS)) |
85 |
self.add_locks = {}
|
86 |
self.remove_locks = {}
|
87 |
# Used to force good behavior when calling helper functions
|
88 |
self.recalculate_locks = {}
|
89 |
self.__ssh = None |
90 |
# logging
|
91 |
self.LogWarning = processor.LogWarning
|
92 |
self.LogInfo = processor.LogInfo
|
93 |
|
94 |
for attr_name in self._OP_REQP: |
95 |
attr_val = getattr(op, attr_name, None) |
96 |
if attr_val is None: |
97 |
raise errors.OpPrereqError("Required parameter '%s' missing" % |
98 |
attr_name) |
99 |
self.CheckArguments()
|
100 |
|
101 |
def __GetSSH(self): |
102 |
"""Returns the SshRunner object
|
103 |
|
104 |
"""
|
105 |
if not self.__ssh: |
106 |
self.__ssh = ssh.SshRunner(self.cfg.GetClusterName()) |
107 |
return self.__ssh |
108 |
|
109 |
ssh = property(fget=__GetSSH)
|
110 |
|
111 |
def CheckArguments(self): |
112 |
"""Check syntactic validity for the opcode arguments.
|
113 |
|
114 |
This method is for doing a simple syntactic check and ensure
|
115 |
validity of opcode parameters, without any cluster-related
|
116 |
checks. While the same can be accomplished in ExpandNames and/or
|
117 |
CheckPrereq, doing these separate is better because:
|
118 |
|
119 |
- ExpandNames is left as as purely a lock-related function
|
120 |
- CheckPrereq is run after we have aquired locks (and possible
|
121 |
waited for them)
|
122 |
|
123 |
The function is allowed to change the self.op attribute so that
|
124 |
later methods can no longer worry about missing parameters.
|
125 |
|
126 |
"""
|
127 |
pass
|
128 |
|
129 |
def ExpandNames(self): |
130 |
"""Expand names for this LU.
|
131 |
|
132 |
This method is called before starting to execute the opcode, and it should
|
133 |
update all the parameters of the opcode to their canonical form (e.g. a
|
134 |
short node name must be fully expanded after this method has successfully
|
135 |
completed). This way locking, hooks, logging, ecc. can work correctly.
|
136 |
|
137 |
LUs which implement this method must also populate the self.needed_locks
|
138 |
member, as a dict with lock levels as keys, and a list of needed lock names
|
139 |
as values. Rules:
|
140 |
|
141 |
- use an empty dict if you don't need any lock
|
142 |
- if you don't need any lock at a particular level omit that level
|
143 |
- don't put anything for the BGL level
|
144 |
- if you want all locks at a level use locking.ALL_SET as a value
|
145 |
|
146 |
If you need to share locks (rather than acquire them exclusively) at one
|
147 |
level you can modify self.share_locks, setting a true value (usually 1) for
|
148 |
that level. By default locks are not shared.
|
149 |
|
150 |
Examples::
|
151 |
|
152 |
# Acquire all nodes and one instance
|
153 |
self.needed_locks = {
|
154 |
locking.LEVEL_NODE: locking.ALL_SET,
|
155 |
locking.LEVEL_INSTANCE: ['instance1.example.tld'],
|
156 |
}
|
157 |
# Acquire just two nodes
|
158 |
self.needed_locks = {
|
159 |
locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
|
160 |
}
|
161 |
# Acquire no locks
|
162 |
self.needed_locks = {} # No, you can't leave it to the default value None
|
163 |
|
164 |
"""
|
165 |
# The implementation of this method is mandatory only if the new LU is
|
166 |
# concurrent, so that old LUs don't need to be changed all at the same
|
167 |
# time.
|
168 |
if self.REQ_BGL: |
169 |
self.needed_locks = {} # Exclusive LUs don't need locks. |
170 |
else:
|
171 |
raise NotImplementedError |
172 |
|
173 |
def DeclareLocks(self, level): |
174 |
"""Declare LU locking needs for a level
|
175 |
|
176 |
While most LUs can just declare their locking needs at ExpandNames time,
|
177 |
sometimes there's the need to calculate some locks after having acquired
|
178 |
the ones before. This function is called just before acquiring locks at a
|
179 |
particular level, but after acquiring the ones at lower levels, and permits
|
180 |
such calculations. It can be used to modify self.needed_locks, and by
|
181 |
default it does nothing.
|
182 |
|
183 |
This function is only called if you have something already set in
|
184 |
self.needed_locks for the level.
|
185 |
|
186 |
@param level: Locking level which is going to be locked
|
187 |
@type level: member of ganeti.locking.LEVELS
|
188 |
|
189 |
"""
|
190 |
|
191 |
def CheckPrereq(self): |
192 |
"""Check prerequisites for this LU.
|
193 |
|
194 |
This method should check that the prerequisites for the execution
|
195 |
of this LU are fulfilled. It can do internode communication, but
|
196 |
it should be idempotent - no cluster or system changes are
|
197 |
allowed.
|
198 |
|
199 |
The method should raise errors.OpPrereqError in case something is
|
200 |
not fulfilled. Its return value is ignored.
|
201 |
|
202 |
This method should also update all the parameters of the opcode to
|
203 |
their canonical form if it hasn't been done by ExpandNames before.
|
204 |
|
205 |
"""
|
206 |
raise NotImplementedError |
207 |
|
208 |
def Exec(self, feedback_fn): |
209 |
"""Execute the LU.
|
210 |
|
211 |
This method should implement the actual work. It should raise
|
212 |
errors.OpExecError for failures that are somewhat dealt with in
|
213 |
code, or expected.
|
214 |
|
215 |
"""
|
216 |
raise NotImplementedError |
217 |
|
218 |
def BuildHooksEnv(self): |
219 |
"""Build hooks environment for this LU.
|
220 |
|
221 |
This method should return a three-node tuple consisting of: a dict
|
222 |
containing the environment that will be used for running the
|
223 |
specific hook for this LU, a list of node names on which the hook
|
224 |
should run before the execution, and a list of node names on which
|
225 |
the hook should run after the execution.
|
226 |
|
227 |
The keys of the dict must not have 'GANETI_' prefixed as this will
|
228 |
be handled in the hooks runner. Also note additional keys will be
|
229 |
added by the hooks runner. If the LU doesn't define any
|
230 |
environment, an empty dict (and not None) should be returned.
|
231 |
|
232 |
No nodes should be returned as an empty list (and not None).
|
233 |
|
234 |
Note that if the HPATH for a LU class is None, this function will
|
235 |
not be called.
|
236 |
|
237 |
"""
|
238 |
raise NotImplementedError |
239 |
|
240 |
def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result): |
241 |
"""Notify the LU about the results of its hooks.
|
242 |
|
243 |
This method is called every time a hooks phase is executed, and notifies
|
244 |
the Logical Unit about the hooks' result. The LU can then use it to alter
|
245 |
its result based on the hooks. By default the method does nothing and the
|
246 |
previous result is passed back unchanged but any LU can define it if it
|
247 |
wants to use the local cluster hook-scripts somehow.
|
248 |
|
249 |
@param phase: one of L{constants.HOOKS_PHASE_POST} or
|
250 |
L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
|
251 |
@param hook_results: the results of the multi-node hooks rpc call
|
252 |
@param feedback_fn: function used send feedback back to the caller
|
253 |
@param lu_result: the previous Exec result this LU had, or None
|
254 |
in the PRE phase
|
255 |
@return: the new Exec result, based on the previous result
|
256 |
and hook results
|
257 |
|
258 |
"""
|
259 |
return lu_result
|
260 |
|
261 |
def _ExpandAndLockInstance(self): |
262 |
"""Helper function to expand and lock an instance.
|
263 |
|
264 |
Many LUs that work on an instance take its name in self.op.instance_name
|
265 |
and need to expand it and then declare the expanded name for locking. This
|
266 |
function does it, and then updates self.op.instance_name to the expanded
|
267 |
name. It also initializes needed_locks as a dict, if this hasn't been done
|
268 |
before.
|
269 |
|
270 |
"""
|
271 |
if self.needed_locks is None: |
272 |
self.needed_locks = {}
|
273 |
else:
|
274 |
assert locking.LEVEL_INSTANCE not in self.needed_locks, \ |
275 |
"_ExpandAndLockInstance called with instance-level locks set"
|
276 |
expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name) |
277 |
if expanded_name is None: |
278 |
raise errors.OpPrereqError("Instance '%s' not known" % |
279 |
self.op.instance_name)
|
280 |
self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
|
281 |
self.op.instance_name = expanded_name
|
282 |
|
283 |
def _LockInstancesNodes(self, primary_only=False): |
284 |
"""Helper function to declare instances' nodes for locking.
|
285 |
|
286 |
This function should be called after locking one or more instances to lock
|
287 |
their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
|
288 |
with all primary or secondary nodes for instances already locked and
|
289 |
present in self.needed_locks[locking.LEVEL_INSTANCE].
|
290 |
|
291 |
It should be called from DeclareLocks, and for safety only works if
|
292 |
self.recalculate_locks[locking.LEVEL_NODE] is set.
|
293 |
|
294 |
In the future it may grow parameters to just lock some instance's nodes, or
|
295 |
to just lock primaries or secondary nodes, if needed.
|
296 |
|
297 |
If should be called in DeclareLocks in a way similar to::
|
298 |
|
299 |
if level == locking.LEVEL_NODE:
|
300 |
self._LockInstancesNodes()
|
301 |
|
302 |
@type primary_only: boolean
|
303 |
@param primary_only: only lock primary nodes of locked instances
|
304 |
|
305 |
"""
|
306 |
assert locking.LEVEL_NODE in self.recalculate_locks, \ |
307 |
"_LockInstancesNodes helper function called with no nodes to recalculate"
|
308 |
|
309 |
# TODO: check if we're really been called with the instance locks held
|
310 |
|
311 |
# For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
|
312 |
# future we might want to have different behaviors depending on the value
|
313 |
# of self.recalculate_locks[locking.LEVEL_NODE]
|
314 |
wanted_nodes = [] |
315 |
for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]: |
316 |
instance = self.context.cfg.GetInstanceInfo(instance_name)
|
317 |
wanted_nodes.append(instance.primary_node) |
318 |
if not primary_only: |
319 |
wanted_nodes.extend(instance.secondary_nodes) |
320 |
|
321 |
if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE: |
322 |
self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
|
323 |
elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND: |
324 |
self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
|
325 |
|
326 |
del self.recalculate_locks[locking.LEVEL_NODE] |
327 |
|
328 |
|
329 |
class NoHooksLU(LogicalUnit): |
330 |
"""Simple LU which runs no hooks.
|
331 |
|
332 |
This LU is intended as a parent for other LogicalUnits which will
|
333 |
run no hooks, in order to reduce duplicate code.
|
334 |
|
335 |
"""
|
336 |
HPATH = None
|
337 |
HTYPE = None
|
338 |
|
339 |
|
340 |
def _GetWantedNodes(lu, nodes): |
341 |
"""Returns list of checked and expanded node names.
|
342 |
|
343 |
@type lu: L{LogicalUnit}
|
344 |
@param lu: the logical unit on whose behalf we execute
|
345 |
@type nodes: list
|
346 |
@param nodes: list of node names or None for all nodes
|
347 |
@rtype: list
|
348 |
@return: the list of nodes, sorted
|
349 |
@raise errors.OpProgrammerError: if the nodes parameter is wrong type
|
350 |
|
351 |
"""
|
352 |
if not isinstance(nodes, list): |
353 |
raise errors.OpPrereqError("Invalid argument type 'nodes'") |
354 |
|
355 |
if not nodes: |
356 |
raise errors.ProgrammerError("_GetWantedNodes should only be called with a" |
357 |
" non-empty list of nodes whose name is to be expanded.")
|
358 |
|
359 |
wanted = [] |
360 |
for name in nodes: |
361 |
node = lu.cfg.ExpandNodeName(name) |
362 |
if node is None: |
363 |
raise errors.OpPrereqError("No such node name '%s'" % name) |
364 |
wanted.append(node) |
365 |
|
366 |
return utils.NiceSort(wanted)
|
367 |
|
368 |
|
369 |
def _GetWantedInstances(lu, instances): |
370 |
"""Returns list of checked and expanded instance names.
|
371 |
|
372 |
@type lu: L{LogicalUnit}
|
373 |
@param lu: the logical unit on whose behalf we execute
|
374 |
@type instances: list
|
375 |
@param instances: list of instance names or None for all instances
|
376 |
@rtype: list
|
377 |
@return: the list of instances, sorted
|
378 |
@raise errors.OpPrereqError: if the instances parameter is wrong type
|
379 |
@raise errors.OpPrereqError: if any of the passed instances is not found
|
380 |
|
381 |
"""
|
382 |
if not isinstance(instances, list): |
383 |
raise errors.OpPrereqError("Invalid argument type 'instances'") |
384 |
|
385 |
if instances:
|
386 |
wanted = [] |
387 |
|
388 |
for name in instances: |
389 |
instance = lu.cfg.ExpandInstanceName(name) |
390 |
if instance is None: |
391 |
raise errors.OpPrereqError("No such instance name '%s'" % name) |
392 |
wanted.append(instance) |
393 |
|
394 |
else:
|
395 |
wanted = utils.NiceSort(lu.cfg.GetInstanceList()) |
396 |
return wanted
|
397 |
|
398 |
|
399 |
def _CheckOutputFields(static, dynamic, selected): |
400 |
"""Checks whether all selected fields are valid.
|
401 |
|
402 |
@type static: L{utils.FieldSet}
|
403 |
@param static: static fields set
|
404 |
@type dynamic: L{utils.FieldSet}
|
405 |
@param dynamic: dynamic fields set
|
406 |
|
407 |
"""
|
408 |
f = utils.FieldSet() |
409 |
f.Extend(static) |
410 |
f.Extend(dynamic) |
411 |
|
412 |
delta = f.NonMatching(selected) |
413 |
if delta:
|
414 |
raise errors.OpPrereqError("Unknown output fields selected: %s" |
415 |
% ",".join(delta))
|
416 |
|
417 |
|
418 |
def _CheckBooleanOpField(op, name): |
419 |
"""Validates boolean opcode parameters.
|
420 |
|
421 |
This will ensure that an opcode parameter is either a boolean value,
|
422 |
or None (but that it always exists).
|
423 |
|
424 |
"""
|
425 |
val = getattr(op, name, None) |
426 |
if not (val is None or isinstance(val, bool)): |
427 |
raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" % |
428 |
(name, str(val)))
|
429 |
setattr(op, name, val)
|
430 |
|
431 |
|
432 |
def _CheckNodeOnline(lu, node): |
433 |
"""Ensure that a given node is online.
|
434 |
|
435 |
@param lu: the LU on behalf of which we make the check
|
436 |
@param node: the node to check
|
437 |
@raise errors.OpPrereqError: if the nodes is offline
|
438 |
|
439 |
"""
|
440 |
if lu.cfg.GetNodeInfo(node).offline:
|
441 |
raise errors.OpPrereqError("Can't use offline node %s" % node) |
442 |
|
443 |
|
444 |
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status, |
445 |
memory, vcpus, nics): |
446 |
"""Builds instance related env variables for hooks
|
447 |
|
448 |
This builds the hook environment from individual variables.
|
449 |
|
450 |
@type name: string
|
451 |
@param name: the name of the instance
|
452 |
@type primary_node: string
|
453 |
@param primary_node: the name of the instance's primary node
|
454 |
@type secondary_nodes: list
|
455 |
@param secondary_nodes: list of secondary nodes as strings
|
456 |
@type os_type: string
|
457 |
@param os_type: the name of the instance's OS
|
458 |
@type status: boolean
|
459 |
@param status: the should_run status of the instance
|
460 |
@type memory: string
|
461 |
@param memory: the memory size of the instance
|
462 |
@type vcpus: string
|
463 |
@param vcpus: the count of VCPUs the instance has
|
464 |
@type nics: list
|
465 |
@param nics: list of tuples (ip, bridge, mac) representing
|
466 |
the NICs the instance has
|
467 |
@rtype: dict
|
468 |
@return: the hook environment for this instance
|
469 |
|
470 |
"""
|
471 |
if status:
|
472 |
str_status = "up"
|
473 |
else:
|
474 |
str_status = "down"
|
475 |
env = { |
476 |
"OP_TARGET": name,
|
477 |
"INSTANCE_NAME": name,
|
478 |
"INSTANCE_PRIMARY": primary_node,
|
479 |
"INSTANCE_SECONDARIES": " ".join(secondary_nodes), |
480 |
"INSTANCE_OS_TYPE": os_type,
|
481 |
"INSTANCE_STATUS": str_status,
|
482 |
"INSTANCE_MEMORY": memory,
|
483 |
"INSTANCE_VCPUS": vcpus,
|
484 |
} |
485 |
|
486 |
if nics:
|
487 |
nic_count = len(nics)
|
488 |
for idx, (ip, bridge, mac) in enumerate(nics): |
489 |
if ip is None: |
490 |
ip = ""
|
491 |
env["INSTANCE_NIC%d_IP" % idx] = ip
|
492 |
env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
|
493 |
env["INSTANCE_NIC%d_HWADDR" % idx] = mac
|
494 |
else:
|
495 |
nic_count = 0
|
496 |
|
497 |
env["INSTANCE_NIC_COUNT"] = nic_count
|
498 |
|
499 |
return env
|
500 |
|
501 |
|
502 |
def _BuildInstanceHookEnvByObject(lu, instance, override=None): |
503 |
"""Builds instance related env variables for hooks from an object.
|
504 |
|
505 |
@type lu: L{LogicalUnit}
|
506 |
@param lu: the logical unit on whose behalf we execute
|
507 |
@type instance: L{objects.Instance}
|
508 |
@param instance: the instance for which we should build the
|
509 |
environment
|
510 |
@type override: dict
|
511 |
@param override: dictionary with key/values that will override
|
512 |
our values
|
513 |
@rtype: dict
|
514 |
@return: the hook environment dictionary
|
515 |
|
516 |
"""
|
517 |
bep = lu.cfg.GetClusterInfo().FillBE(instance) |
518 |
args = { |
519 |
'name': instance.name,
|
520 |
'primary_node': instance.primary_node,
|
521 |
'secondary_nodes': instance.secondary_nodes,
|
522 |
'os_type': instance.os,
|
523 |
'status': instance.admin_up,
|
524 |
'memory': bep[constants.BE_MEMORY],
|
525 |
'vcpus': bep[constants.BE_VCPUS],
|
526 |
'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics], |
527 |
} |
528 |
if override:
|
529 |
args.update(override) |
530 |
return _BuildInstanceHookEnv(**args)
|
531 |
|
532 |
|
533 |
def _AdjustCandidatePool(lu): |
534 |
"""Adjust the candidate pool after node operations.
|
535 |
|
536 |
"""
|
537 |
mod_list = lu.cfg.MaintainCandidatePool() |
538 |
if mod_list:
|
539 |
lu.LogInfo("Promoted nodes to master candidate role: %s",
|
540 |
", ".join(node.name for node in mod_list)) |
541 |
for name in mod_list: |
542 |
lu.context.ReaddNode(name) |
543 |
mc_now, mc_max = lu.cfg.GetMasterCandidateStats() |
544 |
if mc_now > mc_max:
|
545 |
lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
|
546 |
(mc_now, mc_max)) |
547 |
|
548 |
|
549 |
def _CheckInstanceBridgesExist(lu, instance): |
550 |
"""Check that the brigdes needed by an instance exist.
|
551 |
|
552 |
"""
|
553 |
# check bridges existance
|
554 |
brlist = [nic.bridge for nic in instance.nics] |
555 |
result = lu.rpc.call_bridges_exist(instance.primary_node, brlist) |
556 |
result.Raise() |
557 |
if not result.data: |
558 |
raise errors.OpPrereqError("One or more target bridges %s does not" |
559 |
" exist on destination node '%s'" %
|
560 |
(brlist, instance.primary_node)) |
561 |
|
562 |
|
563 |
class LUDestroyCluster(NoHooksLU): |
564 |
"""Logical unit for destroying the cluster.
|
565 |
|
566 |
"""
|
567 |
_OP_REQP = [] |
568 |
|
569 |
def CheckPrereq(self): |
570 |
"""Check prerequisites.
|
571 |
|
572 |
This checks whether the cluster is empty.
|
573 |
|
574 |
Any errors are signalled by raising errors.OpPrereqError.
|
575 |
|
576 |
"""
|
577 |
master = self.cfg.GetMasterNode()
|
578 |
|
579 |
nodelist = self.cfg.GetNodeList()
|
580 |
if len(nodelist) != 1 or nodelist[0] != master: |
581 |
raise errors.OpPrereqError("There are still %d node(s) in" |
582 |
" this cluster." % (len(nodelist) - 1)) |
583 |
instancelist = self.cfg.GetInstanceList()
|
584 |
if instancelist:
|
585 |
raise errors.OpPrereqError("There are still %d instance(s) in" |
586 |
" this cluster." % len(instancelist)) |
587 |
|
588 |
def Exec(self, feedback_fn): |
589 |
"""Destroys the cluster.
|
590 |
|
591 |
"""
|
592 |
master = self.cfg.GetMasterNode()
|
593 |
result = self.rpc.call_node_stop_master(master, False) |
594 |
result.Raise() |
595 |
if not result.data: |
596 |
raise errors.OpExecError("Could not disable the master role") |
597 |
priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS) |
598 |
utils.CreateBackup(priv_key) |
599 |
utils.CreateBackup(pub_key) |
600 |
return master
|
601 |
|
602 |
|
603 |
class LUVerifyCluster(LogicalUnit): |
604 |
"""Verifies the cluster status.
|
605 |
|
606 |
"""
|
607 |
HPATH = "cluster-verify"
|
608 |
HTYPE = constants.HTYPE_CLUSTER |
609 |
_OP_REQP = ["skip_checks"]
|
610 |
REQ_BGL = False
|
611 |
|
612 |
def ExpandNames(self): |
613 |
self.needed_locks = {
|
614 |
locking.LEVEL_NODE: locking.ALL_SET, |
615 |
locking.LEVEL_INSTANCE: locking.ALL_SET, |
616 |
} |
617 |
self.share_locks = dict(((i, 1) for i in locking.LEVELS)) |
618 |
|
619 |
def _VerifyNode(self, nodeinfo, file_list, local_cksum, |
620 |
node_result, feedback_fn, master_files, |
621 |
drbd_map): |
622 |
"""Run multiple tests against a node.
|
623 |
|
624 |
Test list:
|
625 |
|
626 |
- compares ganeti version
|
627 |
- checks vg existance and size > 20G
|
628 |
- checks config file checksum
|
629 |
- checks ssh to other nodes
|
630 |
|
631 |
@type nodeinfo: L{objects.Node}
|
632 |
@param nodeinfo: the node to check
|
633 |
@param file_list: required list of files
|
634 |
@param local_cksum: dictionary of local files and their checksums
|
635 |
@param node_result: the results from the node
|
636 |
@param feedback_fn: function used to accumulate results
|
637 |
@param master_files: list of files that only masters should have
|
638 |
@param drbd_map: the useddrbd minors for this node, in
|
639 |
form of minor: (instance, must_exist) which correspond to instances
|
640 |
and their running status
|
641 |
|
642 |
"""
|
643 |
node = nodeinfo.name |
644 |
|
645 |
# main result, node_result should be a non-empty dict
|
646 |
if not node_result or not isinstance(node_result, dict): |
647 |
feedback_fn(" - ERROR: unable to verify node %s." % (node,))
|
648 |
return True |
649 |
|
650 |
# compares ganeti version
|
651 |
local_version = constants.PROTOCOL_VERSION |
652 |
remote_version = node_result.get('version', None) |
653 |
if not remote_version: |
654 |
feedback_fn(" - ERROR: connection to %s failed" % (node))
|
655 |
return True |
656 |
|
657 |
if local_version != remote_version:
|
658 |
feedback_fn(" - ERROR: sw version mismatch: master %s, node(%s) %s" %
|
659 |
(local_version, node, remote_version)) |
660 |
return True |
661 |
|
662 |
# checks vg existance and size > 20G
|
663 |
|
664 |
bad = False
|
665 |
vglist = node_result.get(constants.NV_VGLIST, None)
|
666 |
if not vglist: |
667 |
feedback_fn(" - ERROR: unable to check volume groups on node %s." %
|
668 |
(node,)) |
669 |
bad = True
|
670 |
else:
|
671 |
vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
|
672 |
constants.MIN_VG_SIZE) |
673 |
if vgstatus:
|
674 |
feedback_fn(" - ERROR: %s on node %s" % (vgstatus, node))
|
675 |
bad = True
|
676 |
|
677 |
# checks config file checksum
|
678 |
|
679 |
remote_cksum = node_result.get(constants.NV_FILELIST, None)
|
680 |
if not isinstance(remote_cksum, dict): |
681 |
bad = True
|
682 |
feedback_fn(" - ERROR: node hasn't returned file checksum data")
|
683 |
else:
|
684 |
for file_name in file_list: |
685 |
node_is_mc = nodeinfo.master_candidate |
686 |
must_have_file = file_name not in master_files |
687 |
if file_name not in remote_cksum: |
688 |
if node_is_mc or must_have_file: |
689 |
bad = True
|
690 |
feedback_fn(" - ERROR: file '%s' missing" % file_name)
|
691 |
elif remote_cksum[file_name] != local_cksum[file_name]:
|
692 |
if node_is_mc or must_have_file: |
693 |
bad = True
|
694 |
feedback_fn(" - ERROR: file '%s' has wrong checksum" % file_name)
|
695 |
else:
|
696 |
# not candidate and this is not a must-have file
|
697 |
bad = True
|
698 |
feedback_fn(" - ERROR: non master-candidate has old/wrong file"
|
699 |
" '%s'" % file_name)
|
700 |
else:
|
701 |
# all good, except non-master/non-must have combination
|
702 |
if not node_is_mc and not must_have_file: |
703 |
feedback_fn(" - ERROR: file '%s' should not exist on non master"
|
704 |
" candidates" % file_name)
|
705 |
|
706 |
# checks ssh to any
|
707 |
|
708 |
if constants.NV_NODELIST not in node_result: |
709 |
bad = True
|
710 |
feedback_fn(" - ERROR: node hasn't returned node ssh connectivity data")
|
711 |
else:
|
712 |
if node_result[constants.NV_NODELIST]:
|
713 |
bad = True
|
714 |
for node in node_result[constants.NV_NODELIST]: |
715 |
feedback_fn(" - ERROR: ssh communication with node '%s': %s" %
|
716 |
(node, node_result[constants.NV_NODELIST][node])) |
717 |
|
718 |
if constants.NV_NODENETTEST not in node_result: |
719 |
bad = True
|
720 |
feedback_fn(" - ERROR: node hasn't returned node tcp connectivity data")
|
721 |
else:
|
722 |
if node_result[constants.NV_NODENETTEST]:
|
723 |
bad = True
|
724 |
nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys()) |
725 |
for node in nlist: |
726 |
feedback_fn(" - ERROR: tcp communication with node '%s': %s" %
|
727 |
(node, node_result[constants.NV_NODENETTEST][node])) |
728 |
|
729 |
hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
|
730 |
if isinstance(hyp_result, dict): |
731 |
for hv_name, hv_result in hyp_result.iteritems(): |
732 |
if hv_result is not None: |
733 |
feedback_fn(" - ERROR: hypervisor %s verify failure: '%s'" %
|
734 |
(hv_name, hv_result)) |
735 |
|
736 |
# check used drbd list
|
737 |
used_minors = node_result.get(constants.NV_DRBDLIST, []) |
738 |
for minor, (iname, must_exist) in drbd_map.items(): |
739 |
if minor not in used_minors and must_exist: |
740 |
feedback_fn(" - ERROR: drbd minor %d of instance %s is not active" %
|
741 |
(minor, iname)) |
742 |
bad = True
|
743 |
for minor in used_minors: |
744 |
if minor not in drbd_map: |
745 |
feedback_fn(" - ERROR: unallocated drbd minor %d is in use" % minor)
|
746 |
bad = True
|
747 |
|
748 |
return bad
|
749 |
|
750 |
def _VerifyInstance(self, instance, instanceconfig, node_vol_is, |
751 |
node_instance, feedback_fn, n_offline): |
752 |
"""Verify an instance.
|
753 |
|
754 |
This function checks to see if the required block devices are
|
755 |
available on the instance's node.
|
756 |
|
757 |
"""
|
758 |
bad = False
|
759 |
|
760 |
node_current = instanceconfig.primary_node |
761 |
|
762 |
node_vol_should = {} |
763 |
instanceconfig.MapLVsByNode(node_vol_should) |
764 |
|
765 |
for node in node_vol_should: |
766 |
if node in n_offline: |
767 |
# ignore missing volumes on offline nodes
|
768 |
continue
|
769 |
for volume in node_vol_should[node]: |
770 |
if node not in node_vol_is or volume not in node_vol_is[node]: |
771 |
feedback_fn(" - ERROR: volume %s missing on node %s" %
|
772 |
(volume, node)) |
773 |
bad = True
|
774 |
|
775 |
if instanceconfig.admin_up:
|
776 |
if ((node_current not in node_instance or |
777 |
not instance in node_instance[node_current]) and |
778 |
node_current not in n_offline): |
779 |
feedback_fn(" - ERROR: instance %s not running on node %s" %
|
780 |
(instance, node_current)) |
781 |
bad = True
|
782 |
|
783 |
for node in node_instance: |
784 |
if (not node == node_current): |
785 |
if instance in node_instance[node]: |
786 |
feedback_fn(" - ERROR: instance %s should not run on node %s" %
|
787 |
(instance, node)) |
788 |
bad = True
|
789 |
|
790 |
return bad
|
791 |
|
792 |
def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn): |
793 |
"""Verify if there are any unknown volumes in the cluster.
|
794 |
|
795 |
The .os, .swap and backup volumes are ignored. All other volumes are
|
796 |
reported as unknown.
|
797 |
|
798 |
"""
|
799 |
bad = False
|
800 |
|
801 |
for node in node_vol_is: |
802 |
for volume in node_vol_is[node]: |
803 |
if node not in node_vol_should or volume not in node_vol_should[node]: |
804 |
feedback_fn(" - ERROR: volume %s on node %s should not exist" %
|
805 |
(volume, node)) |
806 |
bad = True
|
807 |
return bad
|
808 |
|
809 |
def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn): |
810 |
"""Verify the list of running instances.
|
811 |
|
812 |
This checks what instances are running but unknown to the cluster.
|
813 |
|
814 |
"""
|
815 |
bad = False
|
816 |
for node in node_instance: |
817 |
for runninginstance in node_instance[node]: |
818 |
if runninginstance not in instancelist: |
819 |
feedback_fn(" - ERROR: instance %s on node %s should not exist" %
|
820 |
(runninginstance, node)) |
821 |
bad = True
|
822 |
return bad
|
823 |
|
824 |
def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn): |
825 |
"""Verify N+1 Memory Resilience.
|
826 |
|
827 |
Check that if one single node dies we can still start all the instances it
|
828 |
was primary for.
|
829 |
|
830 |
"""
|
831 |
bad = False
|
832 |
|
833 |
for node, nodeinfo in node_info.iteritems(): |
834 |
# This code checks that every node which is now listed as secondary has
|
835 |
# enough memory to host all instances it is supposed to should a single
|
836 |
# other node in the cluster fail.
|
837 |
# FIXME: not ready for failover to an arbitrary node
|
838 |
# FIXME: does not support file-backed instances
|
839 |
# WARNING: we currently take into account down instances as well as up
|
840 |
# ones, considering that even if they're down someone might want to start
|
841 |
# them even in the event of a node failure.
|
842 |
for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems(): |
843 |
needed_mem = 0
|
844 |
for instance in instances: |
845 |
bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
|
846 |
if bep[constants.BE_AUTO_BALANCE]:
|
847 |
needed_mem += bep[constants.BE_MEMORY] |
848 |
if nodeinfo['mfree'] < needed_mem: |
849 |
feedback_fn(" - ERROR: not enough memory on node %s to accomodate"
|
850 |
" failovers should node %s fail" % (node, prinode))
|
851 |
bad = True
|
852 |
return bad
|
853 |
|
854 |
def CheckPrereq(self): |
855 |
"""Check prerequisites.
|
856 |
|
857 |
Transform the list of checks we're going to skip into a set and check that
|
858 |
all its members are valid.
|
859 |
|
860 |
"""
|
861 |
self.skip_set = frozenset(self.op.skip_checks) |
862 |
if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set): |
863 |
raise errors.OpPrereqError("Invalid checks to be skipped specified") |
864 |
|
865 |
def BuildHooksEnv(self): |
866 |
"""Build hooks env.
|
867 |
|
868 |
Cluster-Verify hooks just rone in the post phase and their failure makes
|
869 |
the output be logged in the verify output and the verification to fail.
|
870 |
|
871 |
"""
|
872 |
all_nodes = self.cfg.GetNodeList()
|
873 |
# TODO: populate the environment with useful information for verify hooks
|
874 |
env = {} |
875 |
return env, [], all_nodes
|
876 |
|
877 |
def Exec(self, feedback_fn): |
878 |
"""Verify integrity of cluster, performing various test on nodes.
|
879 |
|
880 |
"""
|
881 |
bad = False
|
882 |
feedback_fn("* Verifying global settings")
|
883 |
for msg in self.cfg.VerifyConfig(): |
884 |
feedback_fn(" - ERROR: %s" % msg)
|
885 |
|
886 |
vg_name = self.cfg.GetVGName()
|
887 |
hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
|
888 |
nodelist = utils.NiceSort(self.cfg.GetNodeList())
|
889 |
nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist] |
890 |
instancelist = utils.NiceSort(self.cfg.GetInstanceList())
|
891 |
instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname)) |
892 |
for iname in instancelist) |
893 |
i_non_redundant = [] # Non redundant instances
|
894 |
i_non_a_balanced = [] # Non auto-balanced instances
|
895 |
n_offline = [] # List of offline nodes
|
896 |
node_volume = {} |
897 |
node_instance = {} |
898 |
node_info = {} |
899 |
instance_cfg = {} |
900 |
|
901 |
# FIXME: verify OS list
|
902 |
# do local checksums
|
903 |
master_files = [constants.CLUSTER_CONF_FILE] |
904 |
|
905 |
file_names = ssconf.SimpleStore().GetFileList() |
906 |
file_names.append(constants.SSL_CERT_FILE) |
907 |
file_names.append(constants.RAPI_CERT_FILE) |
908 |
file_names.extend(master_files) |
909 |
|
910 |
local_checksums = utils.FingerprintFiles(file_names) |
911 |
|
912 |
feedback_fn("* Gathering data (%d nodes)" % len(nodelist)) |
913 |
node_verify_param = { |
914 |
constants.NV_FILELIST: file_names, |
915 |
constants.NV_NODELIST: [node.name for node in nodeinfo |
916 |
if not node.offline], |
917 |
constants.NV_HYPERVISOR: hypervisors, |
918 |
constants.NV_NODENETTEST: [(node.name, node.primary_ip, |
919 |
node.secondary_ip) for node in nodeinfo |
920 |
if not node.offline], |
921 |
constants.NV_LVLIST: vg_name, |
922 |
constants.NV_INSTANCELIST: hypervisors, |
923 |
constants.NV_VGLIST: None,
|
924 |
constants.NV_VERSION: None,
|
925 |
constants.NV_HVINFO: self.cfg.GetHypervisorType(),
|
926 |
constants.NV_DRBDLIST: None,
|
927 |
} |
928 |
all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
|
929 |
self.cfg.GetClusterName())
|
930 |
|
931 |
cluster = self.cfg.GetClusterInfo()
|
932 |
master_node = self.cfg.GetMasterNode()
|
933 |
all_drbd_map = self.cfg.ComputeDRBDMap()
|
934 |
|
935 |
for node_i in nodeinfo: |
936 |
node = node_i.name |
937 |
nresult = all_nvinfo[node].data |
938 |
|
939 |
if node_i.offline:
|
940 |
feedback_fn("* Skipping offline node %s" % (node,))
|
941 |
n_offline.append(node) |
942 |
continue
|
943 |
|
944 |
if node == master_node:
|
945 |
ntype = "master"
|
946 |
elif node_i.master_candidate:
|
947 |
ntype = "master candidate"
|
948 |
else:
|
949 |
ntype = "regular"
|
950 |
feedback_fn("* Verifying node %s (%s)" % (node, ntype))
|
951 |
|
952 |
if all_nvinfo[node].failed or not isinstance(nresult, dict): |
953 |
feedback_fn(" - ERROR: connection to %s failed" % (node,))
|
954 |
bad = True
|
955 |
continue
|
956 |
|
957 |
node_drbd = {} |
958 |
for minor, instance in all_drbd_map[node].items(): |
959 |
instance = instanceinfo[instance] |
960 |
node_drbd[minor] = (instance.name, instance.admin_up) |
961 |
result = self._VerifyNode(node_i, file_names, local_checksums,
|
962 |
nresult, feedback_fn, master_files, |
963 |
node_drbd) |
964 |
bad = bad or result
|
965 |
|
966 |
lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
|
967 |
if isinstance(lvdata, basestring): |
968 |
feedback_fn(" - ERROR: LVM problem on node %s: %s" %
|
969 |
(node, lvdata.encode('string_escape')))
|
970 |
bad = True
|
971 |
node_volume[node] = {} |
972 |
elif not isinstance(lvdata, dict): |
973 |
feedback_fn(" - ERROR: connection to %s failed (lvlist)" % (node,))
|
974 |
bad = True
|
975 |
continue
|
976 |
else:
|
977 |
node_volume[node] = lvdata |
978 |
|
979 |
# node_instance
|
980 |
idata = nresult.get(constants.NV_INSTANCELIST, None)
|
981 |
if not isinstance(idata, list): |
982 |
feedback_fn(" - ERROR: connection to %s failed (instancelist)" %
|
983 |
(node,)) |
984 |
bad = True
|
985 |
continue
|
986 |
|
987 |
node_instance[node] = idata |
988 |
|
989 |
# node_info
|
990 |
nodeinfo = nresult.get(constants.NV_HVINFO, None)
|
991 |
if not isinstance(nodeinfo, dict): |
992 |
feedback_fn(" - ERROR: connection to %s failed (hvinfo)" % (node,))
|
993 |
bad = True
|
994 |
continue
|
995 |
|
996 |
try:
|
997 |
node_info[node] = { |
998 |
"mfree": int(nodeinfo['memory_free']), |
999 |
"dfree": int(nresult[constants.NV_VGLIST][vg_name]), |
1000 |
"pinst": [],
|
1001 |
"sinst": [],
|
1002 |
# dictionary holding all instances this node is secondary for,
|
1003 |
# grouped by their primary node. Each key is a cluster node, and each
|
1004 |
# value is a list of instances which have the key as primary and the
|
1005 |
# current node as secondary. this is handy to calculate N+1 memory
|
1006 |
# availability if you can only failover from a primary to its
|
1007 |
# secondary.
|
1008 |
"sinst-by-pnode": {},
|
1009 |
} |
1010 |
except ValueError: |
1011 |
feedback_fn(" - ERROR: invalid value returned from node %s" % (node,))
|
1012 |
bad = True
|
1013 |
continue
|
1014 |
|
1015 |
node_vol_should = {} |
1016 |
|
1017 |
for instance in instancelist: |
1018 |
feedback_fn("* Verifying instance %s" % instance)
|
1019 |
inst_config = instanceinfo[instance] |
1020 |
result = self._VerifyInstance(instance, inst_config, node_volume,
|
1021 |
node_instance, feedback_fn, n_offline) |
1022 |
bad = bad or result
|
1023 |
inst_nodes_offline = [] |
1024 |
|
1025 |
inst_config.MapLVsByNode(node_vol_should) |
1026 |
|
1027 |
instance_cfg[instance] = inst_config |
1028 |
|
1029 |
pnode = inst_config.primary_node |
1030 |
if pnode in node_info: |
1031 |
node_info[pnode]['pinst'].append(instance)
|
1032 |
elif pnode not in n_offline: |
1033 |
feedback_fn(" - ERROR: instance %s, connection to primary node"
|
1034 |
" %s failed" % (instance, pnode))
|
1035 |
bad = True
|
1036 |
|
1037 |
if pnode in n_offline: |
1038 |
inst_nodes_offline.append(pnode) |
1039 |
|
1040 |
# If the instance is non-redundant we cannot survive losing its primary
|
1041 |
# node, so we are not N+1 compliant. On the other hand we have no disk
|
1042 |
# templates with more than one secondary so that situation is not well
|
1043 |
# supported either.
|
1044 |
# FIXME: does not support file-backed instances
|
1045 |
if len(inst_config.secondary_nodes) == 0: |
1046 |
i_non_redundant.append(instance) |
1047 |
elif len(inst_config.secondary_nodes) > 1: |
1048 |
feedback_fn(" - WARNING: multiple secondaries for instance %s"
|
1049 |
% instance) |
1050 |
|
1051 |
if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]: |
1052 |
i_non_a_balanced.append(instance) |
1053 |
|
1054 |
for snode in inst_config.secondary_nodes: |
1055 |
if snode in node_info: |
1056 |
node_info[snode]['sinst'].append(instance)
|
1057 |
if pnode not in node_info[snode]['sinst-by-pnode']: |
1058 |
node_info[snode]['sinst-by-pnode'][pnode] = []
|
1059 |
node_info[snode]['sinst-by-pnode'][pnode].append(instance)
|
1060 |
elif snode not in n_offline: |
1061 |
feedback_fn(" - ERROR: instance %s, connection to secondary node"
|
1062 |
" %s failed" % (instance, snode))
|
1063 |
bad = True
|
1064 |
if snode in n_offline: |
1065 |
inst_nodes_offline.append(snode) |
1066 |
|
1067 |
if inst_nodes_offline:
|
1068 |
# warn that the instance lives on offline nodes, and set bad=True
|
1069 |
feedback_fn(" - ERROR: instance lives on offline node(s) %s" %
|
1070 |
", ".join(inst_nodes_offline))
|
1071 |
bad = True
|
1072 |
|
1073 |
feedback_fn("* Verifying orphan volumes")
|
1074 |
result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
|
1075 |
feedback_fn) |
1076 |
bad = bad or result
|
1077 |
|
1078 |
feedback_fn("* Verifying remaining instances")
|
1079 |
result = self._VerifyOrphanInstances(instancelist, node_instance,
|
1080 |
feedback_fn) |
1081 |
bad = bad or result
|
1082 |
|
1083 |
if constants.VERIFY_NPLUSONE_MEM not in self.skip_set: |
1084 |
feedback_fn("* Verifying N+1 Memory redundancy")
|
1085 |
result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
|
1086 |
bad = bad or result
|
1087 |
|
1088 |
feedback_fn("* Other Notes")
|
1089 |
if i_non_redundant:
|
1090 |
feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
|
1091 |
% len(i_non_redundant))
|
1092 |
|
1093 |
if i_non_a_balanced:
|
1094 |
feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
|
1095 |
% len(i_non_a_balanced))
|
1096 |
|
1097 |
if n_offline:
|
1098 |
feedback_fn(" - NOTICE: %d offline node(s) found." % len(n_offline)) |
1099 |
|
1100 |
return not bad |
1101 |
|
1102 |
def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result): |
1103 |
"""Analize the post-hooks' result
|
1104 |
|
1105 |
This method analyses the hook result, handles it, and sends some
|
1106 |
nicely-formatted feedback back to the user.
|
1107 |
|
1108 |
@param phase: one of L{constants.HOOKS_PHASE_POST} or
|
1109 |
L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
|
1110 |
@param hooks_results: the results of the multi-node hooks rpc call
|
1111 |
@param feedback_fn: function used send feedback back to the caller
|
1112 |
@param lu_result: previous Exec result
|
1113 |
@return: the new Exec result, based on the previous result
|
1114 |
and hook results
|
1115 |
|
1116 |
"""
|
1117 |
# We only really run POST phase hooks, and are only interested in
|
1118 |
# their results
|
1119 |
if phase == constants.HOOKS_PHASE_POST:
|
1120 |
# Used to change hooks' output to proper indentation
|
1121 |
indent_re = re.compile('^', re.M)
|
1122 |
feedback_fn("* Hooks Results")
|
1123 |
if not hooks_results: |
1124 |
feedback_fn(" - ERROR: general communication failure")
|
1125 |
lu_result = 1
|
1126 |
else:
|
1127 |
for node_name in hooks_results: |
1128 |
show_node_header = True
|
1129 |
res = hooks_results[node_name] |
1130 |
if res.failed or res.data is False or not isinstance(res.data, list): |
1131 |
if res.offline:
|
1132 |
# no need to warn or set fail return value
|
1133 |
continue
|
1134 |
feedback_fn(" Communication failure in hooks execution")
|
1135 |
lu_result = 1
|
1136 |
continue
|
1137 |
for script, hkr, output in res.data: |
1138 |
if hkr == constants.HKR_FAIL:
|
1139 |
# The node header is only shown once, if there are
|
1140 |
# failing hooks on that node
|
1141 |
if show_node_header:
|
1142 |
feedback_fn(" Node %s:" % node_name)
|
1143 |
show_node_header = False
|
1144 |
feedback_fn(" ERROR: Script %s failed, output:" % script)
|
1145 |
output = indent_re.sub(' ', output)
|
1146 |
feedback_fn("%s" % output)
|
1147 |
lu_result = 1
|
1148 |
|
1149 |
return lu_result
|
1150 |
|
1151 |
|
1152 |
class LUVerifyDisks(NoHooksLU): |
1153 |
"""Verifies the cluster disks status.
|
1154 |
|
1155 |
"""
|
1156 |
_OP_REQP = [] |
1157 |
REQ_BGL = False
|
1158 |
|
1159 |
def ExpandNames(self): |
1160 |
self.needed_locks = {
|
1161 |
locking.LEVEL_NODE: locking.ALL_SET, |
1162 |
locking.LEVEL_INSTANCE: locking.ALL_SET, |
1163 |
} |
1164 |
self.share_locks = dict(((i, 1) for i in locking.LEVELS)) |
1165 |
|
1166 |
def CheckPrereq(self): |
1167 |
"""Check prerequisites.
|
1168 |
|
1169 |
This has no prerequisites.
|
1170 |
|
1171 |
"""
|
1172 |
pass
|
1173 |
|
1174 |
def Exec(self, feedback_fn): |
1175 |
"""Verify integrity of cluster disks.
|
1176 |
|
1177 |
"""
|
1178 |
result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {} |
1179 |
|
1180 |
vg_name = self.cfg.GetVGName()
|
1181 |
nodes = utils.NiceSort(self.cfg.GetNodeList())
|
1182 |
instances = [self.cfg.GetInstanceInfo(name)
|
1183 |
for name in self.cfg.GetInstanceList()] |
1184 |
|
1185 |
nv_dict = {} |
1186 |
for inst in instances: |
1187 |
inst_lvs = {} |
1188 |
if (not inst.admin_up or |
1189 |
inst.disk_template not in constants.DTS_NET_MIRROR): |
1190 |
continue
|
1191 |
inst.MapLVsByNode(inst_lvs) |
1192 |
# transform { iname: {node: [vol,],},} to {(node, vol): iname}
|
1193 |
for node, vol_list in inst_lvs.iteritems(): |
1194 |
for vol in vol_list: |
1195 |
nv_dict[(node, vol)] = inst |
1196 |
|
1197 |
if not nv_dict: |
1198 |
return result
|
1199 |
|
1200 |
node_lvs = self.rpc.call_volume_list(nodes, vg_name)
|
1201 |
|
1202 |
to_act = set()
|
1203 |
for node in nodes: |
1204 |
# node_volume
|
1205 |
lvs = node_lvs[node] |
1206 |
if lvs.failed:
|
1207 |
if not lvs.offline: |
1208 |
self.LogWarning("Connection to node %s failed: %s" % |
1209 |
(node, lvs.data)) |
1210 |
continue
|
1211 |
lvs = lvs.data |
1212 |
if isinstance(lvs, basestring): |
1213 |
logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
|
1214 |
res_nlvm[node] = lvs |
1215 |
elif not isinstance(lvs, dict): |
1216 |
logging.warning("Connection to node %s failed or invalid data"
|
1217 |
" returned", node)
|
1218 |
res_nodes.append(node) |
1219 |
continue
|
1220 |
|
1221 |
for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems(): |
1222 |
inst = nv_dict.pop((node, lv_name), None)
|
1223 |
if (not lv_online and inst is not None |
1224 |
and inst.name not in res_instances): |
1225 |
res_instances.append(inst.name) |
1226 |
|
1227 |
# any leftover items in nv_dict are missing LVs, let's arrange the
|
1228 |
# data better
|
1229 |
for key, inst in nv_dict.iteritems(): |
1230 |
if inst.name not in res_missing: |
1231 |
res_missing[inst.name] = [] |
1232 |
res_missing[inst.name].append(key) |
1233 |
|
1234 |
return result
|
1235 |
|
1236 |
|
1237 |
class LURenameCluster(LogicalUnit): |
1238 |
"""Rename the cluster.
|
1239 |
|
1240 |
"""
|
1241 |
HPATH = "cluster-rename"
|
1242 |
HTYPE = constants.HTYPE_CLUSTER |
1243 |
_OP_REQP = ["name"]
|
1244 |
|
1245 |
def BuildHooksEnv(self): |
1246 |
"""Build hooks env.
|
1247 |
|
1248 |
"""
|
1249 |
env = { |
1250 |
"OP_TARGET": self.cfg.GetClusterName(), |
1251 |
"NEW_NAME": self.op.name, |
1252 |
} |
1253 |
mn = self.cfg.GetMasterNode()
|
1254 |
return env, [mn], [mn]
|
1255 |
|
1256 |
def CheckPrereq(self): |
1257 |
"""Verify that the passed name is a valid one.
|
1258 |
|
1259 |
"""
|
1260 |
hostname = utils.HostInfo(self.op.name)
|
1261 |
|
1262 |
new_name = hostname.name |
1263 |
self.ip = new_ip = hostname.ip
|
1264 |
old_name = self.cfg.GetClusterName()
|
1265 |
old_ip = self.cfg.GetMasterIP()
|
1266 |
if new_name == old_name and new_ip == old_ip: |
1267 |
raise errors.OpPrereqError("Neither the name nor the IP address of the" |
1268 |
" cluster has changed")
|
1269 |
if new_ip != old_ip:
|
1270 |
if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
|
1271 |
raise errors.OpPrereqError("The given cluster IP address (%s) is" |
1272 |
" reachable on the network. Aborting." %
|
1273 |
new_ip) |
1274 |
|
1275 |
self.op.name = new_name
|
1276 |
|
1277 |
def Exec(self, feedback_fn): |
1278 |
"""Rename the cluster.
|
1279 |
|
1280 |
"""
|
1281 |
clustername = self.op.name
|
1282 |
ip = self.ip
|
1283 |
|
1284 |
# shutdown the master IP
|
1285 |
master = self.cfg.GetMasterNode()
|
1286 |
result = self.rpc.call_node_stop_master(master, False) |
1287 |
if result.failed or not result.data: |
1288 |
raise errors.OpExecError("Could not disable the master role") |
1289 |
|
1290 |
try:
|
1291 |
cluster = self.cfg.GetClusterInfo()
|
1292 |
cluster.cluster_name = clustername |
1293 |
cluster.master_ip = ip |
1294 |
self.cfg.Update(cluster)
|
1295 |
|
1296 |
# update the known hosts file
|
1297 |
ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
|
1298 |
node_list = self.cfg.GetNodeList()
|
1299 |
try:
|
1300 |
node_list.remove(master) |
1301 |
except ValueError: |
1302 |
pass
|
1303 |
result = self.rpc.call_upload_file(node_list,
|
1304 |
constants.SSH_KNOWN_HOSTS_FILE) |
1305 |
for to_node, to_result in result.iteritems(): |
1306 |
if to_result.failed or not to_result.data: |
1307 |
logging.error("Copy of file %s to node %s failed",
|
1308 |
constants.SSH_KNOWN_HOSTS_FILE, to_node) |
1309 |
|
1310 |
finally:
|
1311 |
result = self.rpc.call_node_start_master(master, False) |
1312 |
if result.failed or not result.data: |
1313 |
self.LogWarning("Could not re-enable the master role on" |
1314 |
" the master, please restart manually.")
|
1315 |
|
1316 |
|
1317 |
def _RecursiveCheckIfLVMBased(disk): |
1318 |
"""Check if the given disk or its children are lvm-based.
|
1319 |
|
1320 |
@type disk: L{objects.Disk}
|
1321 |
@param disk: the disk to check
|
1322 |
@rtype: booleean
|
1323 |
@return: boolean indicating whether a LD_LV dev_type was found or not
|
1324 |
|
1325 |
"""
|
1326 |
if disk.children:
|
1327 |
for chdisk in disk.children: |
1328 |
if _RecursiveCheckIfLVMBased(chdisk):
|
1329 |
return True |
1330 |
return disk.dev_type == constants.LD_LV
|
1331 |
|
1332 |
|
1333 |
class LUSetClusterParams(LogicalUnit): |
1334 |
"""Change the parameters of the cluster.
|
1335 |
|
1336 |
"""
|
1337 |
HPATH = "cluster-modify"
|
1338 |
HTYPE = constants.HTYPE_CLUSTER |
1339 |
_OP_REQP = [] |
1340 |
REQ_BGL = False
|
1341 |
|
1342 |
def CheckParameters(self): |
1343 |
"""Check parameters
|
1344 |
|
1345 |
"""
|
1346 |
if not hasattr(self.op, "candidate_pool_size"): |
1347 |
self.op.candidate_pool_size = None |
1348 |
if self.op.candidate_pool_size is not None: |
1349 |
try:
|
1350 |
self.op.candidate_pool_size = int(self.op.candidate_pool_size) |
1351 |
except ValueError, err: |
1352 |
raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" % |
1353 |
str(err))
|
1354 |
if self.op.candidate_pool_size < 1: |
1355 |
raise errors.OpPrereqError("At least one master candidate needed") |
1356 |
|
1357 |
def ExpandNames(self): |
1358 |
# FIXME: in the future maybe other cluster params won't require checking on
|
1359 |
# all nodes to be modified.
|
1360 |
self.needed_locks = {
|
1361 |
locking.LEVEL_NODE: locking.ALL_SET, |
1362 |
} |
1363 |
self.share_locks[locking.LEVEL_NODE] = 1 |
1364 |
|
1365 |
def BuildHooksEnv(self): |
1366 |
"""Build hooks env.
|
1367 |
|
1368 |
"""
|
1369 |
env = { |
1370 |
"OP_TARGET": self.cfg.GetClusterName(), |
1371 |
"NEW_VG_NAME": self.op.vg_name, |
1372 |
} |
1373 |
mn = self.cfg.GetMasterNode()
|
1374 |
return env, [mn], [mn]
|
1375 |
|
1376 |
def CheckPrereq(self): |
1377 |
"""Check prerequisites.
|
1378 |
|
1379 |
This checks whether the given params don't conflict and
|
1380 |
if the given volume group is valid.
|
1381 |
|
1382 |
"""
|
1383 |
# FIXME: This only works because there is only one parameter that can be
|
1384 |
# changed or removed.
|
1385 |
if self.op.vg_name is not None and not self.op.vg_name: |
1386 |
instances = self.cfg.GetAllInstancesInfo().values()
|
1387 |
for inst in instances: |
1388 |
for disk in inst.disks: |
1389 |
if _RecursiveCheckIfLVMBased(disk):
|
1390 |
raise errors.OpPrereqError("Cannot disable lvm storage while" |
1391 |
" lvm-based instances exist")
|
1392 |
|
1393 |
node_list = self.acquired_locks[locking.LEVEL_NODE]
|
1394 |
|
1395 |
# if vg_name not None, checks given volume group on all nodes
|
1396 |
if self.op.vg_name: |
1397 |
vglist = self.rpc.call_vg_list(node_list)
|
1398 |
for node in node_list: |
1399 |
if vglist[node].failed:
|
1400 |
# ignoring down node
|
1401 |
self.LogWarning("Node %s unreachable/error, ignoring" % node) |
1402 |
continue
|
1403 |
vgstatus = utils.CheckVolumeGroupSize(vglist[node].data, |
1404 |
self.op.vg_name,
|
1405 |
constants.MIN_VG_SIZE) |
1406 |
if vgstatus:
|
1407 |
raise errors.OpPrereqError("Error on node '%s': %s" % |
1408 |
(node, vgstatus)) |
1409 |
|
1410 |
self.cluster = cluster = self.cfg.GetClusterInfo() |
1411 |
# validate beparams changes
|
1412 |
if self.op.beparams: |
1413 |
utils.CheckBEParams(self.op.beparams)
|
1414 |
self.new_beparams = cluster.FillDict(
|
1415 |
cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
|
1416 |
|
1417 |
# hypervisor list/parameters
|
1418 |
self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
|
1419 |
if self.op.hvparams: |
1420 |
if not isinstance(self.op.hvparams, dict): |
1421 |
raise errors.OpPrereqError("Invalid 'hvparams' parameter on input") |
1422 |
for hv_name, hv_dict in self.op.hvparams.items(): |
1423 |
if hv_name not in self.new_hvparams: |
1424 |
self.new_hvparams[hv_name] = hv_dict
|
1425 |
else:
|
1426 |
self.new_hvparams[hv_name].update(hv_dict)
|
1427 |
|
1428 |
if self.op.enabled_hypervisors is not None: |
1429 |
self.hv_list = self.op.enabled_hypervisors |
1430 |
else:
|
1431 |
self.hv_list = cluster.enabled_hypervisors
|
1432 |
|
1433 |
if self.op.hvparams or self.op.enabled_hypervisors is not None: |
1434 |
# either the enabled list has changed, or the parameters have, validate
|
1435 |
for hv_name, hv_params in self.new_hvparams.items(): |
1436 |
if ((self.op.hvparams and hv_name in self.op.hvparams) or |
1437 |
(self.op.enabled_hypervisors and |
1438 |
hv_name in self.op.enabled_hypervisors)): |
1439 |
# either this is a new hypervisor, or its parameters have changed
|
1440 |
hv_class = hypervisor.GetHypervisor(hv_name) |
1441 |
hv_class.CheckParameterSyntax(hv_params) |
1442 |
_CheckHVParams(self, node_list, hv_name, hv_params)
|
1443 |
|
1444 |
def Exec(self, feedback_fn): |
1445 |
"""Change the parameters of the cluster.
|
1446 |
|
1447 |
"""
|
1448 |
if self.op.vg_name is not None: |
1449 |
if self.op.vg_name != self.cfg.GetVGName(): |
1450 |
self.cfg.SetVGName(self.op.vg_name) |
1451 |
else:
|
1452 |
feedback_fn("Cluster LVM configuration already in desired"
|
1453 |
" state, not changing")
|
1454 |
if self.op.hvparams: |
1455 |
self.cluster.hvparams = self.new_hvparams |
1456 |
if self.op.enabled_hypervisors is not None: |
1457 |
self.cluster.enabled_hypervisors = self.op.enabled_hypervisors |
1458 |
if self.op.beparams: |
1459 |
self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams |
1460 |
if self.op.candidate_pool_size is not None: |
1461 |
self.cluster.candidate_pool_size = self.op.candidate_pool_size |
1462 |
|
1463 |
self.cfg.Update(self.cluster) |
1464 |
|
1465 |
# we want to update nodes after the cluster so that if any errors
|
1466 |
# happen, we have recorded and saved the cluster info
|
1467 |
if self.op.candidate_pool_size is not None: |
1468 |
_AdjustCandidatePool(self)
|
1469 |
|
1470 |
|
1471 |
class LURedistributeConfig(NoHooksLU): |
1472 |
"""Force the redistribution of cluster configuration.
|
1473 |
|
1474 |
This is a very simple LU.
|
1475 |
|
1476 |
"""
|
1477 |
_OP_REQP = [] |
1478 |
REQ_BGL = False
|
1479 |
|
1480 |
def ExpandNames(self): |
1481 |
self.needed_locks = {
|
1482 |
locking.LEVEL_NODE: locking.ALL_SET, |
1483 |
} |
1484 |
self.share_locks[locking.LEVEL_NODE] = 1 |
1485 |
|
1486 |
def CheckPrereq(self): |
1487 |
"""Check prerequisites.
|
1488 |
|
1489 |
"""
|
1490 |
|
1491 |
def Exec(self, feedback_fn): |
1492 |
"""Redistribute the configuration.
|
1493 |
|
1494 |
"""
|
1495 |
self.cfg.Update(self.cfg.GetClusterInfo()) |
1496 |
|
1497 |
|
1498 |
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)

  return not cumul_degraded

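# Each per-disk entry returned by call_blockdev_getmirrorstatus and unpacked
# in _WaitForSync above is a (sync_percent, estimated_time, is_degraded,
# ldisk) tuple; as an illustration (hypothetical values), a disk 80.5%
# through a resync with ~120 seconds left would report (80.5, 120, True,
# False), while a fully synced disk reports sync_percent as None.

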
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    if rstats.failed or not rstats.data:
      logging.warning("Node %s: disk degraded, not found or node down", node)
      result = False
    else:
      result = result and (not rstats.data[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result

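# Usage sketch for _CheckDiskConsistency above (hypothetical arguments): to
# test only the local storage state of a disk on its primary node one would
# call _CheckDiskConsistency(lu, instance.disks[0], pnode, True, ldisk=True),
# which inspects field 6 (ldisk) of the blockdev_find status instead of
# field 5 (is_degraded).

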
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @returns: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in node_list]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output

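# Example of the mapping produced by _DiagnoseByOS and consumed in Exec
# above, reusing the docstring's sample names: given
#   {"debian-etch": {"node1": [<OS object>], "node2": []}}
# the "valid" field would evaluate to False, since node2 has no entry for
# that OS.

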
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)

class LUQueryNodes(NoHooksLU): |
1743 |
"""Logical unit for querying nodes.
|
1744 |
|
1745 |
"""
|
1746 |
_OP_REQP = ["output_fields", "names"] |
1747 |
REQ_BGL = False
|
1748 |
_FIELDS_DYNAMIC = utils.FieldSet( |
1749 |
"dtotal", "dfree", |
1750 |
"mtotal", "mnode", "mfree", |
1751 |
"bootid",
|
1752 |
"ctotal",
|
1753 |
) |
1754 |
|
1755 |
_FIELDS_STATIC = utils.FieldSet( |
1756 |
"name", "pinst_cnt", "sinst_cnt", |
1757 |
"pinst_list", "sinst_list", |
1758 |
"pip", "sip", "tags", |
1759 |
"serial_no",
|
1760 |
"master_candidate",
|
1761 |
"master",
|
1762 |
"offline",
|
1763 |
) |
1764 |
|
1765 |
def ExpandNames(self): |
1766 |
_CheckOutputFields(static=self._FIELDS_STATIC,
|
1767 |
dynamic=self._FIELDS_DYNAMIC,
|
1768 |
selected=self.op.output_fields)
|
1769 |
|
1770 |
self.needed_locks = {}
|
1771 |
self.share_locks[locking.LEVEL_NODE] = 1 |
1772 |
|
1773 |
if self.op.names: |
1774 |
self.wanted = _GetWantedNodes(self, self.op.names) |
1775 |
else:
|
1776 |
self.wanted = locking.ALL_SET
|
1777 |
|
1778 |
self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields) |
1779 |
if self.do_locking: |
1780 |
# if we don't request only static fields, we need to lock the nodes
|
1781 |
self.needed_locks[locking.LEVEL_NODE] = self.wanted |
1782 |
|
1783 |
|
1784 |
def CheckPrereq(self): |
1785 |
"""Check prerequisites.
|
1786 |
|
1787 |
"""
|
1788 |
# The validation of the node list is done in the _GetWantedNodes,
|
1789 |
# if non empty, and if empty, there's no validation to do
|
1790 |
pass
|
1791 |
|
1792 |
def Exec(self, feedback_fn): |
1793 |
"""Computes the list of nodes and their attributes.
|
1794 |
|
1795 |
"""
|
1796 |
all_info = self.cfg.GetAllNodesInfo()
|
1797 |
if self.do_locking: |
1798 |
nodenames = self.acquired_locks[locking.LEVEL_NODE]
|
1799 |
elif self.wanted != locking.ALL_SET: |
1800 |
nodenames = self.wanted
|
1801 |
missing = set(nodenames).difference(all_info.keys())
|
1802 |
if missing:
|
1803 |
raise errors.OpExecError(
|
1804 |
"Some nodes were removed before retrieving their data: %s" % missing)
|
1805 |
else:
|
1806 |
nodenames = all_info.keys() |
1807 |
|
1808 |
nodenames = utils.NiceSort(nodenames) |
1809 |
nodelist = [all_info[name] for name in nodenames] |
1810 |
|
1811 |
# begin data gathering
|
1812 |
|
1813 |
if self.do_locking: |
1814 |
live_data = {} |
1815 |
node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(), |
1816 |
self.cfg.GetHypervisorType())
|
1817 |
for name in nodenames: |
1818 |
nodeinfo = node_data[name] |
1819 |
if not nodeinfo.failed and nodeinfo.data: |
1820 |
nodeinfo = nodeinfo.data |
1821 |
fn = utils.TryConvert |
1822 |
live_data[name] = { |
1823 |
"mtotal": fn(int, nodeinfo.get('memory_total', None)), |
1824 |
"mnode": fn(int, nodeinfo.get('memory_dom0', None)), |
1825 |
"mfree": fn(int, nodeinfo.get('memory_free', None)), |
1826 |
"dtotal": fn(int, nodeinfo.get('vg_size', None)), |
1827 |
"dfree": fn(int, nodeinfo.get('vg_free', None)), |
1828 |
"ctotal": fn(int, nodeinfo.get('cpu_total', None)), |
1829 |
"bootid": nodeinfo.get('bootid', None), |
1830 |
} |
1831 |
else:
|
1832 |
live_data[name] = {} |
1833 |
else:
|
1834 |
live_data = dict.fromkeys(nodenames, {})
|
1835 |
|
1836 |
node_to_primary = dict([(name, set()) for name in nodenames]) |
1837 |
node_to_secondary = dict([(name, set()) for name in nodenames]) |
1838 |
|
1839 |
inst_fields = frozenset(("pinst_cnt", "pinst_list", |
1840 |
"sinst_cnt", "sinst_list")) |
1841 |
if inst_fields & frozenset(self.op.output_fields): |
1842 |
instancelist = self.cfg.GetInstanceList()
|
1843 |
|
1844 |
for instance_name in instancelist: |
1845 |
inst = self.cfg.GetInstanceInfo(instance_name)
|
1846 |
if inst.primary_node in node_to_primary: |
1847 |
node_to_primary[inst.primary_node].add(inst.name) |
1848 |
for secnode in inst.secondary_nodes: |
1849 |
if secnode in node_to_secondary: |
1850 |
node_to_secondary[secnode].add(inst.name) |
1851 |
|
1852 |
master_node = self.cfg.GetMasterNode()
|
1853 |
|
1854 |
# end data gathering
|
1855 |
|
1856 |
output = [] |
1857 |
for node in nodelist: |
1858 |
node_output = [] |
1859 |
for field in self.op.output_fields: |
1860 |
if field == "name": |
1861 |
val = node.name |
1862 |
elif field == "pinst_list": |
1863 |
val = list(node_to_primary[node.name])
|
1864 |
elif field == "sinst_list": |
1865 |
val = list(node_to_secondary[node.name])
|
1866 |
elif field == "pinst_cnt": |
1867 |
val = len(node_to_primary[node.name])
|
1868 |
elif field == "sinst_cnt": |
1869 |
val = len(node_to_secondary[node.name])
|
1870 |
elif field == "pip": |
1871 |
val = node.primary_ip |
1872 |
elif field == "sip": |
1873 |
val = node.secondary_ip |
1874 |
elif field == "tags": |
1875 |
val = list(node.GetTags())
|
1876 |
elif field == "serial_no": |
1877 |
val = node.serial_no |
1878 |
elif field == "master_candidate": |
1879 |
val = node.master_candidate |
1880 |
elif field == "master": |
1881 |
val = node.name == master_node |
1882 |
elif field == "offline": |
1883 |
val = node.offline |
1884 |
elif self._FIELDS_DYNAMIC.Matches(field): |
1885 |
val = live_data[node.name].get(field, None)
|
1886 |
else:
|
1887 |
raise errors.ParameterError(field)
|
1888 |
node_output.append(val) |
1889 |
output.append(node_output) |
1890 |
|
1891 |
return output
|
1892 |
|
1893 |
|
1894 |
class LUQueryNodeVolumes(NoHooksLU): |
1895 |
"""Logical unit for getting volumes on node(s).
|
1896 |
|
1897 |
"""
|
1898 |
_OP_REQP = ["nodes", "output_fields"] |
1899 |
REQ_BGL = False
|
1900 |
_FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance") |
1901 |
_FIELDS_STATIC = utils.FieldSet("node")
|
1902 |
|
1903 |
def ExpandNames(self): |
1904 |
_CheckOutputFields(static=self._FIELDS_STATIC,
|
1905 |
dynamic=self._FIELDS_DYNAMIC,
|
1906 |
selected=self.op.output_fields)
|
1907 |
|
1908 |
self.needed_locks = {}
|
1909 |
self.share_locks[locking.LEVEL_NODE] = 1 |
1910 |
if not self.op.nodes: |
1911 |
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
|
1912 |
else:
|
1913 |
self.needed_locks[locking.LEVEL_NODE] = \
|
1914 |
_GetWantedNodes(self, self.op.nodes) |
1915 |
|
1916 |
def CheckPrereq(self): |
1917 |
"""Check prerequisites.
|
1918 |
|
1919 |
This checks that the fields required are valid output fields.
|
1920 |
|
1921 |
"""
|
1922 |
self.nodes = self.acquired_locks[locking.LEVEL_NODE] |
1923 |
|
1924 |
def Exec(self, feedback_fn): |
1925 |
"""Computes the list of nodes and their attributes.
|
1926 |
|
1927 |
"""
|
1928 |
nodenames = self.nodes
|
1929 |
volumes = self.rpc.call_node_volumes(nodenames)
|
1930 |
|
1931 |
ilist = [self.cfg.GetInstanceInfo(iname) for iname |
1932 |
in self.cfg.GetInstanceList()] |
1933 |
|
1934 |
lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist]) |
1935 |
|
1936 |
output = [] |
1937 |
for node in nodenames: |
1938 |
if node not in volumes or volumes[node].failed or not volumes[node].data: |
1939 |
continue
|
1940 |
|
1941 |
node_vols = volumes[node].data[:] |
1942 |
node_vols.sort(key=lambda vol: vol['dev']) |
1943 |
|
1944 |
for vol in node_vols: |
1945 |
node_output = [] |
1946 |
for field in self.op.output_fields: |
1947 |
if field == "node": |
1948 |
val = node |
1949 |
elif field == "phys": |
1950 |
val = vol['dev']
|
1951 |
elif field == "vg": |
1952 |
val = vol['vg']
|
1953 |
elif field == "name": |
1954 |
val = vol['name']
|
1955 |
elif field == "size": |
1956 |
val = int(float(vol['size'])) |
1957 |
elif field == "instance": |
1958 |
for inst in ilist: |
1959 |
if node not in lv_by_node[inst]: |
1960 |
continue
|
1961 |
if vol['name'] in lv_by_node[inst][node]: |
1962 |
val = inst.name |
1963 |
break
|
1964 |
else:
|
1965 |
val = '-'
|
1966 |
else:
|
1967 |
raise errors.ParameterError(field)
|
1968 |
node_output.append(str(val))
|
1969 |
|
1970 |
output.append(node_output) |
1971 |
|
1972 |
return output
|
1973 |
|
1974 |
|
1975 |
class LUAddNode(LogicalUnit): |
1976 |
"""Logical unit for adding node to the cluster.
|
1977 |
|
1978 |
"""
|
1979 |
HPATH = "node-add"
|
1980 |
HTYPE = constants.HTYPE_NODE |
1981 |
_OP_REQP = ["node_name"]
|
1982 |
|
1983 |
def BuildHooksEnv(self): |
1984 |
"""Build hooks env.
|
1985 |
|
1986 |
This will run on all nodes before, and on all nodes + the new node after.
|
1987 |
|
1988 |
"""
|
1989 |
env = { |
1990 |
"OP_TARGET": self.op.node_name, |
1991 |
"NODE_NAME": self.op.node_name, |
1992 |
"NODE_PIP": self.op.primary_ip, |
1993 |
"NODE_SIP": self.op.secondary_ip, |
1994 |
} |
1995 |
nodes_0 = self.cfg.GetNodeList()
|
1996 |
nodes_1 = nodes_0 + [self.op.node_name, ]
|
1997 |
return env, nodes_0, nodes_1
|
1998 |
|
1999 |
def CheckPrereq(self): |
2000 |
"""Check prerequisites.
|
2001 |
|
2002 |
This checks:
|
2003 |
- the new node is not already in the config
|
2004 |
- it is resolvable
|
2005 |
- its parameters (single/dual homed) matches the cluster
|
2006 |
|
2007 |
Any errors are signalled by raising errors.OpPrereqError.
|
2008 |
|
2009 |
"""
|
2010 |
node_name = self.op.node_name
|
2011 |
cfg = self.cfg
|
2012 |
|
2013 |
dns_data = utils.HostInfo(node_name) |
2014 |
|
2015 |
node = dns_data.name |
2016 |
primary_ip = self.op.primary_ip = dns_data.ip
|
2017 |
secondary_ip = getattr(self.op, "secondary_ip", None) |
2018 |
if secondary_ip is None: |
2019 |
secondary_ip = primary_ip |
2020 |
if not utils.IsValidIP(secondary_ip): |
2021 |
raise errors.OpPrereqError("Invalid secondary IP given") |
2022 |
self.op.secondary_ip = secondary_ip
|
2023 |
|
2024 |
node_list = cfg.GetNodeList() |
2025 |
if not self.op.readd and node in node_list: |
2026 |
raise errors.OpPrereqError("Node %s is already in the configuration" % |
2027 |
node) |
2028 |
elif self.op.readd and node not in node_list: |
2029 |
raise errors.OpPrereqError("Node %s is not in the configuration" % node) |
2030 |
|
2031 |
for existing_node_name in node_list: |
2032 |
existing_node = cfg.GetNodeInfo(existing_node_name) |
2033 |
|
2034 |
if self.op.readd and node == existing_node_name: |
2035 |
if (existing_node.primary_ip != primary_ip or |
2036 |
existing_node.secondary_ip != secondary_ip): |
2037 |
raise errors.OpPrereqError("Readded node doesn't have the same IP" |
2038 |
" address configuration as before")
|
2039 |
continue
|
2040 |
|
2041 |
if (existing_node.primary_ip == primary_ip or |
2042 |
existing_node.secondary_ip == primary_ip or
|
2043 |
existing_node.primary_ip == secondary_ip or
|
2044 |
existing_node.secondary_ip == secondary_ip): |
2045 |
raise errors.OpPrereqError("New node ip address(es) conflict with" |
2046 |
" existing node %s" % existing_node.name)
|
2047 |
|
2048 |
# check that the type of the node (single versus dual homed) is the
|
2049 |
# same as for the master
|
2050 |
myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
|
2051 |
master_singlehomed = myself.secondary_ip == myself.primary_ip |
2052 |
newbie_singlehomed = secondary_ip == primary_ip |
2053 |
if master_singlehomed != newbie_singlehomed:
|
2054 |
if master_singlehomed:
|
2055 |
raise errors.OpPrereqError("The master has no private ip but the" |
2056 |
" new node has one")
|
2057 |
else:
|
2058 |
raise errors.OpPrereqError("The master has a private ip but the" |
2059 |
" new node doesn't have one")
|
2060 |
|
2061 |
# checks reachablity
|
2062 |
if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT): |
2063 |
raise errors.OpPrereqError("Node not reachable by ping") |
2064 |
|
2065 |
if not newbie_singlehomed: |
2066 |
# check reachability from my secondary ip to newbie's secondary ip
|
2067 |
if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT, |
2068 |
source=myself.secondary_ip): |
2069 |
raise errors.OpPrereqError("Node secondary ip not reachable by TCP" |
2070 |
" based ping to noded port")
|
2071 |
|
2072 |
cp_size = self.cfg.GetClusterInfo().candidate_pool_size
|
2073 |
mc_now, _ = self.cfg.GetMasterCandidateStats()
|
2074 |
master_candidate = mc_now < cp_size |
2075 |
|
2076 |
self.new_node = objects.Node(name=node,
|
2077 |
primary_ip=primary_ip, |
2078 |
secondary_ip=secondary_ip, |
2079 |
master_candidate=master_candidate, |
2080 |
offline=False)
|
2081 |
|
2082 |
def Exec(self, feedback_fn): |
2083 |
"""Adds the new node to the cluster.
|
2084 |
|
2085 |
"""
|
2086 |
new_node = self.new_node
|
2087 |
node = new_node.name |
2088 |
|
2089 |
# check connectivity
|
2090 |
result = self.rpc.call_version([node])[node]
|
2091 |
result.Raise() |
2092 |
if result.data:
|
2093 |
if constants.PROTOCOL_VERSION == result.data:
|
2094 |
logging.info("Communication to node %s fine, sw version %s match",
|
2095 |
node, result.data) |
2096 |
else:
|
2097 |
raise errors.OpExecError("Version mismatch master version %s," |
2098 |
" node version %s" %
|
2099 |
(constants.PROTOCOL_VERSION, result.data)) |
2100 |
else:
|
2101 |
raise errors.OpExecError("Cannot get version from the new node") |
2102 |
|
2103 |
# setup ssh on node
|
2104 |
logging.info("Copy ssh key to node %s", node)
|
2105 |
priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS) |
2106 |
keyarray = [] |
2107 |
keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB, |
2108 |
constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB, |
2109 |
priv_key, pub_key] |
2110 |
|
2111 |
for i in keyfiles: |
2112 |
f = open(i, 'r') |
2113 |
try:
|
2114 |
keyarray.append(f.read()) |
2115 |
finally:
|
2116 |
f.close() |
2117 |
|
2118 |
result = self.rpc.call_node_add(node, keyarray[0], keyarray[1], |
2119 |
keyarray[2],
|
2120 |
keyarray[3], keyarray[4], keyarray[5]) |
2121 |
|
2122 |
if result.failed or not result.data: |
2123 |
raise errors.OpExecError("Cannot transfer ssh keys to the new node") |
2124 |
|
2125 |
# Add node to our /etc/hosts, and add key to known_hosts
|
2126 |
utils.AddHostToEtcHosts(new_node.name) |
2127 |
|
2128 |
if new_node.secondary_ip != new_node.primary_ip:
|
2129 |
result = self.rpc.call_node_has_ip_address(new_node.name,
|
2130 |
new_node.secondary_ip) |
2131 |
if result.failed or not result.data: |
2132 |
raise errors.OpExecError("Node claims it doesn't have the secondary ip" |
2133 |
" you gave (%s). Please fix and re-run this"
|
2134 |
" command." % new_node.secondary_ip)
|
2135 |
|
2136 |
node_verify_list = [self.cfg.GetMasterNode()]
|
2137 |
node_verify_param = { |
2138 |
'nodelist': [node],
|
2139 |
# TODO: do a node-net-test as well?
|
2140 |
} |
2141 |
|
2142 |
result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
|
2143 |
self.cfg.GetClusterName())
|
2144 |
for verifier in node_verify_list: |
2145 |
if result[verifier].failed or not result[verifier].data: |
2146 |
raise errors.OpExecError("Cannot communicate with %s's node daemon" |
2147 |
" for remote verification" % verifier)
|
2148 |
if result[verifier].data['nodelist']: |
2149 |
for failed in result[verifier].data['nodelist']: |
2150 |
feedback_fn("ssh/hostname verification failed %s -> %s" %
|
2151 |
(verifier, result[verifier]['nodelist'][failed]))
|
2152 |
raise errors.OpExecError("ssh/hostname verification failed.") |
2153 |
|
2154 |
# Distribute updated /etc/hosts and known_hosts to all nodes,
|
2155 |
# including the node just added
|
2156 |
myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode()) |
2157 |
dist_nodes = self.cfg.GetNodeList()
|
2158 |
if not self.op.readd: |
2159 |
dist_nodes.append(node) |
2160 |
if myself.name in dist_nodes: |
2161 |
dist_nodes.remove(myself.name) |
2162 |
|
2163 |
logging.debug("Copying hosts and known_hosts to all nodes")
|
2164 |
for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE): |
2165 |
result = self.rpc.call_upload_file(dist_nodes, fname)
|
2166 |
for to_node, to_result in result.iteritems(): |
2167 |
if to_result.failed or not to_result.data: |
2168 |
logging.error("Copy of file %s to node %s failed", fname, to_node)
|
2169 |
|
2170 |
to_copy = [] |
2171 |
if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors: |
2172 |
to_copy.append(constants.VNC_PASSWORD_FILE) |
2173 |
for fname in to_copy: |
2174 |
result = self.rpc.call_upload_file([node], fname)
|
2175 |
if result[node].failed or not result[node]: |
2176 |
logging.error("Could not copy file %s to node %s", fname, node)
|
2177 |
|
2178 |
if self.op.readd: |
2179 |
self.context.ReaddNode(new_node)
|
2180 |
else:
|
2181 |
self.context.AddNode(new_node)
|
2182 |
|
2183 |
|
2184 |
class LUSetNodeParams(LogicalUnit): |
2185 |
"""Modifies the parameters of a node.
|
2186 |
|
2187 |
"""
|
2188 |
HPATH = "node-modify"
|
2189 |
HTYPE = constants.HTYPE_NODE |
2190 |
_OP_REQP = ["node_name"]
|
2191 |
REQ_BGL = False
|
2192 |
|
2193 |
def CheckArguments(self): |
2194 |
node_name = self.cfg.ExpandNodeName(self.op.node_name) |
2195 |
if node_name is None: |
2196 |
raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name) |
2197 |
self.op.node_name = node_name
|
2198 |
_CheckBooleanOpField(self.op, 'master_candidate') |
2199 |
_CheckBooleanOpField(self.op, 'offline') |
2200 |
if self.op.master_candidate is None and self.op.offline is None: |
2201 |
raise errors.OpPrereqError("Please pass at least one modification") |
2202 |
if self.op.offline == True and self.op.master_candidate == True: |
2203 |
raise errors.OpPrereqError("Can't set the node into offline and" |
2204 |
" master_candidate at the same time")
|
2205 |
|
2206 |
def ExpandNames(self): |
2207 |
self.needed_locks = {locking.LEVEL_NODE: self.op.node_name} |
2208 |
|
2209 |
def BuildHooksEnv(self): |
2210 |
"""Build hooks env.
|
2211 |
|
2212 |
This runs on the master node.
|
2213 |
|
2214 |
"""
|
2215 |
env = { |
2216 |
"OP_TARGET": self.op.node_name, |
2217 |
"MASTER_CANDIDATE": str(self.op.master_candidate), |
2218 |
"OFFLINE": str(self.op.offline), |
2219 |
} |
2220 |
nl = [self.cfg.GetMasterNode(),
|
2221 |
self.op.node_name]
|
2222 |
return env, nl, nl
|
2223 |
|
2224 |
def CheckPrereq(self): |
2225 |
"""Check prerequisites.
|
2226 |
|
2227 |
This only checks the instance list against the existing names.
|
2228 |
|
2229 |
"""
|
2230 |
node = self.node = self.cfg.GetNodeInfo(self.op.node_name) |
2231 |
|
2232 |
if ((self.op.master_candidate == False or self.op.offline == True) |
2233 |
and node.master_candidate):
|
2234 |
# we will demote the node from master_candidate
|
2235 |
if self.op.node_name == self.cfg.GetMasterNode(): |
2236 |
raise errors.OpPrereqError("The master node has to be a" |
2237 |
" master candidate and online")
|
2238 |
cp_size = self.cfg.GetClusterInfo().candidate_pool_size
|
2239 |
num_candidates, _ = self.cfg.GetMasterCandidateStats()
|
2240 |
if num_candidates <= cp_size:
|
2241 |
msg = ("Not enough master candidates (desired"
|
2242 |
" %d, new value will be %d)" % (cp_size, num_candidates-1)) |
2243 |
if self.op.force: |
2244 |
self.LogWarning(msg)
|
2245 |
else:
|
2246 |
raise errors.OpPrereqError(msg)
|
2247 |
|
2248 |
if (self.op.master_candidate == True and node.offline and |
2249 |
not self.op.offline == False): |
2250 |
raise errors.OpPrereqError("Can't set an offline node to" |
2251 |
" master_candidate")
|
2252 |
|
2253 |
return
|
2254 |
|
2255 |
def Exec(self, feedback_fn): |
2256 |
"""Modifies a node.
|
2257 |
|
2258 |
"""
|
2259 |
node = self.node
|
2260 |
|
2261 |
result = [] |
2262 |
|
2263 |
if self.op.offline is not None: |
2264 |
node.offline = self.op.offline
|
2265 |
result.append(("offline", str(self.op.offline))) |
2266 |
if self.op.offline == True and node.master_candidate: |
2267 |
node.master_candidate = False
|
2268 |
result.append(("master_candidate", "auto-demotion due to offline")) |
2269 |
|
2270 |
if self.op.master_candidate is not None: |
2271 |
node.master_candidate = self.op.master_candidate
|
2272 |
result.append(("master_candidate", str(self.op.master_candidate))) |
2273 |
if self.op.master_candidate == False: |
2274 |
rrc = self.rpc.call_node_demote_from_mc(node.name)
|
2275 |
if (rrc.failed or not isinstance(rrc.data, (tuple, list)) |
2276 |
or len(rrc.data) != 2): |
2277 |
self.LogWarning("Node rpc error: %s" % rrc.error) |
2278 |
elif not rrc.data[0]: |
2279 |
self.LogWarning("Node failed to demote itself: %s" % rrc.data[1]) |
2280 |
|
2281 |
# this will trigger configuration file update, if needed
|
2282 |
self.cfg.Update(node)
|
2283 |
# this will trigger job queue propagation or cleanup
|
2284 |
if self.op.node_name != self.cfg.GetMasterNode(): |
2285 |
self.context.ReaddNode(node)
|
2286 |
|
2287 |
return result
|
2288 |
|
2289 |
|
2290 |
class LUQueryClusterInfo(NoHooksLU): |
2291 |
"""Query cluster configuration.
|
2292 |
|
2293 |
"""
|
2294 |
_OP_REQP = [] |
2295 |
REQ_BGL = False
|
2296 |
|
2297 |
def ExpandNames(self): |
2298 |
self.needed_locks = {}
|
2299 |
|
2300 |
def CheckPrereq(self): |
2301 |
"""No prerequsites needed for this LU.
|
2302 |
|
2303 |
"""
|
2304 |
pass
|
2305 |
|
2306 |
def Exec(self, feedback_fn): |
2307 |
"""Return cluster config.
|
2308 |
|
2309 |
"""
|
2310 |
cluster = self.cfg.GetClusterInfo()
|
2311 |
result = { |
2312 |
"software_version": constants.RELEASE_VERSION,
|
2313 |
"protocol_version": constants.PROTOCOL_VERSION,
|
2314 |
"config_version": constants.CONFIG_VERSION,
|
2315 |
"os_api_version": constants.OS_API_VERSION,
|
2316 |
"export_version": constants.EXPORT_VERSION,
|
2317 |
"architecture": (platform.architecture()[0], platform.machine()), |
2318 |
"name": cluster.cluster_name,
|
2319 |
"master": cluster.master_node,
|
2320 |
"default_hypervisor": cluster.default_hypervisor,
|
2321 |
"enabled_hypervisors": cluster.enabled_hypervisors,
|
2322 |
"hvparams": cluster.hvparams,
|
2323 |
"beparams": cluster.beparams,
|
2324 |
"candidate_pool_size": cluster.candidate_pool_size,
|
2325 |
} |
2326 |
|
2327 |
return result
|
2328 |
|
2329 |
|
2330 |
class LUQueryConfigValues(NoHooksLU): |
2331 |
"""Return configuration values.
|
2332 |
|
2333 |
"""
|
2334 |
_OP_REQP = [] |
2335 |
REQ_BGL = False
|
2336 |
_FIELDS_DYNAMIC = utils.FieldSet() |
2337 |
_FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag") |
2338 |
|
2339 |
def ExpandNames(self): |
2340 |
self.needed_locks = {}
|
2341 |
|
2342 |
_CheckOutputFields(static=self._FIELDS_STATIC,
|
2343 |
dynamic=self._FIELDS_DYNAMIC,
|
2344 |
selected=self.op.output_fields)
|
2345 |
|
2346 |
def CheckPrereq(self): |
2347 |
"""No prerequisites.
|
2348 |
|
2349 |
"""
|
2350 |
pass
|
2351 |
|
2352 |
def Exec(self, feedback_fn): |
2353 |
"""Dump a representation of the cluster config to the standard output.
|
2354 |
|
2355 |
"""
|
2356 |
values = [] |
2357 |
for field in self.op.output_fields: |
2358 |
if field == "cluster_name": |
2359 |
entry = self.cfg.GetClusterName()
|
2360 |
elif field == "master_node": |
2361 |
entry = self.cfg.GetMasterNode()
|
2362 |
elif field == "drain_flag": |
2363 |
entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE) |
2364 |
else:
|
2365 |
raise errors.ParameterError(field)
|
2366 |
values.append(entry) |
2367 |
return values
|
2368 |
|
2369 |
|
2370 |
class LUActivateInstanceDisks(NoHooksLU): |
2371 |
"""Bring up an instance's disks.
|
2372 |
|
2373 |
"""
|
2374 |
_OP_REQP = ["instance_name"]
|
2375 |
REQ_BGL = False
|
2376 |
|
2377 |
def ExpandNames(self): |
2378 |
self._ExpandAndLockInstance()
|
2379 |
self.needed_locks[locking.LEVEL_NODE] = []
|
2380 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
|
2381 |
|
2382 |
def DeclareLocks(self, level): |
2383 |
if level == locking.LEVEL_NODE:
|
2384 |
self._LockInstancesNodes()
|
2385 |
|
2386 |
def CheckPrereq(self): |
2387 |
"""Check prerequisites.
|
2388 |
|
2389 |
This checks that the instance is in the cluster.
|
2390 |
|
2391 |
"""
|
2392 |
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
2393 |
assert self.instance is not None, \ |
2394 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
2395 |
_CheckNodeOnline(self, self.instance.primary_node) |
2396 |
|
2397 |
def Exec(self, feedback_fn): |
2398 |
"""Activate the disks.
|
2399 |
|
2400 |
"""
|
2401 |
disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance) |
2402 |
if not disks_ok: |
2403 |
raise errors.OpExecError("Cannot activate block devices") |
2404 |
|
2405 |
return disks_info
|
2406 |
|
2407 |
|
2408 |
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False): |
2409 |
"""Prepare the block devices for an instance.
|
2410 |
|
2411 |
This sets up the block devices on all nodes.
|
2412 |
|
2413 |
@type lu: L{LogicalUnit}
|
2414 |
@param lu: the logical unit on whose behalf we execute
|
2415 |
@type instance: L{objects.Instance}
|
2416 |
@param instance: the instance for whose disks we assemble
|
2417 |
@type ignore_secondaries: boolean
|
2418 |
@param ignore_secondaries: if true, errors on secondary nodes
|
2419 |
won't result in an error return from the function
|
2420 |
@return: False if the operation failed, otherwise a list of
|
2421 |
(host, instance_visible_name, node_visible_name)
|
2422 |
with the mapping from node devices to instance devices
|
2423 |
|
2424 |
"""
|
2425 |
device_info = [] |
2426 |
disks_ok = True
|
2427 |
iname = instance.name |
2428 |
# With the two passes mechanism we try to reduce the window of
|
2429 |
# opportunity for the race condition of switching DRBD to primary
|
2430 |
# before handshaking occured, but we do not eliminate it
|
2431 |
|
2432 |
# The proper fix would be to wait (with some limits) until the
|
2433 |
# connection has been made and drbd transitions from WFConnection
|
2434 |
# into any other network-connected state (Connected, SyncTarget,
|
2435 |
# SyncSource, etc.)
|
2436 |
|
2437 |
# 1st pass, assemble on all nodes in secondary mode
|
2438 |
for inst_disk in instance.disks: |
2439 |
for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): |
2440 |
lu.cfg.SetDiskID(node_disk, node) |
2441 |
result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
|
2442 |
if result.failed or not result: |
2443 |
lu.proc.LogWarning("Could not prepare block device %s on node %s"
|
2444 |
" (is_primary=False, pass=1)",
|
2445 |
inst_disk.iv_name, node) |
2446 |
if not ignore_secondaries: |
2447 |
disks_ok = False
|
2448 |
|
2449 |
# FIXME: race condition on drbd migration to primary
|
2450 |
|
2451 |
# 2nd pass, do only the primary node
|
2452 |
for inst_disk in instance.disks: |
2453 |
for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node): |
2454 |
if node != instance.primary_node:
|
2455 |
continue
|
2456 |
lu.cfg.SetDiskID(node_disk, node) |
2457 |
result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
|
2458 |
if result.failed or not result: |
2459 |
lu.proc.LogWarning("Could not prepare block device %s on node %s"
|
2460 |
" (is_primary=True, pass=2)",
|
2461 |
inst_disk.iv_name, node) |
2462 |
disks_ok = False
|
2463 |
device_info.append((instance.primary_node, inst_disk.iv_name, result.data)) |
2464 |
|
2465 |
# leave the disks configured for the primary node
|
2466 |
# this is a workaround that would be fixed better by
|
2467 |
# improving the logical/physical id handling
|
2468 |
for disk in instance.disks: |
2469 |
lu.cfg.SetDiskID(disk, instance.primary_node) |
2470 |
|
2471 |
return disks_ok, device_info
|
2472 |
|
2473 |
|
2474 |
def _StartInstanceDisks(lu, instance, force): |
2475 |
"""Start the disks of an instance.
|
2476 |
|
2477 |
"""
|
2478 |
disks_ok, dummy = _AssembleInstanceDisks(lu, instance, |
2479 |
ignore_secondaries=force) |
2480 |
if not disks_ok: |
2481 |
_ShutdownInstanceDisks(lu, instance) |
2482 |
if force is not None and not force: |
2483 |
lu.proc.LogWarning("", hint="If the message above refers to a" |
2484 |
" secondary node,"
|
2485 |
" you can retry the operation using '--force'.")
|
2486 |
raise errors.OpExecError("Disk consistency error") |
2487 |
|
2488 |
|
2489 |
class LUDeactivateInstanceDisks(NoHooksLU): |
2490 |
"""Shutdown an instance's disks.
|
2491 |
|
2492 |
"""
|
2493 |
_OP_REQP = ["instance_name"]
|
2494 |
REQ_BGL = False
|
2495 |
|
2496 |
def ExpandNames(self): |
2497 |
self._ExpandAndLockInstance()
|
2498 |
self.needed_locks[locking.LEVEL_NODE] = []
|
2499 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
|
2500 |
|
2501 |
def DeclareLocks(self, level): |
2502 |
if level == locking.LEVEL_NODE:
|
2503 |
self._LockInstancesNodes()
|
2504 |
|
2505 |
def CheckPrereq(self): |
2506 |
"""Check prerequisites.
|
2507 |
|
2508 |
This checks that the instance is in the cluster.
|
2509 |
|
2510 |
"""
|
2511 |
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
2512 |
assert self.instance is not None, \ |
2513 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
2514 |
|
2515 |
def Exec(self, feedback_fn): |
2516 |
"""Deactivate the disks
|
2517 |
|
2518 |
"""
|
2519 |
instance = self.instance
|
2520 |
_SafeShutdownInstanceDisks(self, instance)
|
2521 |
|
2522 |
|
2523 |
def _SafeShutdownInstanceDisks(lu, instance): |
2524 |
"""Shutdown block devices of an instance.
|
2525 |
|
2526 |
This function checks if an instance is running, before calling
|
2527 |
_ShutdownInstanceDisks.
|
2528 |
|
2529 |
"""
|
2530 |
ins_l = lu.rpc.call_instance_list([instance.primary_node], |
2531 |
[instance.hypervisor]) |
2532 |
ins_l = ins_l[instance.primary_node] |
2533 |
if ins_l.failed or not isinstance(ins_l.data, list): |
2534 |
raise errors.OpExecError("Can't contact node '%s'" % |
2535 |
instance.primary_node) |
2536 |
|
2537 |
if instance.name in ins_l.data: |
2538 |
raise errors.OpExecError("Instance is running, can't shutdown" |
2539 |
" block devices.")
|
2540 |
|
2541 |
_ShutdownInstanceDisks(lu, instance) |
2542 |
|
2543 |
|
2544 |
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False): |
2545 |
"""Shutdown block devices of an instance.
|
2546 |
|
2547 |
This does the shutdown on all nodes of the instance.
|
2548 |
|
2549 |
If the ignore_primary is false, errors on the primary node are
|
2550 |
ignored.
|
2551 |
|
2552 |
"""
|
2553 |
result = True
|
2554 |
for disk in instance.disks: |
2555 |
for node, top_disk in disk.ComputeNodeTree(instance.primary_node): |
2556 |
lu.cfg.SetDiskID(top_disk, node) |
2557 |
result = lu.rpc.call_blockdev_shutdown(node, top_disk) |
2558 |
if result.failed or not result.data: |
2559 |
logging.error("Could not shutdown block device %s on node %s",
|
2560 |
disk.iv_name, node) |
2561 |
if not ignore_primary or node != instance.primary_node: |
2562 |
result = False
|
2563 |
return result
|
2564 |
|
2565 |
|
2566 |
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name): |
2567 |
"""Checks if a node has enough free memory.
|
2568 |
|
2569 |
This function check if a given node has the needed amount of free
|
2570 |
memory. In case the node has less memory or we cannot get the
|
2571 |
information from the node, this function raise an OpPrereqError
|
2572 |
exception.
|
2573 |
|
2574 |
@type lu: C{LogicalUnit}
|
2575 |
@param lu: a logical unit from which we get configuration data
|
2576 |
@type node: C{str}
|
2577 |
@param node: the node to check
|
2578 |
@type reason: C{str}
|
2579 |
@param reason: string to use in the error message
|
2580 |
@type requested: C{int}
|
2581 |
@param requested: the amount of memory in MiB to check for
|
2582 |
@type hypervisor_name: C{str}
|
2583 |
@param hypervisor_name: the hypervisor to ask for memory stats
|
2584 |
@raise errors.OpPrereqError: if the node doesn't have enough memory, or
|
2585 |
we cannot check the node
|
2586 |
|
2587 |
"""
|
2588 |
nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name) |
2589 |
nodeinfo[node].Raise() |
2590 |
free_mem = nodeinfo[node].data.get('memory_free')
|
2591 |
if not isinstance(free_mem, int): |
2592 |
raise errors.OpPrereqError("Can't compute free memory on node %s, result" |
2593 |
" was '%s'" % (node, free_mem))
|
2594 |
if requested > free_mem:
|
2595 |
raise errors.OpPrereqError("Not enough memory on node %s for %s:" |
2596 |
" needed %s MiB, available %s MiB" %
|
2597 |
(node, reason, requested, free_mem)) |
2598 |
|
2599 |
|
2600 |
class LUStartupInstance(LogicalUnit): |
2601 |
"""Starts an instance.
|
2602 |
|
2603 |
"""
|
2604 |
HPATH = "instance-start"
|
2605 |
HTYPE = constants.HTYPE_INSTANCE |
2606 |
_OP_REQP = ["instance_name", "force"] |
2607 |
REQ_BGL = False
|
2608 |
|
2609 |
def ExpandNames(self): |
2610 |
self._ExpandAndLockInstance()
|
2611 |
|
2612 |
def BuildHooksEnv(self): |
2613 |
"""Build hooks env.
|
2614 |
|
2615 |
This runs on master, primary and secondary nodes of the instance.
|
2616 |
|
2617 |
"""
|
2618 |
env = { |
2619 |
"FORCE": self.op.force, |
2620 |
} |
2621 |
env.update(_BuildInstanceHookEnvByObject(self, self.instance)) |
2622 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
2623 |
return env, nl, nl
|
2624 |
|
2625 |
def CheckPrereq(self): |
2626 |
"""Check prerequisites.
|
2627 |
|
2628 |
This checks that the instance is in the cluster.
|
2629 |
|
2630 |
"""
|
2631 |
self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
2632 |
assert self.instance is not None, \ |
2633 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
2634 |
|
2635 |
_CheckNodeOnline(self, instance.primary_node)
|
2636 |
|
2637 |
bep = self.cfg.GetClusterInfo().FillBE(instance)
|
2638 |
# check bridges existance
|
2639 |
_CheckInstanceBridgesExist(self, instance)
|
2640 |
|
2641 |
_CheckNodeFreeMemory(self, instance.primary_node,
|
2642 |
"starting instance %s" % instance.name,
|
2643 |
bep[constants.BE_MEMORY], instance.hypervisor) |
2644 |
|
2645 |
def Exec(self, feedback_fn): |
2646 |
"""Start the instance.
|
2647 |
|
2648 |
"""
|
2649 |
instance = self.instance
|
2650 |
force = self.op.force
|
2651 |
extra_args = getattr(self.op, "extra_args", "") |
2652 |
|
2653 |
self.cfg.MarkInstanceUp(instance.name)
|
2654 |
|
2655 |
node_current = instance.primary_node |
2656 |
|
2657 |
_StartInstanceDisks(self, instance, force)
|
2658 |
|
2659 |
result = self.rpc.call_instance_start(node_current, instance, extra_args)
|
2660 |
msg = result.RemoteFailMsg() |
2661 |
if msg:
|
2662 |
_ShutdownInstanceDisks(self, instance)
|
2663 |
raise errors.OpExecError("Could not start instance: %s" % msg) |
2664 |
|
2665 |
|
2666 |
class LURebootInstance(LogicalUnit): |
2667 |
"""Reboot an instance.
|
2668 |
|
2669 |
"""
|
2670 |
HPATH = "instance-reboot"
|
2671 |
HTYPE = constants.HTYPE_INSTANCE |
2672 |
_OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"] |
2673 |
REQ_BGL = False
|
2674 |
|
2675 |
def ExpandNames(self): |
2676 |
if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT, |
2677 |
constants.INSTANCE_REBOOT_HARD, |
2678 |
constants.INSTANCE_REBOOT_FULL]: |
2679 |
raise errors.ParameterError("reboot type not in [%s, %s, %s]" % |
2680 |
(constants.INSTANCE_REBOOT_SOFT, |
2681 |
constants.INSTANCE_REBOOT_HARD, |
2682 |
constants.INSTANCE_REBOOT_FULL)) |
2683 |
self._ExpandAndLockInstance()
|
2684 |
|
2685 |
def BuildHooksEnv(self): |
2686 |
"""Build hooks env.
|
2687 |
|
2688 |
This runs on master, primary and secondary nodes of the instance.
|
2689 |
|
2690 |
"""
|
2691 |
env = { |
2692 |
"IGNORE_SECONDARIES": self.op.ignore_secondaries, |
2693 |
} |
2694 |
env.update(_BuildInstanceHookEnvByObject(self, self.instance)) |
2695 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
2696 |
return env, nl, nl
|
2697 |
|
2698 |
def CheckPrereq(self): |
2699 |
"""Check prerequisites.
|
2700 |
|
2701 |
This checks that the instance is in the cluster.
|
2702 |
|
2703 |
"""
|
2704 |
self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
2705 |
assert self.instance is not None, \ |
2706 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
2707 |
|
2708 |
_CheckNodeOnline(self, instance.primary_node)
|
2709 |
|
2710 |
# check bridges existance
|
2711 |
_CheckInstanceBridgesExist(self, instance)
|
2712 |
|
2713 |
def Exec(self, feedback_fn): |
2714 |
"""Reboot the instance.
|
2715 |
|
2716 |
"""
|
2717 |
instance = self.instance
|
2718 |
ignore_secondaries = self.op.ignore_secondaries
|
2719 |
reboot_type = self.op.reboot_type
|
2720 |
extra_args = getattr(self.op, "extra_args", "") |
2721 |
|
2722 |
node_current = instance.primary_node |
2723 |
|
2724 |
if reboot_type in [constants.INSTANCE_REBOOT_SOFT, |
2725 |
constants.INSTANCE_REBOOT_HARD]: |
2726 |
result = self.rpc.call_instance_reboot(node_current, instance,
|
2727 |
reboot_type, extra_args) |
2728 |
if result.failed or not result.data: |
2729 |
raise errors.OpExecError("Could not reboot instance") |
2730 |
else:
|
2731 |
if not self.rpc.call_instance_shutdown(node_current, instance): |
2732 |
raise errors.OpExecError("could not shutdown instance for full reboot") |
2733 |
_ShutdownInstanceDisks(self, instance)
|
2734 |
_StartInstanceDisks(self, instance, ignore_secondaries)
|
2735 |
result = self.rpc.call_instance_start(node_current, instance, extra_args)
|
2736 |
msg = result.RemoteFailMsg() |
2737 |
if msg:
|
2738 |
_ShutdownInstanceDisks(self, instance)
|
2739 |
raise errors.OpExecError("Could not start instance for" |
2740 |
" full reboot: %s" % msg)
|
2741 |
|
2742 |
self.cfg.MarkInstanceUp(instance.name)
|
2743 |
|
2744 |
|
2745 |
class LUShutdownInstance(LogicalUnit): |
2746 |
"""Shutdown an instance.
|
2747 |
|
2748 |
"""
|
2749 |
HPATH = "instance-stop"
|
2750 |
HTYPE = constants.HTYPE_INSTANCE |
2751 |
_OP_REQP = ["instance_name"]
|
2752 |
REQ_BGL = False
|
2753 |
|
2754 |
def ExpandNames(self): |
2755 |
self._ExpandAndLockInstance()
|
2756 |
|
2757 |
def BuildHooksEnv(self): |
2758 |
"""Build hooks env.
|
2759 |
|
2760 |
This runs on master, primary and secondary nodes of the instance.
|
2761 |
|
2762 |
"""
|
2763 |
env = _BuildInstanceHookEnvByObject(self, self.instance) |
2764 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
2765 |
return env, nl, nl
|
2766 |
|
2767 |
def CheckPrereq(self): |
2768 |
"""Check prerequisites.
|
2769 |
|
2770 |
This checks that the instance is in the cluster.
|
2771 |
|
2772 |
"""
|
2773 |
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
2774 |
assert self.instance is not None, \ |
2775 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
2776 |
_CheckNodeOnline(self, self.instance.primary_node) |
2777 |
|
2778 |
def Exec(self, feedback_fn): |
2779 |
"""Shutdown the instance.
|
2780 |
|
2781 |
"""
|
2782 |
instance = self.instance
|
2783 |
node_current = instance.primary_node |
2784 |
self.cfg.MarkInstanceDown(instance.name)
|
2785 |
result = self.rpc.call_instance_shutdown(node_current, instance)
|
2786 |
if result.failed or not result.data: |
2787 |
self.proc.LogWarning("Could not shutdown instance") |
2788 |
|
2789 |
_ShutdownInstanceDisks(self, instance)
|
2790 |
|
2791 |
|
2792 |
class LUReinstallInstance(LogicalUnit): |
2793 |
"""Reinstall an instance.
|
2794 |
|
2795 |
"""
|
2796 |
HPATH = "instance-reinstall"
|
2797 |
HTYPE = constants.HTYPE_INSTANCE |
2798 |
_OP_REQP = ["instance_name"]
|
2799 |
REQ_BGL = False
|
2800 |
|
2801 |
def ExpandNames(self): |
2802 |
self._ExpandAndLockInstance()
|
2803 |
|
2804 |
def BuildHooksEnv(self): |
2805 |
"""Build hooks env.
|
2806 |
|
2807 |
This runs on master, primary and secondary nodes of the instance.
|
2808 |
|
2809 |
"""
|
2810 |
env = _BuildInstanceHookEnvByObject(self, self.instance) |
2811 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
2812 |
return env, nl, nl
|
2813 |
|
2814 |
def CheckPrereq(self): |
2815 |
"""Check prerequisites.
|
2816 |
|
2817 |
This checks that the instance is in the cluster and is not running.
|
2818 |
|
2819 |
"""
|
2820 |
instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
2821 |
assert instance is not None, \ |
2822 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
2823 |
_CheckNodeOnline(self, instance.primary_node)
|
2824 |
|
2825 |
if instance.disk_template == constants.DT_DISKLESS:
|
2826 |
raise errors.OpPrereqError("Instance '%s' has no disks" % |
2827 |
self.op.instance_name)
|
2828 |
if instance.admin_up:
|
2829 |
raise errors.OpPrereqError("Instance '%s' is marked to be up" % |
2830 |
self.op.instance_name)
|
2831 |
remote_info = self.rpc.call_instance_info(instance.primary_node,
|
2832 |
instance.name, |
2833 |
instance.hypervisor) |
2834 |
if remote_info.failed or remote_info.data: |
2835 |
raise errors.OpPrereqError("Instance '%s' is running on the node %s" % |
2836 |
(self.op.instance_name,
|
2837 |
instance.primary_node)) |
2838 |
|
2839 |
self.op.os_type = getattr(self.op, "os_type", None) |
2840 |
if self.op.os_type is not None: |
2841 |
# OS verification
|
2842 |
pnode = self.cfg.GetNodeInfo(
|
2843 |
self.cfg.ExpandNodeName(instance.primary_node))
|
2844 |
if pnode is None: |
2845 |
raise errors.OpPrereqError("Primary node '%s' is unknown" % |
2846 |
self.op.pnode)
|
2847 |
result = self.rpc.call_os_get(pnode.name, self.op.os_type) |
2848 |
result.Raise() |
2849 |
if not isinstance(result.data, objects.OS): |
2850 |
raise errors.OpPrereqError("OS '%s' not in supported OS list for" |
2851 |
" primary node" % self.op.os_type) |
2852 |
|
2853 |
self.instance = instance
|
2854 |
|
2855 |
def Exec(self, feedback_fn): |
2856 |
"""Reinstall the instance.
|
2857 |
|
2858 |
"""
|
2859 |
inst = self.instance
|
2860 |
|
2861 |
if self.op.os_type is not None: |
2862 |
feedback_fn("Changing OS to '%s'..." % self.op.os_type) |
2863 |
inst.os = self.op.os_type
|
2864 |
self.cfg.Update(inst)
|
2865 |
|
2866 |
_StartInstanceDisks(self, inst, None) |
2867 |
try:
|
2868 |
feedback_fn("Running the instance OS create scripts...")
|
2869 |
result = self.rpc.call_instance_os_add(inst.primary_node, inst)
|
2870 |
msg = result.RemoteFailMsg() |
2871 |
if msg:
|
2872 |
raise errors.OpExecError("Could not install OS for instance %s" |
2873 |
" on node %s: %s" %
|
2874 |
(inst.name, inst.primary_node, msg)) |
2875 |
finally:
|
2876 |
_ShutdownInstanceDisks(self, inst)
|
2877 |
|
2878 |
|
2879 |
class LURenameInstance(LogicalUnit): |
2880 |
"""Rename an instance.
|
2881 |
|
2882 |
"""
|
2883 |
HPATH = "instance-rename"
|
2884 |
HTYPE = constants.HTYPE_INSTANCE |
2885 |
_OP_REQP = ["instance_name", "new_name"] |
2886 |
|
2887 |
def BuildHooksEnv(self): |
2888 |
"""Build hooks env.
|
2889 |
|
2890 |
This runs on master, primary and secondary nodes of the instance.
|
2891 |
|
2892 |
"""
|
2893 |
env = _BuildInstanceHookEnvByObject(self, self.instance) |
2894 |
env["INSTANCE_NEW_NAME"] = self.op.new_name |
2895 |
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) |
2896 |
return env, nl, nl
|
2897 |
|
2898 |
def CheckPrereq(self): |
2899 |
"""Check prerequisites.
|
2900 |
|
2901 |
This checks that the instance is in the cluster and is not running.
|
2902 |
|
2903 |
"""
|
2904 |
instance = self.cfg.GetInstanceInfo(
|
2905 |
self.cfg.ExpandInstanceName(self.op.instance_name)) |
2906 |
if instance is None: |
2907 |
raise errors.OpPrereqError("Instance '%s' not known" % |
2908 |
self.op.instance_name)
|
2909 |
_CheckNodeOnline(self, instance.primary_node)
|
2910 |
|
2911 |
if instance.admin_up:
|
2912 |
raise errors.OpPrereqError("Instance '%s' is marked to be up" % |
2913 |
self.op.instance_name)
|
2914 |
remote_info = self.rpc.call_instance_info(instance.primary_node,
|
2915 |
instance.name, |
2916 |
instance.hypervisor) |
2917 |
remote_info.Raise() |
2918 |
if remote_info.data:
|
2919 |
raise errors.OpPrereqError("Instance '%s' is running on the node %s" % |
2920 |
(self.op.instance_name,
|
2921 |
instance.primary_node)) |
2922 |
self.instance = instance
|
2923 |
|
2924 |
# new name verification
|
2925 |
name_info = utils.HostInfo(self.op.new_name)
|
2926 |
|
2927 |
self.op.new_name = new_name = name_info.name
|
2928 |
instance_list = self.cfg.GetInstanceList()
|
2929 |
if new_name in instance_list: |
2930 |
raise errors.OpPrereqError("Instance '%s' is already in the cluster" % |
2931 |
new_name) |
2932 |
|
2933 |
if not getattr(self.op, "ignore_ip", False): |
2934 |
if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
|
2935 |
raise errors.OpPrereqError("IP %s of instance %s already in use" % |
2936 |
(name_info.ip, new_name)) |
2937 |
|
2938 |
|
2939 |
def Exec(self, feedback_fn): |
2940 |
"""Reinstall the instance.
|
2941 |
|
2942 |
"""
|
2943 |
inst = self.instance
|
2944 |
old_name = inst.name |
2945 |
|
2946 |
if inst.disk_template == constants.DT_FILE:
|
2947 |
old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1]) |
2948 |
|
2949 |
self.cfg.RenameInstance(inst.name, self.op.new_name) |
2950 |
# Change the instance lock. This is definitely safe while we hold the BGL
|
2951 |
self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
|
2952 |
self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name) |
2953 |
|
2954 |
# re-read the instance from the configuration after rename
|
2955 |
inst = self.cfg.GetInstanceInfo(self.op.new_name) |
2956 |
|
2957 |
if inst.disk_template == constants.DT_FILE:
|
2958 |
new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1]) |
2959 |
result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
|
2960 |
old_file_storage_dir, |
2961 |
new_file_storage_dir) |
2962 |
result.Raise() |
2963 |
if not result.data: |
2964 |
raise errors.OpExecError("Could not connect to node '%s' to rename" |
2965 |
" directory '%s' to '%s' (but the instance"
|
2966 |
" has been renamed in Ganeti)" % (
|
2967 |
inst.primary_node, old_file_storage_dir, |
2968 |
new_file_storage_dir)) |
2969 |
|
2970 |
if not result.data[0]: |
2971 |
raise errors.OpExecError("Could not rename directory '%s' to '%s'" |
2972 |
" (but the instance has been renamed in"
|
2973 |
" Ganeti)" % (old_file_storage_dir,
|
2974 |
new_file_storage_dir)) |
2975 |
|
2976 |
_StartInstanceDisks(self, inst, None) |
2977 |
try:
|
2978 |
result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
|
2979 |
old_name) |
2980 |
msg = result.RemoteFailMsg() |
2981 |
if msg:
|
2982 |
msg = ("Could not run OS rename script for instance %s on node %s"
|
2983 |
" (but the instance has been renamed in Ganeti): %s" %
|
2984 |
(inst.name, inst.primary_node, msg)) |
2985 |
self.proc.LogWarning(msg)
|
2986 |
finally:
|
2987 |
_ShutdownInstanceDisks(self, inst)
|
2988 |
|
2989 |
|
2990 |
class LURemoveInstance(LogicalUnit): |
2991 |
"""Remove an instance.
|
2992 |
|
2993 |
"""
|
2994 |
HPATH = "instance-remove"
|
2995 |
HTYPE = constants.HTYPE_INSTANCE |
2996 |
_OP_REQP = ["instance_name", "ignore_failures"] |
2997 |
REQ_BGL = False
|
2998 |
|
2999 |
def ExpandNames(self): |
3000 |
self._ExpandAndLockInstance()
|
3001 |
self.needed_locks[locking.LEVEL_NODE] = []
|
3002 |
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
|
3003 |
|
3004 |
def DeclareLocks(self, level): |
3005 |
if level == locking.LEVEL_NODE:
|
3006 |
self._LockInstancesNodes()
|
3007 |
|
3008 |
def BuildHooksEnv(self): |
3009 |
"""Build hooks env.
|
3010 |
|
3011 |
This runs on master, primary and secondary nodes of the instance.
|
3012 |
|
3013 |
"""
|
3014 |
env = _BuildInstanceHookEnvByObject(self, self.instance) |
3015 |
nl = [self.cfg.GetMasterNode()]
|
3016 |
return env, nl, nl
|
3017 |
|
3018 |
def CheckPrereq(self): |
3019 |
"""Check prerequisites.
|
3020 |
|
3021 |
This checks that the instance is in the cluster.
|
3022 |
|
3023 |
"""
|
3024 |
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name) |
3025 |
assert self.instance is not None, \ |
3026 |
"Cannot retrieve locked instance %s" % self.op.instance_name |
3027 |
|
3028 |
def Exec(self, feedback_fn): |
3029 |
"""Remove the instance.
|
3030 |
|
3031 |
"""
|
3032 |
instance = self.instance
|
3033 |
logging.info("Shutting down instance %s on node %s",
|
3034 |
instance.name, instance.primary_node) |
3035 |
|
3036 |
result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
|
3037 |
if result.failed or not result.data: |
3038 |
if self.op.ignore_failures: |
3039 |
feedback_fn("Warning: can't shutdown instance")
|
3040 |
else:
|
3041 |
raise errors.OpExecError("Could not shutdown instance %s on node %s" % |
3042 |
(instance.name, instance.primary_node)) |
3043 |
|
3044 |
logging.info("Removing block devices for instance %s", instance.name)
|
3045 |
|
3046 |
if not _RemoveDisks(self, instance): |
3047 |
if self.op.ignore_failures: |
3048 |
feedback_fn("Warning: can't remove instance's disks")
|
3049 |
else:
|
3050 |
raise errors.OpExecError("Can't remove instance's disks") |
3051 |
|
3052 |
logging.info("Removing instance %s out of cluster config", instance.name)
|
3053 |
|
3054 |
    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    "(disk).(size)/([0-9]+)",
                                    "(disk).(sizes)",
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
                                    "(nic).(macs|ips|bridges)",
                                    "(disk|nic).(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

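    # locking is only required when live (dynamic) fields were requested;
    # purely static fields can be served from the configuration alone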
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_locking:
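      # gather live instance data from the primary nodes, covering every
      # hypervisor in use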
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

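    # build one output row per instance, with values in the same order as
    # the requested output fields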
    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding"
                             " anyway. Please make sure node %s is down",
                             instance.name, source_node, source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

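    # the instance and its disks are down on the source node; record the new
    # primary node in the configuration before restarting it on the target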
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
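    # poll all nodes every two seconds until every disk reports itself as
    # fully synchronized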
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.data[1]
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn(" - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

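    # demote the node that no longer runs the instance, then cycle the
    # disks through standalone and single-master mode to get a clean state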
    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.data[1]

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
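    # map each node to its secondary IP; the DRBD and migration RPCs use
    # these addresses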
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.CreateBlockDevice} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.CreateBlockDevice} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
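  # convert the disk's unique ID into the node-specific ID needed by the
  # create RPC on the target node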
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  msg = result.RemoteFailMsg()
  if msg:
    raise errors.OpExecError("Can't create block device %s on"
                             " node %s for instance %s: %s" %
                             (device, node, instance.name, msg))
  if device.physical_id is None:
    device.physical_id = result.data[1]


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
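  # a DRBD8 disk is backed by two LVs in the cluster's volume group: a data
  # LV of the requested size and a small metadata LV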
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev


def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

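    # each disk needs a pair of LV names (data and metadata), derived from
    # the same unique prefix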
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         idx)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % pnode)

if |