#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging
import copy
import random

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer
from ganeti import ssconf

class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer have to worry about missing parameters.

    """
    pass

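  # Illustrative sketch (not part of the original file): a subclass might use
  # CheckArguments to normalize an optional opcode attribute before any locks
  # are taken; the "force" attribute below is a made-up example.
  #
  #   def CheckArguments(self):
  #     if not hasattr(self.op, "force"):
  #       self.op.force = False
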
  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

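  # Illustrative sketch (not part of the original file): an LU that locks
  # instances in ExpandNames can compute the matching node locks here, once
  # the instance locks are held, mirroring the pattern documented in
  # _LockInstancesNodes below:
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       self._LockInstancesNodes()
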
  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this
    will be handled in the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

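  # Illustrative usage (not part of the original file): an instance-level LU
  # would typically call this helper from its ExpandNames and then ask for
  # node locks to be recalculated once the instance lock is held:
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
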
  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primary or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check whether we really have been called with the instance locks
    # held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


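# Illustrative sketch (not part of the original file): a minimal concurrent
# LU honouring the contract above, built on the NoHooksLU helper defined just
# below; the class name and feedback message are made up for the example.
#
#   class LUExample(NoHooksLU):
#     _OP_REQP = ["instance_name"]
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       self._ExpandAndLockInstance()
#
#     def CheckPrereq(self):
#       self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
#
#     def Exec(self, feedback_fn):
#       feedback_fn("Nothing to do for %s" % self.instance.name)
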
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)

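# Illustrative usage (not part of the original file): an LU's CheckPrereq
# might expand a user-supplied node list this way; "self.op.nodes" stands in
# for whatever node list the opcode actually carries:
#
#   self.wanted = _GetWantedNodes(self, self.op.nodes)
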
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))

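# Illustrative usage (not part of the original file): query-style LUs call
# this helper from ExpandNames with their field sets, as LUDiagnoseOS does
# further down:
#
#   _CheckOutputFields(static=self._FIELDS_STATIC,
#                      dynamic=self._FIELDS_DYNAMIC,
#                      selected=self.op.output_fields)
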
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)

def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env

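# Illustrative result (not part of the original file): for an instance with a
# single NIC the function returns a dict along these lines (all values
# invented for the example):
#
#   {"OP_TARGET": "inst1", "INSTANCE_NAME": "inst1",
#    "INSTANCE_PRIMARY": "node1", "INSTANCE_SECONDARIES": "node2",
#    "INSTANCE_OS_TYPE": "debian-etch", "INSTANCE_STATUS": "up",
#    "INSTANCE_MEMORY": 128, "INSTANCE_VCPUS": 1,
#    "INSTANCE_NIC0_IP": "", "INSTANCE_NIC0_BRIDGE": "xen-br0",
#    "INSTANCE_NIC0_HWADDR": "aa:00:00:00:00:01", "INSTANCE_NIC_COUNT": 1}
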
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))

class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise()
    if not result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master

class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used DRBD minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G
    vglist = node_result.get(constants.NV_VGLIST, None)
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                  (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any
    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                      (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                      (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    used_minors = node_result.get(constants.NV_DRBDLIST, [])
    for minor, (iname, must_exist) in drbd_map.items():
      if minor not in used_minors and must_exist:
        feedback_fn("  - ERROR: drbd minor %d of instance %s is not active" %
                    (minor, iname))
        bad = True
    for minor in used_minors:
      if minor not in drbd_map:
        feedback_fn("  - ERROR: unallocated drbd minor %d is in use" % minor)
        bad = True

    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                      (volume, node))
          bad = True

    if instanceconfig.admin_up:
      if ((node_current not in node_instance or
          not instance in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                    (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                      (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                      (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase; their failure makes
    the output be logged in the verify output and the verification fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = [] # List of offline nodes
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.RAPI_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_LVLIST: vg_name,
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VGLIST: None,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_DRBDLIST: None,
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    all_drbd_map = self.cfg.ComputeDRBDMap()

    for node_i in nodeinfo:
      node = node_i.name
      nresult = all_nvinfo[node].data

      if node_i.offline:
        feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      else:
        ntype = "regular"
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      if all_nvinfo[node].failed or not isinstance(nresult, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_drbd = {}
      for minor, instance in all_drbd_map[node].items():
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)
      result = self._VerifyNode(node_i, file_names, local_checksums,
                                nresult, feedback_fn, master_files,
                                node_drbd)
      bad = bad or result

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if isinstance(lvdata, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, utils.SafeEncode(lvdata)))
        bad = True
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      if not isinstance(idata, list):
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
                    (node,))
        bad = True
        continue

      node_instance[node] = idata

      # node_info
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary. this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn, n_offline)
      bad = bad or result
      inst_nodes_offline = []

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      elif pnode not in n_offline:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      if pnode in n_offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        elif snode not in n_offline:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))
          bad = True
        if snode in n_offline:
          inst_nodes_offline.append(snode)

      if inst_nodes_offline:
        # warn that the instance lives on offline nodes, and set bad=True
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
                    ", ".join(inst_nodes_offline))
        bad = True

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res.failed or res.data is False or not isinstance(res.data, list):
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution")
            lu_result = 1
            continue
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("    Node %s:" % node_name)
                show_node_header = False
              feedback_fn("      ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result

class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result

class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")

def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV

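# Illustrative behaviour (not part of the original file): a DRBD disk whose
# children are logical volumes counts as LVM-based, because the recursion
# reaches an LD_LV leaf; the constructor keyword form is assumed from the
# objects.Disk config-object interface.
#
#   lv = objects.Disk(dev_type=constants.LD_LV)
#   drbd = objects.Disk(dev_type=constants.LD_DRBD8, children=[lv, lv])
#   _RecursiveCheckIfLVMBased(drbd)  # -> True
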
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.CheckBEParams(self.op.beparams)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      _AdjustCandidatePool(self)

class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())

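# Illustrative note (not part of the original file): re-saving the unchanged
# cluster object is enough here on the assumption that cfg.Update rewrites
# the configuration file and pushes it out to the other nodes as a side
# effect; this module does not spell that mechanism out.
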
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)

  return not cumul_degraded

1566 |
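# Worked example for _WaitForSync above (values assumed, not taken from the
# RPC layer): each mstat entry unpacks as (perc_done, est_time, is_degraded,
# ldisk). A disk still syncing might report (42.5, 300, True, False), which
# logs "- device sda: 42.50% done, 300 estimated seconds remaining"; a disk
# in sync reports perc_done=None and no longer holds the loop open.

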
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.RemoteFailMsg()
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


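# Note on the magic indices above (an assumption about the RPC payload, not
# spelled out in this file): the blockdev_find result is treated as a
# positional status tuple, with slot 5 holding is_degraded and slot 6
# holding ldisk, hence idx = 6 only for the local-storage view.

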
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary.

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in node_list]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


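# Illustrative sketch (assumed data): if only node1 carries "debian-etch",
# _DiagnoseByOS above still creates an entry for node2, and the empty list
# is what makes the "valid" computation in Exec return False:
#
#   pol = {"debian-etch": {"node1": [<OS object>], "node2": []}}
#   utils.all([osl and osl[0] for osl in pol["debian-etch"].values()])
#   --> False, because node2 has no copy of that OS

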
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)


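# Reminder (illustrative, node names assumed): BuildHooksEnv returns the
# triple (env, pre_hook_nodes, post_hook_nodes). For the removal above,
# taking node3 out of a node1..node3 cluster yields:
#
#   env = {"OP_TARGET": "node3", "NODE_NAME": "node3"}
#   pre_hook_nodes = post_hook_nodes = ["node1", "node2"]

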
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in _GetWantedNodes for a
    # non-empty list; an empty list needs no validation
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


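# Illustrative output (assumed values): querying output_fields
# ["name", "pinst_cnt", "mfree"] over two nodes yields one row per node, in
# NiceSort order, with None for any dynamic field a node did not report:
#
#   [["node1", 2, 1024],
#    ["node2", 0, None]]

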
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    The output fields were already validated in ExpandNames; here we
    only grab the list of locked nodes.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or volumes[node].failed or not volumes[node].data:
        continue

      node_vols = volumes[node].data[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # check reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
      }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_USE_VNC.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if result[node].failed or not result[node].data:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)


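# Worked example for the candidate-pool decision in CheckPrereq above
# (numbers assumed): with candidate_pool_size = 10 and mc_now = 7 the new
# node joins as a master candidate (7 < 10); once the pool already holds 10
# candidates, master_candidate is False and the node joins as a plain node.

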
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    if self.op.master_candidate is None and self.op.offline is None:
      raise errors.OpPrereqError("Please pass at least one modification")
    if self.op.offline == True and self.op.master_candidate == True:
      raise errors.OpPrereqError("Can't set the node into offline and"
                                 " master_candidate at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the proposed modifications are valid: the master
    node can be neither demoted nor set offline, and demoting any other
    node below the configured candidate pool size requires the force
    flag.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if ((self.op.master_candidate == False or self.op.offline == True)
        and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate and online")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    if (self.op.master_candidate == True and node.offline and
        not self.op.offline == False):
      raise errors.OpPrereqError("Can't set an offline node to"
                                 " master_candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True and node.master_candidate:
        node.master_candidate = False
        result.append(("master_candidate", "auto-demotion due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if self.op.node_name != self.cfg.GetMasterNode():
      self.context.ReaddNode(node)

    return result


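# Quick reference (illustrative) for the flag combinations accepted by
# CheckArguments above:
#
#   offline=True,  master_candidate=True   -> rejected (contradictory)
#   offline=None,  master_candidate=None   -> rejected (nothing to change)
#   offline=True,  master_candidate=None   -> ok; may auto-demote the node
#   offline=None,  master_candidate=False  -> ok; subject to the pool check

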
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
                        for hypervisor in cluster.enabled_hypervisors]),
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Collect and return the requested configuration values.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: a tuple of (status, device_info); status is False if the
      operation failed, and device_info is a list of
      (host, instance_visible_name, node_visible_name) tuples with the
      mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two-pass mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1)",
                           inst_disk.iv_name, node)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2)",
                           inst_disk.iv_name, node)
        disks_ok = False
    device_info.append((instance.primary_node, inst_disk.iv_name, result.data))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


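# Sketch of the two-pass assembly above (illustrative; assumes a DRBD disk
# mirrored between pnode and snode): ComputeNodeTree(pnode) yields both
# (pnode, subtree) and (snode, subtree), so pass 1 assembles both sides
# with is_primary=False and only pass 2 flips the primary-node side to
# is_primary=True, narrowing the window in which DRBD is primary before
# the peers have shaken hands.

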
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks that the instance is not running before calling
  _ShutdownInstanceDisks, and raises an error otherwise.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                    [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  if ins_l.failed or not isinstance(ins_l.data, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored;
  otherwise they mark the whole operation as failed.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      # keep the per-call RPC result separate from the aggregate status,
      # so the boolean return value is not clobbered
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      if result.failed or not result.data:
        logging.error("Could not shutdown block device %s on node %s",
                      disk.iv_name, node)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise()
  free_mem = nodeinfo[node].data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))


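# Usage sketch (hypothetical node and instance names): asking for 2048 MiB
# on a node whose hypervisor reports memory_free=1024 raises OpPrereqError
# with the caller-supplied reason interpolated:
#
#   _CheckNodeFreeMemory(self, "node1.example.com",
#                        "starting instance web1", 2048, "xen-pvm")
#   --> OpPrereqError: Not enough memory on node node1.example.com for
#       starting instance web1: needed 2048 MiB, available 1024 MiB

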
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance, extra_args)
    msg = result.RemoteFailMsg()
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type, extra_args)
      if result.failed or not result.data:
        raise errors.OpExecError("Could not reboot instance")
    else:
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


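# Summary of the dispatch above (illustrative): soft and hard reboots are a
# single instance_reboot RPC handled entirely on the node, while a full
# reboot is emulated from the master as shutdown -> disk teardown -> disk
# assembly -> start, which is why only the full variant honours the
# ignore_secondaries flag.

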
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    if result.failed or not result.data:
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node only, since by the time the post hooks
    run the instance is already gone.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s from cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    "(disk).(size)/([0-9]+)",
                                    "(disk).(sizes)", "disk_usage",
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
                                    "(nic).(macs|ips|bridges)",
                                    "(disk|nic).(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")

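  # Note (added commentary): the parenthesized entries above are regular
  # expressions, not literal field names; utils.FieldSet matches requested
  # output fields against them, and Exec below inspects the match groups.
  # For example, "disk.size/0" selects the size of disk 0 via
  # "(disk).(size)/([0-9]+)", while "nic.macs" selects the list of all NIC
  # MAC addresses via "(nic).(macs|ips|bridges)".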
  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

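  # Note (added commentary): only the dynamic fields ("oper_state",
  # "oper_ram", "status") require live data from the nodes; a query for
  # static fields only is answered purely from the master configuration,
  # and with use_locking unset it runs without acquiring any locks at all.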
  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
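        # Note (added commentary): "status" combines the admin state with
        # the live state reported by the primary node:
        #   running / ADMIN_down -- admin intent and reality agree
        #   ERROR_up / ERROR_down -- instance running (resp. stopped)
        #     contrary to its admin state
        #   ERROR_nodedown / ERROR_nodeoffline -- the primary node is
        #     unreachable or marked offline, so the live state is unknown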
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

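  # Note (added commentary): failover is a stop-and-start operation: the
  # instance is shut down on the current primary, ownership of the mirrored
  # disks moves to the old secondary, and the instance is restarted there.
  # With ignore_consistency set, a degraded disk is accepted and any
  # unsynchronized data on the old primary is lost.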
  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down", instance.name, source_node,
                             source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down the instance, in contrast to
  failover, which is done with a shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

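  # Note (added commentary): the helper methods below drive the DRBD state
  # changes used by both migration and cleanup.  The happy path of a live
  # migration, sketched with these helpers:
  #
  #   self._EnsureSecondary(target_node)  # close the disks on the target
  #   self._GoStandalone()                # disconnect the DRBD pair
  #   self._GoReconnect(True)             # reconnect in dual-master mode
  #   self._WaitUntilSync()               # wait until fully synchronized
  #   ... migrate the instance ...        # see _ExecMigration below
  #   self._GoReconnect(False)            # back to single-master mode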
  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn(" - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks on node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

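  # Note (added commentary): nodes_ip below maps each node to its secondary
  # IP, i.e. the address on the replication network; both the DRBD network
  # configuration and the hypervisor migration target address use these
  # secondary IPs rather than the nodes' primary addresses.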
  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      the CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


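# Note (added commentary): creation is bottom-up: _CreateBlockDev recurses
# into all children of a device before creating the device itself, so for a
# DRBD8 disk the two backing logical volumes (data and metadata) exist on a
# node before the DRBD device is assembled on top of them.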
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  msg = result.RemoteFailMsg()
  if msg:
    raise errors.OpExecError("Can't create block device %s on"
                             " node %s for instance %s: %s" %
                             (device, node, instance.name, msg))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate suitable LV names.

  This will generate logical volume names, one for each of the given
  extensions.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results


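# Note (added commentary): each generated name is a cluster-unique
# identifier with the extension appended, e.g. for exts=[".disk0", ".disk1"]
# two names of the form "<unique-id>.disk0" and "<unique-id>.disk1"; these
# become the LV names of the instance's volumes.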
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev


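# Note (added commentary): the device tree built above for one DRBD8 disk,
# sketched:
#
#   DRBD8  logical_id=(primary, secondary, port, p_minor, s_minor, secret)
#    +-- LV names[0]  (size MB)   -- the data volume
#    +-- LV names[1]  (128 MB)    -- the DRBD metadata volume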
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         idx)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @raise errors.OpExecError: if any of the devices cannot be created

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % pnode)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               file_storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      result = lu.rpc.call_blockdev_remove(node, disk)
      if result.failed or not result.data:
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
                           " continuing anyway", device.iv_name, node)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 file_storage_dir)
    if result.failed or not result.data:
      logging.error("Could not remove directory '%s'", file_storage_dir)
      all_result = False

  return all_result


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group.

  """
  # Required free disk space as a function of the disk template and
  # the requested disk sizes
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]


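# Note (added commentary): a worked example -- two disks of 1024 MB each
# under DT_DRBD8 require 2 * (1024 + 128) = 2304 MB of free space in the
# volume group on each node, while DT_DISKLESS and DT_FILE return None,
# meaning no volume group space is consumed at all.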
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    if info.offline:
      continue
    msg = info.RemoteFailMsg()
    if msg:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % msg)


class LUCreateInstance(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disks", "disk_template",
              "mode", "start",
              "wait_for_sync", "ip_check", "nics",
              "hvparams", "beparams"]
  REQ_BGL = False

  def _ExpandNode(self, node):
    """Expands and checks one node name.

    """
    node_full = self.cfg.ExpandNodeName(node)
    if node_full is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return node_full

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # set optional parameters to None if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                                    ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)

    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict
    utils.CheckBEParams(self.op.beparams)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

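  # Note (added commentary): when an iallocator is used the target nodes are
  # not known at lock time, which is why ExpandNames above acquires all node
  # locks (locking.ALL_SET); _RunAllocator then narrows the actual placement
  # to the nodes returned by the allocator script.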
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    nics = [n.ToDict() for n in self.nics]
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nics,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]

4339 |
"""Build hooks env.
|
4340 |
|
4341 |
This runs on master, primary and secondary nodes of the instance.
|
4342 |
|
4343 |
"""
|
4344 |
env = { |
4345 |
"INSTANCE_DISK_TEMPLATE": self.op.disk_template, |
4346 |
"INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks), |
4347 |
"INSTANCE_ADD_MODE": self.op.mode, |
4348 |
} |
4349 |
if self.op.mode == constants.INSTANCE_IMPORT: |
4350 |
env["INSTANCE_SRC_NODE"] = self.op.src_node |
4351 |
env["INSTANCE_SRC_PATH"] = self.op.src_path |
4352 |
env["INSTANCE_SRC_IMAGES"] = self.src_images |
4353 |
|
4354 |
env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
|
4355 |
primary_node=self.op.pnode,
|
4356 |
secondary_nodes=self.secondaries,
|
4357 |
status=self.op.start,
|
4358 |
os_type=self.op.os_type,
|
4359 |
memory=self.be_full[constants.BE_MEMORY],
|
4360 |
vcpus=self.be_full[constants.BE_VCPUS],
|
4361 |
nics=[(n.ip, n.bridge, n.mac) for n in self.nics], |
4362 |
)) |
4363 |
|
4364 |
nl = ([self.cfg.GetMasterNode(), self.op.pnode] + |
4365 |
self.secondaries)
|
4366 |
return env, nl, nl
|
4367 |
|
4368 |
|
4369 |
  def CheckPrereq(self):
    """Check prerequisites.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                     src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if int(ei_version) != constants.EXPORT_VERSION:
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)
      _CheckNodeOnline(self, self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node" % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

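  # Note (added commentary): Exec below is ordered for safe rollback: the
  # disks are created first and removed again if anything fails, the
  # instance is added to the configuration only once its disks exist, and
  # node locks are released as soon as they are no longer needed.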
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))

    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
        msg = result.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Could not add os for instance %s"
                                   " on node %s: %s" %
                                   (instance, pnode_name, msg))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        import_result.Raise()
        for idx, result in enumerate(import_result.data):
4653 |
if not result: |
4654 |
self.LogWarning("Could not import the image %s for instance" |
4655 |
" %s, disk %d, on node %s" %
|
4656 |
(src_images[idx], instance, idx, pnode_name)) |
4657 |
else:
|
4658 |
# also checked in the prereq part
|
4659 |
raise errors.ProgrammerError("Unknown OS initialization mode '%s'" |
4660 |
% self.op.mode)
|
4661 |
|
4662 |
if self.op.start: |
4663 |
iobj.admin_up = True
|
4664 |
self.cfg.Update(iobj)
|
4665 |
logging.info("Starting instance %s on node %s", instance, pnode_name)
|
4666 |
feedback_fn("* starting instance...")
|
4667 |
result = self.rpc.call_instance_start(pnode_name, iobj, None) |
4668 |
msg = result.RemoteFailMsg() |
4669 |
if msg:
|
4670 |
raise errors.OpExecError("Could not start instance: %s" % msg) |
4671 |
|
4672 |
|
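# Note that LUConnectConsole does not open the console itself: it only
# builds and returns the ssh command line, which the client then runs on
# the master node.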
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise()

    if instance.name not in node_insts.data:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    hvparams = cluster.FillHV(instance)
    beparams = cluster.FillBE(instance)
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)

    # build ssh cmdline
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)


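# Disk replacement comes in three modes: REPLACE_DISK_PRI (rebuild the
# backing LVs on the primary node), REPLACE_DISK_SEC (rebuild them on the
# secondary) and REPLACE_DISK_CHG (move the DRBD secondary to a new node,
# chosen either explicitly or via an iallocator script).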
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mode", "disks"]
  REQ_BGL = False

  def CheckArguments(self):
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None
    if not hasattr(self.op, "iallocator"):
      self.op.iallocator = None

    # check for valid parameter combination
    cnt = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if cnt == 2:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      elif cnt == 0:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    else: # not replacing the secondary
      if cnt != 2:
        raise errors.OpPrereqError("The iallocator and new node options can"
                                   " be used only when changing the"
                                   " secondary node")

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = remote_node
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

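  # When an iallocator is requested, the new secondary is computed during
  # the prerequisite checks and stored back into self.op.remote_node, so
  # the rest of the code can treat both cases identically.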
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    if self.op.iallocator is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)

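  # The two _ExecD8* helpers below implement the actual replacement. The
  # disk-only variant swaps the backing LVs in place by renaming: the old
  # LVs get a "_replaced-<timestamp>" suffix, the new LVs take over the
  # old names, and only after a successful sync is the old storage removed.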
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        result = self.rpc.call_blockdev_find(node, dev)
        msg = result.RemoteFailMsg()
        if not msg and not result.payload:
          msg = "disk not found"
        if msg:
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not result.RemoteFailMsg() and result.payload:
          # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      if result.failed or not result.data:
        for new_lv in new_lvs:
          result = self.rpc.call_blockdev_remove(tgt_node, new_lv)
          if result.failed or not result.data:
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))
      if result.payload[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        result = self.rpc.call_blockdev_remove(tgt_node, lv)
        if result.failed or not result.data:
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue

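  # When changing the secondary, the new DRBD devices are first created
  # without network information ("standalone"), the primary is then
  # disconnected from the old peer, and only afterwards are both sides
  # attached to the network using the new logical IDs.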
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    nodes_ip = {
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
      }

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    for node in pri_node, new_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d on %s" % (idx, pri_node))
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                 (idx, pri_node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, pri_node))
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      info("adding new local storage on %s for disk/%d" %
           (new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self, new_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step 4: drbd minors and drbd setup changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s", minors)
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
      size = dev.size
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if pri_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children)
      try:
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                              _GetInstanceInfoText(instance), False)
      except errors.BlockDeviceError:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    for idx, dev in enumerate(instance.disks):
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for disk/%d on old node" % idx)
      cfg.SetDiskID(dev, old_node)
      result = self.rpc.call_blockdev_shutdown(old_node, dev)
      if result.failed or not result.data:
        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                               instance.disks)[pri_node]

    msg = result.RemoteFailMsg()
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
                                           instance.disks, instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.RemoteFailMsg()
      if msg:
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
                                 (idx, msg))
      if result.payload[5]:
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for disk/%d" % idx)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        result = self.rpc.call_blockdev_remove(old_node, lv)
        if result.failed or not result.data:
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")

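  # The dispatcher below also takes care of instances that are
  # administratively down: their disks are activated before the
  # replacement and safely shut down again afterwards.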
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # Activate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _StartInstanceDisks(self, instance, True)

    if self.op.mode == constants.REPLACE_DISK_CHG:
      fn = self._ExecD8Secondary
    else:
      fn = self._ExecD8DiskOnly

    ret = fn(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _SafeShutdownInstanceDisks(self, instance)

    return ret


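# Disk growth is only supported for the plain and drbd8 templates; the
# amount is expressed in MiB and the free space check is done against
# the volume group on every node holding the disk.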
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)


    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      if info.failed or not info.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node, msg))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")


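# The instance data query can run in two modes: static (configuration
# data only) or live, in which case the primary (and, for DRBD disks,
# secondary) nodes are contacted for the current device status.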
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = dev_pstatus.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                 (instance.name, msg))
      dev_pstatus = dev_pstatus.payload
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      msg = dev_sstatus.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                 (instance.name, msg))
      dev_sstatus = dev_sstatus.payload
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result


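# Parameter modifications are validated against the cluster defaults and
# the target nodes, but are only written to the configuration; they take
# effect at the next restart of the instance.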
class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def CheckArguments(self):
    if not hasattr(self.op, 'nics'):
      self.op.nics = []
    if not hasattr(self.op, 'disks'):
      self.op.disks = []
    if not hasattr(self.op, 'beparams'):
      self.op.beparams = {}
    if not hasattr(self.op, 'hvparams'):
      self.op.hvparams = {}
    self.op.force = getattr(self.op, "force", False)
    if not (self.op.nics or self.op.disks or
            self.op.hvparams or self.op.beparams):
      raise errors.OpPrereqError("No changes submitted")

    utils.CheckBEParams(self.op.beparams)

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index")
      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing")
        try:
          size = int(size)
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err))
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk")

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time")

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index")

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        if nic_ip.lower() == "none":
          nic_dict['ip'] = None
        else:
          if not utils.IsValidIP(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
      # we can only check None bridges and assign the default one
      nic_bridge = nic_dict.get('bridge', None)
      if nic_bridge is None:
        nic_dict['bridge'] = self.cfg.GetDefBridge()
      # but we can validate MACs
      nic_mac = nic_dict.get('mac', None)
      if nic_mac is not None:
        if self.cfg.IsMacInUse(nic_mac):
          raise errors.OpPrereqError("MAC address %s already in use"
                                     " in cluster" % nic_mac)
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          if not utils.IsValidMac(nic_mac):
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time")

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # FIXME: readd disk/nic changes
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    force = self.force = self.op.force

    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # hvparams processing
    if self.op.hvparams:
      i_hvdict = copy.deepcopy(instance.hvparams)
      for key, val in self.op.hvparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          try:
            del i_hvdict[key]
          except KeyError:
            pass
        elif val == constants.VALUE_NONE:
          i_hvdict[key] = None
        else:
          i_hvdict[key] = val
      cluster = self.cfg.GetClusterInfo()
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                i_hvdict)
      # local check
      hypervisor.GetHypervisor(
        instance.hypervisor).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = copy.deepcopy(instance.beparams)
      for key, val in self.op.beparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          try:
            del i_bedict[key]
          except KeyError:
            pass
        else:
          i_bedict[key] = val
      cluster = self.cfg.GetClusterInfo()
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if not instance_info.failed and instance_info.data:
          current_mem = instance_info.data['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    nodeinfo[pnode].data['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.iteritems():
          if node not in instance.secondary_nodes:
            continue
          if nres.failed or not isinstance(nres.data, dict):
            self.warn.append("Can't get info from secondary node %s" % node)
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)

    # NIC processing
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics)))
      nic_bridge = nic_dict.get('bridge', None)
      if nic_bridge is not None:
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
          msg = ("Bridge '%s' doesn't exist on one of"
                 " the instance nodes" % nic_bridge)
          if self.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances")
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance")
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
        ins_l = ins_l[pnode]
        if ins_l.failed or not isinstance(ins_l.data, list):
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
        if instance.name in ins_l.data:
          raise errors.OpPrereqError("Instance is running, can't remove"
                                     " disks.")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)))

    return

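  # Exec returns a list of (parameter, new value) pairs describing every
  # change that was applied, which callers can use to report what changed.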
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          rpc_result = self.rpc.call_blockdev_remove(node, disk)
          if rpc_result.failed or not rpc_result.data:
            self.proc.LogWarning("Could not remove disk/%d on node %s,"
                                 " continuing anyway", device_idx, node)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # add a new nic
        if 'mac' not in nic_dict:
          mac = constants.VALUE_GENERATE
        else:
          mac = nic_dict['mac']
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          mac = self.cfg.GenerateMAC()
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
                              bridge=nic_dict.get('bridge', None))
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,bridge=%s" %
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
      else:
        # change a given nic
        for key in 'mac', 'ip', 'bridge':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    self.cfg.Update(instance)

    return result


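# The export list is gathered per node; nodes that fail to answer are
# reported with a value of False instead of aborting the whole query.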
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      if rpcresult[node].failed:
        result[node] = False
      else:
        result[node] = rpcresult[node].data

    return result


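# Exports work by snapshotting each disk (an LVM snapshot of the backing
# volume), copying the snapshots to the target node and finalizing the
# export there; the instance is optionally shut down for consistency.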


class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        result = self.rpc.call_blockdev_remove(src_node, dev)
        if result.failed or not result.data:
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s", dev.logical_id[1], src_node)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
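

# Summary sketch (added commentary, not part of the original code):
# LUExportInstance.Exec proceeds in four phases:
#   1. optionally shut the instance down (op.shutdown);
#   2. snapshot every disk into a new LV, restarting the instance afterwards
#      if it was administratively up;
#   3. copy each snapshot to the target node, then delete the snapshot LV;
#   4. finalize the export on the target node and prune any older export of
#      the same instance from the remaining nodes.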


class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    exportlist = self.rpc.call_export_list(self.acquired_locks[
      locking.LEVEL_NODE])
    found = False
    for node in exportlist:
      if exportlist[node].failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name in exportlist[node].data:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        if result.failed or not result.data:
          logging.error("Could not remove export for instance %s"
                        " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_NODE] = name
    elif self.op.kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_INSTANCE] = name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
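

# Illustrative note (added commentary, not part of the original code): TagsLU
# dispatches on op.kind; for a hypothetical opcode the tag target resolves as:
#
#   kind=constants.TAG_CLUSTER               -> the cluster config object
#   kind=constants.TAG_NODE,     name=node   -> that node's config object
#   kind=constants.TAG_INSTANCE, name=inst   -> that instance's config object
#
# Node and instance names are canonicalized via ExpandNodeName and
# ExpandInstanceName before the matching lock level is declared.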


class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_REQP = ["pattern"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err))

  def Exec(self, feedback_fn):
    """Returns the matching (path, tag) pairs.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results
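

# Illustrative sketch (added commentary, not part of the original code):
# LUSearchTags.Exec returns (path, tag) pairs gathered from the cluster, all
# instances and all nodes; a hypothetical search for "^db" might return:
#
#   [("/cluster", "dbfarm"),
#    ("/instances/inst1.example.com", "dbserver"),
#    ("/nodes/node1.example.com", "dbnode")]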


class LUAddTags(TagsLU):
  """Sets a tag on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tags.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tags from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        node_result.Raise()
        if not node_result.data:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result.data))
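

# Illustrative note (added commentary, not part of the original code): a
# hypothetical OpTestDelay sleeping five seconds on the master and two nodes
# would carry duration=5.0, on_master=True, on_nodes=["node1", "node2"];
# Exec then runs utils.TestDelay locally and the test_delay RPC remotely.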


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": 1,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                     cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "master_candidate": ninfo.master_candidate,
        }

      if not ninfo.offline:
        nresult.Raise()
        if not isinstance(nresult.data, dict):
          raise errors.OpExecError("Can't get data for node %s" % nname)
        remote_info = nresult.data
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          try:
            remote_info[attr] = int(remote_info[attr])
          except ValueError, err:
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" % (nname, attr, err))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].data:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data
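
  # Illustrative sketch (added commentary, not part of the original code):
  # the in_data structure assembled above has this top-level shape:
  #
  #   {"version": 1,
  #    "cluster_name": ...,
  #    "cluster_tags": [...],
  #    "enabled_hypervisors": [...],
  #    "nodes": {node_name: {static and dynamic node data}},
  #    "instances": {instance_name: {instance data}}}
  #
  # _AddNewInstance/_AddRelocateInstance later add the "request" key.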

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request
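
  # Illustrative example (added commentary, not part of the original code):
  # a hypothetical "allocate" request as built above, for a network-mirrored
  # (drbd) instance, hence required_nodes == 2; disk_space_total comes from
  # _ComputeDiskSize on the requested disk sizes:
  #
  #   {"type": "allocate", "name": "inst1.example.com",
  #    "disk_template": "drbd", "tags": [], "os": "debian-etch",
  #    "vcpus": 1, "memory": 128,
  #    "disks": [{"size": 1024, "mode": "w"}],
  #    "disk_space_total": ..., "nics": [...], "required_nodes": 2}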

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request
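
  # Illustrative example (added commentary, not part of the original code):
  # a hypothetical "relocate" request as built above:
  #
  #   {"type": "relocate", "name": "inst1.example.com",
  #    "disk_space_total": ..., "required_nodes": 1,
  #    "relocate_from": ["node2.example.com"]}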

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    data = self.in_text

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
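

# Illustrative sketch (added commentary, not part of the original code): a
# well-formed allocator reply, as enforced by _ValidateResult above, is a
# serialized dict with at least these keys:
#
#   {"success": true,
#    "info": "allocation successful",
#    "nodes": ["node1.example.com", "node2.example.com"]}
#
# "nodes" must be a list; "success", "info" and "nodes" are also copied onto
# the IAllocator object for easy access by the calling LU.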


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
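

# Note (added commentary, not part of the original code): with direction
# IALLOCATOR_DIR_IN the test LU only returns the serialized input text that
# would be fed to an allocator, without running one; with IALLOCATOR_DIR_OUT
# it runs the named allocator script and returns its raw, unvalidated output.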