lib/mcpu.py @ 557838c1
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the logic behind the cluster operations

This module implements the logic for doing operations in the cluster. There
are two kinds of classes defined:
  - logical units, which know how to deal with their specific opcode only
  - the processor, which dispatches the opcodes to their logical units

"""

import logging
import random
import time

from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import rpc
from ganeti import cmdlib
from ganeti import locking
from ganeti import utils


class LockAcquireTimeout(Exception):
  """Exception to report timeouts on acquiring locks.

  """


def _CalculateLockAttemptTimeouts():
  """Calculate timeouts for lock attempts.

  """
  result = [1.0]

  # Wait for a total of at least 150s before doing a blocking acquire
  while sum(result) < 150.0:
    timeout = (result[-1] * 1.05) ** 1.25

    # Cap timeout at 10 seconds. This gives other jobs a chance to run
    # even if we're still trying to get our locks, before finally moving
    # to a blocking acquire.
    if timeout > 10.0:
      timeout = 10.0

    elif timeout < 0.1:
      # Lower boundary for safety
      timeout = 0.1

    result.append(timeout)

  return result


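# Illustrative example, not part of the original module: the sequence above
# starts at 1.0s and grows slowly (roughly 1.0, 1.06, 1.15, 1.26, ...), each
# value clamped to [0.1, 10.0], until the total reaches 150s:
#
#   >>> timeouts = _CalculateLockAttemptTimeouts()
#   >>> sum(timeouts) >= 150.0
#   True
#   >>> all(0.1 <= t <= 10.0 for t in timeouts)
#   True

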
class LockAttemptTimeoutStrategy(object):
  """Class with lock acquire timeout strategy.

  """
  __slots__ = [
    "_timeouts",
    "_random_fn",
    "_time_fn",
    ]

  _TIMEOUT_PER_ATTEMPT = _CalculateLockAttemptTimeouts()

  def __init__(self, _time_fn=time.time, _random_fn=random.random):
    """Initializes this class.

    @param _time_fn: Time function for unittests
    @param _random_fn: Random number generator for unittests

    """
    object.__init__(self)

    self._timeouts = iter(self._TIMEOUT_PER_ATTEMPT)
    self._time_fn = _time_fn
    self._random_fn = _random_fn

  def NextAttempt(self):
    """Returns the timeout for the next attempt.

    """
    try:
      timeout = self._timeouts.next()
    except StopIteration:
      # No more timeouts, do blocking acquire
      timeout = None

    if timeout is not None:
      # Add a small variation (-/+ 5%) to timeout. This helps in situations
      # where two or more jobs are fighting for the same lock(s).
      variation_range = timeout * 0.1
      timeout += ((self._random_fn() * variation_range) -
                  (variation_range * 0.5))

    return timeout


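# A minimal usage sketch (the `locks` object and `names` here are
# hypothetical; the real caller is the job queue): ask the strategy for a
# timeout before each attempt, and treat None as the cue that the next
# acquire should block until it succeeds:
#
#   strat = LockAttemptTimeoutStrategy()
#   while True:
#     # NextAttempt() returns None once all attempt timeouts are exhausted,
#     # which makes acquire() block until the locks are available
#     acquired = locks.acquire(names, timeout=strat.NextAttempt())
#     if acquired is not None:
#       break

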
class OpExecCbBase: # pylint: disable-msg=W0232
  """Base class for OpCode execution callbacks.

  """
  def NotifyStart(self):
    """Called when we are about to execute the LU.

    This function is called when we're about to start the lu's Exec() method,
    that is, after we have acquired all locks.

    """

  def Feedback(self, *args):
    """Sends feedback from the LU code to the end-user.

    """

  def CheckCancel(self):
    """Check whether job has been cancelled.

    """


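# A minimal callback sketch (hypothetical, for illustration only); the real
# implementation lives in the job queue, which forwards feedback to clients:
#
#   class _LoggingCbs(OpExecCbBase):
#     def NotifyStart(self):
#       logging.debug("All locks acquired, starting LU")
#
#     def Feedback(self, *args):
#       logging.info("LU feedback: %s", args[-1])
#
#     def CheckCancel(self):
#       pass  # raise an exception here to abort a cancelled job

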
class Processor(object):
  """Object which runs OpCodes"""
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRepairNodeStorage: cmdlib.LURepairNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    opcodes.OpNodeEvacuationStrategy: cmdlib.LUNodeEvacuationStrategy,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpMoveInstance: cmdlib.LUMoveInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpPrepareExport: cmdlib.LUPrepareExport,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    opcodes.OpTestJobqueue: cmdlib.LUTestJobqueue,
    }

  def __init__(self, context, ec_id):
    """Constructor for Processor

    @type context: GanetiContext
    @param context: global Ganeti context
    @type ec_id: string
    @param ec_id: execution context identifier

    """
    self.context = context
    self._ec_id = ec_id
    self._cbs = None
    self.rpc = rpc.RpcRunner(context.cfg)
    self.hmclass = HooksMaster

  def _AcquireLocks(self, level, names, shared, timeout, priority):
    """Acquires locks via the Ganeti lock manager.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks
    @type priority: number or None
    @param priority: Priority for acquiring the locks
    @raise LockAcquireTimeout: In case locks couldn't be acquired in specified
        amount of time

    """
    if self._cbs:
      self._cbs.CheckCancel()

    acquired = self.context.glm.acquire(level, names, shared=shared,
                                        timeout=timeout, priority=priority)

    if acquired is None:
      raise LockAcquireTimeout()

    return acquired

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    """
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self.Log, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads)
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self.Log)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self.Log, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def _LockAndExecLU(self, lu, level, calc_timeout, priority):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking the given level, and
    proceeds up until there are no more locks to acquire. Then it executes
    the given LU and its opcodes.

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      if self._cbs:
        self._cbs.NotifyStart()

      result = self._ExecLU(lu)

    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError("Can't declare locks to acquire when adding"
                                " others")

    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]

      try:
        assert adding_locks ^ acquiring_locks, \
          "Locks must be either added or acquired"

        if acquiring_locks:
          # Acquiring locks
          needed_locks = lu.needed_locks[level]

          acquired = self._AcquireLocks(level, needed_locks, share,
                                        calc_timeout(), priority)
        else:
          # Adding locks
          add_locks = lu.add_locks[level]
          lu.remove_locks[level] = add_locks

          try:
            self.context.glm.add(level, add_locks, acquired=1, shared=share)
          except errors.LockError:
            raise errors.OpPrereqError(
              "Couldn't add locks (%s), probably because of a race condition"
              " with another job that added them first" % add_locks,
              errors.ECODE_FAULT)

          acquired = add_locks

        try:
          lu.acquired_locks[level] = acquired

          result = self._LockAndExecLU(lu, level + 1, calc_timeout, priority)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)

    else:
      result = self._LockAndExecLU(lu, level + 1, calc_timeout, priority)

    return result

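  # Note (illustrative, not in the original source): ExecOpCode below enters
  # this recursion at locking.LEVEL_INSTANCE, after the BGL has already been
  # acquired. Each recursion step acquires or adds the locks declared for its
  # level and calls itself with level + 1; once the level is no longer in
  # locking.LEVELS, the LU itself is executed, and the locks are released on
  # the way back out.
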
  def ExecOpCode(self, op, cbs, timeout=None, priority=None):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type cbs: L{OpExecCbBase}
    @param cbs: Runtime callbacks
    @type timeout: float or None
    @param timeout: Maximum time to acquire all locks, None for no timeout
    @type priority: number or None
    @param priority: Priority for acquiring lock(s)
    @raise LockAcquireTimeout: In case locks couldn't be acquired in specified
        amount of time

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpCode")

    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
    if lu_class is None:
      raise errors.OpCodeUnknown("Unknown opcode")

    if timeout is None:
      calc_timeout = lambda: None
    else:
      calc_timeout = utils.RunningTimeout(timeout, False).Remaining

    self._cbs = cbs
    try:
      # Acquire the Big Ganeti Lock exclusively if this LU requires it,
      # and in a shared fashion otherwise (to prevent concurrent run with
      # an exclusive LU)
      self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
                         not lu_class.REQ_BGL, calc_timeout(),
                         priority)
      try:
        lu = lu_class(self, op, self.context, self.rpc)
        lu.ExpandNames()
        assert lu.needed_locks is not None, "needed_locks not set by LU"

        try:
          return self._LockAndExecLU(lu, locking.LEVEL_INSTANCE, calc_timeout,
                                     priority)
        finally:
          if self._ec_id:
            self.context.cfg.DropECReservations(self._ec_id)
      finally:
        self.context.glm.release(locking.LEVEL_CLUSTER)
    finally:
      self._cbs = None

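  # A minimal usage sketch (assuming an initialized GanetiContext `context`
  # and an execution context id `ec_id`; in practice masterd's job queue is
  # the only caller, and the opcode parameters here are purely illustrative):
  #
  #   proc = Processor(context, ec_id)
  #   op = opcodes.OpTestDelay(duration=1.0, on_master=True, on_nodes=[])
  #   result = proc.ExecOpCode(op, cbs=None, timeout=10.0)
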
  def Log(self, *args):
    """Forward call to feedback callback function.

    """
    if self._cbs:
      self._cbs.Feedback(*args)

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self.Log("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
      "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self.Log(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self.Log(" Hint: %s" % kwargs["hint"])

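  # Example usage (the message text here is hypothetical): the optional hint
  # is emitted on a separate line after the warning itself:
  #
  #   self.LogWarning("Device %s not found on node %s", dev, node,
  #                   hint="the disk may have been removed manually")
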
  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self.Log(" - INFO: %s" % message)

  def GetECId(self):
    """Returns the current execution context ID.

    """
    if not self._ec_id:
      raise errors.ProgrammerError("Tried to use execution context id when"
                                   " not set")
    return self._ec_id


class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
  def __init__(self, callfn, lu):
    self.callfn = callfn
    self.lu = lu
    self.op = lu.op
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)

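  # Illustrative sketch (the concrete values are hypothetical): for a
  # hooks-enabled LU, the environment built above resembles:
  #
  #   {
  #     "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
  #     "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
  #     "GANETI_OP_CODE": "OP_INSTANCE_STARTUP",
  #     "GANETI_OBJECT_TYPE": "INSTANCE",
  #     "GANETI_DATA_DIR": constants.DATA_DIR,
  #     # plus one "GANETI_"-prefixed entry per key from BuildHooksEnv()
  #   }
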
  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)

  def RunPhase(self, phase, nodes=None):
    """Run all the scripts for a phase.

    This is the main function of the HooksMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param nodes: overrides the predefined list of nodes for the given phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on failure of one of the hooks

    """
    if not self.node_list[phase] and not nodes:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    if nodes is not None:
      results = self._RunWrapper(nodes, hpath, phase)
    else:
      results = self._RunWrapper(self.node_list[phase], hpath, phase)
    errs = []
    if not results:
      msg = "Communication Failure"
      if phase == constants.HOOKS_PHASE_PRE:
        raise errors.HooksFailure(msg)
      else:
        self.lu.LogWarning(msg)
        return results
    for node_name in results:
      res = results[node_name]
      if res.offline:
        continue
      msg = res.fail_msg
      if msg:
        self.lu.LogWarning("Communication failure to node %s: %s",
                           node_name, msg)
        continue
      for script, hkr, output in res.payload:
        if hkr == constants.HKR_FAIL:
          if phase == constants.HOOKS_PHASE_PRE:
            errs.append((node_name, script, output))
          else:
            if not output:
              output = "(no output)"
            self.lu.LogWarning("On %s script %s failed, output: %s" %
                               (node_name, script, output))
    if errs and phase == constants.HOOKS_PHASE_PRE:
      raise errors.HooksAbort(errs)
    return results

  def RunConfigUpdate(self):
    """Run the special configuration update hook.

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)
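
# A testing sketch (hypothetical): since HooksMaster only needs a callable
# with the signature of rpc.call_hooks_runner, a stub can be injected for
# unittests:
#
#   calls = []
#
#   def _FakeHooksRpc(node_list, hpath, phase, env):
#     calls.append((node_list, hpath, phase, env))
#     return {}  # empty result set: no nodes were contacted
#
#   hm = HooksMaster(_FakeHooksRpc, lu)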