# Copyright (C) 2006, 2007, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Module implementing the logic behind the cluster operations
24 This module implements the logic for doing operations in the cluster. There
25 are two kinds of classes defined:
26 - logical units, which know how to deal with their specific opcode only
27 - the processor, which dispatches the opcodes to their logical units
import logging
import random
import time

from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import cmdlib
from ganeti import locking
from ganeti import utils
from ganeti import compat
class LockAcquireTimeout(Exception):
  """Exception to report timeouts on acquiring locks.

  """
def _CalculateLockAttemptTimeouts():
  """Calculate timeouts for lock attempts.

  Builds the list of per-attempt timeouts used by
  L{LockAttemptTimeoutStrategy}: a geometrically growing series starting at
  L{constants.LOCK_ATTEMPTS_MINWAIT}, capped per attempt at
  L{constants.LOCK_ATTEMPTS_MAXWAIT}, whose sum is at least
  L{constants.LOCK_ATTEMPTS_TIMEOUT}.

  @rtype: list of float
  @return: timeout (in seconds) for each lock acquisition attempt

  """
  result = [constants.LOCK_ATTEMPTS_MINWAIT]
  running_sum = result[0]

  # Wait for a total of at least LOCK_ATTEMPTS_TIMEOUT before doing a
  # blocking acquire
  while running_sum < constants.LOCK_ATTEMPTS_TIMEOUT:
    timeout = (result[-1] * 1.05) ** 1.25

    # Cap max timeout. This gives other jobs a chance to run even if
    # we're still trying to get our locks, before finally moving to a
    # blocking acquire
    timeout = min(timeout, constants.LOCK_ATTEMPTS_MAXWAIT)
    # And also cap the lower boundary for safety
    timeout = max(timeout, constants.LOCK_ATTEMPTS_MINWAIT)

    result.append(timeout)
    running_sum += timeout

  return result
class LockAttemptTimeoutStrategy(object):
  """Class with lock acquire timeout strategy.

  Hands out one timeout per acquisition attempt (from
  L{_CalculateLockAttemptTimeouts}), then C{None} to signal a blocking
  acquire once all timed attempts are exhausted.

  """
  _TIMEOUT_PER_ATTEMPT = _CalculateLockAttemptTimeouts()

  def __init__(self, _time_fn=time.time, _random_fn=random.random):
    """Initializes this class.

    @param _time_fn: Time function for unittests
    @param _random_fn: Random number generator for unittests

    """
    self._timeouts = iter(self._TIMEOUT_PER_ATTEMPT)
    self._time_fn = _time_fn
    self._random_fn = _random_fn

  def NextAttempt(self):
    """Returns the timeout for the next attempt.

    @rtype: float or None
    @return: timeout in seconds for the next acquisition attempt, or None
        once all timed attempts are used up (meaning: block indefinitely)

    """
    try:
      timeout = self._timeouts.next()
    except StopIteration:
      # No more timeouts, do blocking acquire
      timeout = None

    if timeout is not None:
      # Add a small variation (-/+ 5%) to timeout. This helps in situations
      # where two or more jobs are fighting for the same lock(s).
      variation_range = timeout * 0.1
      timeout += ((self._random_fn() * variation_range) -
                  (variation_range * 0.5))

    return timeout
class OpExecCbBase: # pylint: disable=W0232
  """Base class for OpCode execution callbacks.

  """
  def NotifyStart(self):
    """Called when we are about to execute the LU.

    This function is called when we're about to start the lu's Exec() method,
    that is, after we have acquired all locks.

    """

  def Feedback(self, *args):
    """Sends feedback from the LU code to the end-user.

    """

  def CheckCancel(self):
    """Check whether job has been cancelled.

    """

  def SubmitManyJobs(self, jobs):
    """Submits jobs for processing.

    See L{jqueue.JobQueue.SubmitManyJobs}.

    """
    raise NotImplementedError
155 def _LUNameForOpName(opname):
156 """Computes the LU name for a given OpCode name.
159 assert opname.startswith(_OP_PREFIX), \
160 "Invalid OpCode name, doesn't start with %s: %s" % (_OP_PREFIX, opname)
162 return _LU_PREFIX + opname[len(_OP_PREFIX):]
def _ComputeDispatchTable():
  """Computes the opcode-to-lu dispatch table.

  @rtype: dict
  @return: mapping from opcode class to the LU class in L{cmdlib}
      implementing it (only opcodes with an associated LU are included)

  """
  return dict((op, getattr(cmdlib, _LUNameForOpName(op.__name__)))
              for op in opcodes.OP_MAPPING.values()
              if op.WITH_LU)
174 def _RpcResultsToHooksResults(rpc_results):
175 """Function to convert RPC results to the format expected by HooksMaster.
177 @type rpc_results: dict(node: L{rpc.RpcResult})
178 @param rpc_results: RPC results
179 @rtype: dict(node: (fail_msg, offline, hooks_results))
180 @return: RPC results unpacked according to the format expected by
184 return dict((node, (rpc_res.fail_msg, rpc_res.offline, rpc_res.payload))
185 for (node, rpc_res) in rpc_results.items())
class Processor(object):
  """Object which runs OpCodes"""
  DISPATCH_TABLE = _ComputeDispatchTable()

  def __init__(self, context, ec_id):
    """Constructor for Processor

    @type context: GanetiContext
    @param context: global Ganeti context
    @type ec_id: string
    @param ec_id: execution context identifier

    """
    self.context = context
    self._ec_id = ec_id
    # Runtime callbacks (L{OpExecCbBase}); only set while an opcode is
    # being executed, see L{ExecOpCode}
    self._cbs = None
    self.rpc = context.rpc
    self.hmclass = HooksMaster

  def _AcquireLocks(self, level, names, shared, timeout, priority):
    """Acquires locks via the Ganeti lock manager.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks
    @raise LockAcquireTimeout: In case locks couldn't be acquired in specified
        amount of time

    """
    if self._cbs:
      self._cbs.CheckCancel()

    acquired = self.context.glm.acquire(level, names, shared=shared,
                                        timeout=timeout, priority=priority)

    if acquired is None:
      raise LockAcquireTimeout()

    return acquired

  def _ProcessResult(self, result):
    """Examines opcode result.

    If necessary, additional processing on the result is done.

    """
    if isinstance(result, cmdlib.ResultWithJobs):
      # Submit jobs
      job_submission = self._cbs.SubmitManyJobs(result.jobs)

      # Build dictionary
      result = result.other

      assert constants.JOB_IDS_KEY not in result, \
        "Key '%s' found in additional return values" % constants.JOB_IDS_KEY

      result[constants.JOB_IDS_KEY] = job_submission

    return result

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    Runs prereq checks and pre-hooks, executes the LU (unless dry-run was
    requested), then runs post-hooks and, if the configuration was written,
    the config-update hook.

    """
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()

    hm = self.BuildHooksManager(lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self.Log, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = self._ProcessResult(lu.Exec(self.Log))
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self.Log, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def BuildHooksManager(self, lu):
    return self.hmclass.BuildFromLu(lu.rpc.call_hooks_runner, lu)

  def _LockAndExecLU(self, lu, level, calc_timeout, priority):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking the given level, and
    proceeds up, till there are no more locks to acquire. Then it executes the
    given LU and its opcodes.

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      # All locks acquired, execute the actual LU
      if self._cbs:
        self._cbs.NotifyStart()

      result = self._ExecLU(lu)

    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError("Can't declare locks to acquire when adding"
                                " others")

    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]

      try:
        assert adding_locks ^ acquiring_locks, \
          "Locks must be either added or acquired"

        if acquiring_locks:
          # Acquiring locks
          needed_locks = lu.needed_locks[level]

          self._AcquireLocks(level, needed_locks, share,
                             calc_timeout(), priority)
        else:
          # Adding locks
          add_locks = lu.add_locks[level]
          lu.remove_locks[level] = add_locks

          try:
            self.context.glm.add(level, add_locks, acquired=1, shared=share)
          except errors.LockError:
            raise errors.OpPrereqError(
              "Couldn't add locks (%s), probably because of a race condition"
              " with another job, who added them first" % add_locks,
              errors.ECODE_FAULT)

        try:
          result = self._LockAndExecLU(lu, level + 1, calc_timeout, priority)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)

    else:
      result = self._LockAndExecLU(lu, level + 1, calc_timeout, priority)

    return result

  def ExecOpCode(self, op, cbs, timeout=None, priority=None):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type cbs: L{OpExecCbBase}
    @param cbs: Runtime callbacks
    @type timeout: float or None
    @param timeout: Maximum time to acquire all locks, None for no timeout
    @type priority: number or None
    @param priority: Priority for acquiring lock(s)
    @raise LockAcquireTimeout: In case locks couldn't be acquired in specified
        amount of time

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpcode (%s)" % type(op))

    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
    if lu_class is None:
      raise errors.OpCodeUnknown("Unknown opcode")

    if timeout is None:
      calc_timeout = lambda: None
    else:
      calc_timeout = utils.RunningTimeout(timeout, False).Remaining

    self._cbs = cbs
    try:
      # Acquire the Big Ganeti Lock exclusively if this LU requires it,
      # and in a shared fashion otherwise (to prevent concurrent run with
      # an exclusive LU.
      self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
                         not lu_class.REQ_BGL, calc_timeout(),
                         priority)
      try:
        lu = lu_class(self, op, self.context, self.rpc)
        lu.ExpandNames()
        assert lu.needed_locks is not None, "needed_locks not set by LU"

        try:
          result = self._LockAndExecLU(lu, locking.LEVEL_INSTANCE,
                                       calc_timeout, priority)
        finally:
          if self._ec_id:
            self.context.cfg.DropECReservations(self._ec_id)
      finally:
        self.context.glm.release(locking.LEVEL_CLUSTER)
    finally:
      self._cbs = None

    resultcheck_fn = op.OP_RESULT
    if not (resultcheck_fn is None or resultcheck_fn(result)):
      logging.error("Expected opcode result matching %s, got %s",
                    resultcheck_fn, result)
      raise errors.OpResultError("Opcode result does not match %s: %s" %
                                 (resultcheck_fn, utils.Truncate(result, 80)))

    return result

  def Log(self, *args):
    """Forward call to feedback callback function.

    """
    if self._cbs:
      self._cbs.Feedback(*args)

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self.Log("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
      "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self.Log(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self.Log(" Hint: %s" % kwargs["hint"])

  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self.Log(" - INFO: %s" % message)

  def GetECId(self):
    """Returns the current execution context ID.

    """
    if not self._ec_id:
      raise errors.ProgrammerError("Tried to use execution context id when"
                                   " not set")
    return self._ec_id
class HooksMaster(object):
  def __init__(self, opcode, hooks_path, nodes, hooks_execution_fn,
               hooks_results_adapt_fn, build_env_fn, log_fn, htype=None,
               cluster_name=None, master_name=None):
    """Base class for hooks masters.

    This class invokes the execution of hooks according to the behaviour
    specified by its parameters.

    @type opcode: string
    @param opcode: opcode of the operation to which the hooks are tied
    @type hooks_path: string
    @param hooks_path: prefix of the hooks directories
    @type nodes: 2-tuple of lists
    @param nodes: 2-tuple of lists containing nodes on which pre-hooks must be
      run and nodes on which post-hooks must be run
    @type hooks_execution_fn: function that accepts the following parameters:
      (node_list, hooks_path, phase, environment)
    @param hooks_execution_fn: function that will execute the hooks; can be
      None, indicating that no conversion is necessary.
    @type hooks_results_adapt_fn: function
    @param hooks_results_adapt_fn: function that will adapt the return value of
      hooks_execution_fn to the format expected by RunPhase
    @type build_env_fn: function that returns a dictionary having strings as
      keys
    @param build_env_fn: function that builds the environment for the hooks
    @type log_fn: function that accepts a string
    @param log_fn: logging function
    @type htype: string or None
    @param htype: None or one of L{constants.HTYPE_CLUSTER},
      L{constants.HTYPE_NODE}, L{constants.HTYPE_INSTANCE}
    @type cluster_name: string
    @param cluster_name: name of the cluster
    @type master_name: string
    @param master_name: name of the master

    """
    self.opcode = opcode
    self.hooks_path = hooks_path
    self.hooks_execution_fn = hooks_execution_fn
    self.hooks_results_adapt_fn = hooks_results_adapt_fn
    self.build_env_fn = build_env_fn
    self.log_fn = log_fn
    self.htype = htype
    self.cluster_name = cluster_name
    self.master_name = master_name

    # Pre-phase environment is computed once and reused by the post phase
    # and the config-update hook
    self.pre_env = self._BuildEnv(constants.HOOKS_PHASE_PRE)
    (self.pre_nodes, self.post_nodes) = nodes

  def _BuildEnv(self, phase):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
    if phase == constants.HOOKS_PHASE_PRE:
      prefix = "GANETI_"
    elif phase == constants.HOOKS_PHASE_POST:
      prefix = "GANETI_POST_"
    else:
      raise AssertionError("Unknown phase '%s'" % phase)

    env = {}

    if self.hooks_path is not None:
      phase_env = self.build_env_fn()
      if phase_env:
        assert not compat.any(key.upper().startswith(prefix)
                              for key in phase_env)
        env.update(("%s%s" % (prefix, key), value)
                   for (key, value) in phase_env.items())

    if phase == constants.HOOKS_PHASE_PRE:
      assert compat.all((key.startswith("GANETI_") and
                         not key.startswith("GANETI_POST_"))
                        for key in env)

    elif phase == constants.HOOKS_PHASE_POST:
      assert compat.all(key.startswith("GANETI_POST_") for key in env)
      assert isinstance(self.pre_env, dict)

      # Merge with pre-phase environment
      assert not compat.any(key.startswith("GANETI_POST_")
                            for key in self.pre_env)
      env.update(self.pre_env)
    else:
      raise AssertionError("Unknown phase '%s'" % phase)

    return env

  def _RunWrapper(self, node_list, hpath, phase, phase_env):
    """Simple wrapper over self.callfn.

    This method fixes the environment before executing the hooks.

    """
    env = {
      "PATH": constants.HOOKS_PATH,
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.opcode,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      "GANETI_HOOKS_PHASE": phase,
      "GANETI_HOOKS_PATH": hpath,
      }

    if self.htype:
      env["GANETI_OBJECT_TYPE"] = self.htype

    if self.cluster_name is not None:
      env["GANETI_CLUSTER"] = self.cluster_name

    if self.master_name is not None:
      env["GANETI_MASTER"] = self.master_name

    env = utils.algo.JoinDisjointDicts(env, phase_env)

    # Convert everything to strings
    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    assert compat.all(key == "PATH" or key.startswith("GANETI_")
                      for key in env)

    return self.hooks_execution_fn(node_list, hpath, phase, env)

  def RunPhase(self, phase, nodes=None):
    """Run all the scripts for a phase.

    This is the main function of the HookMaster.
    It executes self.hooks_execution_fn, and after running
    self.hooks_results_adapt_fn on its results it expects them to be in the
    form {node_name: (fail_msg, [(script, result, output), ...]}).

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
      L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param nodes: overrides the predefined list of nodes for the given phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on failure of one of the hooks

    """
    if phase == constants.HOOKS_PHASE_PRE:
      if nodes is None:
        nodes = self.pre_nodes
      env = self.pre_env
    elif phase == constants.HOOKS_PHASE_POST:
      if nodes is None:
        nodes = self.post_nodes
      env = self._BuildEnv(phase)
    else:
      raise AssertionError("Unknown phase '%s'" % phase)

    if not nodes:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return

    results = self._RunWrapper(nodes, self.hooks_path, phase, env)
    if not results:
      msg = "Communication Failure"
      if phase == constants.HOOKS_PHASE_PRE:
        raise errors.HooksFailure(msg)
      else:
        self.log_fn(msg)
        return results

    converted_res = results
    if self.hooks_results_adapt_fn:
      converted_res = self.hooks_results_adapt_fn(results)

    errs = []
    for node_name, (fail_msg, offline, hooks_results) in converted_res.items():
      if offline:
        continue

      if fail_msg:
        self.log_fn("Communication failure to node %s: %s", node_name, fail_msg)
        continue

      for script, hkr, output in hooks_results:
        if hkr == constants.HKR_FAIL:
          if phase == constants.HOOKS_PHASE_PRE:
            errs.append((node_name, script, output))
          else:
            if not output:
              output = "(no output)"
            self.log_fn("On %s script %s failed, output: %s" %
                        (node_name, script, output))

    if errs and phase == constants.HOOKS_PHASE_PRE:
      raise errors.HooksAbort(errs)

    return results

  def RunConfigUpdate(self):
    """Run the special configuration update hook

    This is a special hook that runs only on the master after each
    top-level LI if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.master_name]
    self._RunWrapper(nodes, hpath, phase, self.pre_env)

  @staticmethod
  def BuildFromLu(hooks_execution_fn, lu):
    """Builds a L{HooksMaster} from a logical unit.

    """
    nodes = map(frozenset, lu.BuildHooksNodes())

    master_name = cluster_name = None
    if lu.cfg:
      master_name = lu.cfg.GetMasterNode()
      cluster_name = lu.cfg.GetClusterName()

    return HooksMaster(lu.op.OP_ID, lu.HPATH, nodes, hooks_execution_fn,
                       _RpcResultsToHooksResults, lu.BuildHooksEnv,
                       lu.LogWarning, lu.HTYPE, cluster_name, master_name)