from ganeti import config
from ganeti import ssconf
from ganeti import logger
+from ganeti import locking
class Processor(object):
"""
self.context = context
self._feedback_fn = feedback
+ self.exclusive_BGL = False
+
+  def _ExecLU(self, lu):
+    """Logical Unit execution sequence.
+
+    Runs the LU's prerequisite check, the pre-phase hooks, the LU body
+    and the post-phase hooks; if the LU wrote to the configuration, the
+    config-update hook is run as well (even on failure, via finally).
+
+    Args:
+      lu: an instantiated Logical Unit object
+
+    Returns:
+      the result of lu.Exec, possibly replaced by the POST HooksCallBack
+
+    """
+    # snapshot the config write counter so we can detect, after Exec,
+    # whether this LU modified the cluster configuration
+    write_count = self.context.cfg.write_count
+    lu.CheckPrereq()
+    hm = HooksMaster(rpc.call_hooks_runner, self, lu)
+    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
+    # PRE callback gets None as the result argument (nothing executed yet)
+    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
+                     self._feedback_fn, None)
+    try:
+      result = lu.Exec(self._feedback_fn)
+      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
+      # the POST callback may rewrite the result before it is returned
+      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
+                                self._feedback_fn, result)
+    finally:
+      # FIXME: This needs locks if not lu_class.REQ_BGL
+      # run the config-update hook only if the LU actually wrote config
+      if write_count != self.context.cfg.write_count:
+        hm.RunConfigUpdate()
+
+    return result
+
+  def _LockAndExecLU(self, lu, level):
+    """Execute a Logical Unit, with the needed locks.
+
+    This is a recursive function that starts locking the given level, and
+    proceeds up, till there are no more locks to acquire. Then it executes the
+    given LU and its opcodes.
+
+    Args:
+      lu: the Logical Unit to run; lu.needed_locks must already be set
+        (normally by lu.ExpandNames)
+      level: the locking level to acquire at this recursion step
+
+    Returns:
+      the result of _ExecLU for the given LU
+
+    """
+    if level in lu.needed_locks:
+      # This is always safe to do, as we can't acquire more/less locks than
+      # what was requested.
+      # store back what acquire() returned, so the release/bookkeeping
+      # below operates on the names actually held
+      lu.needed_locks[level] = self.context.glm.acquire(level,
+                                          lu.needed_locks[level])
+      try:
+        # recurse to acquire the next (higher-numbered) locking level
+        result = self._LockAndExecLU(lu, level + 1)
+      finally:
+        # release only if we actually hold locks at this level (the
+        # requested set may have been empty)
+        if lu.needed_locks[level]:
+          self.context.glm.release(level)
+    else:
+      # no more levels requested: all needed locks are held, run the LU
+      result = self._ExecLU(lu)
+
+    return result
def ExecOpCode(self, op):
"""Execute an opcode.
else:
sstore = ssconf.SimpleStore()
- write_count = self.context.cfg.write_count
- lu = lu_class(self, op, self.context.cfg, sstore)
- lu.CheckPrereq()
- hm = HooksMaster(rpc.call_hooks_runner, self, lu)
- h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
- lu.HooksCallBack(constants.HOOKS_PHASE_PRE,
- h_results, self._feedback_fn, None)
+ # Acquire the Big Ganeti Lock exclusively if this LU requires it, and in a
+  # shared fashion otherwise (to prevent concurrent run with an exclusive LU).
+ self.context.glm.acquire(locking.LEVEL_CLUSTER, [locking.BGL],
+ shared=not lu_class.REQ_BGL)
try:
- result = lu.Exec(self._feedback_fn)
- h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
- result = lu.HooksCallBack(constants.HOOKS_PHASE_POST,
- h_results, self._feedback_fn, result)
+ self.exclusive_BGL = lu_class.REQ_BGL
+ lu = lu_class(self, op, self.context, sstore)
+ lu.ExpandNames()
+ assert lu.needed_locks is not None, "needed_locks not set by LU"
+ result = self._LockAndExecLU(lu, locking.LEVEL_NODE)
finally:
- if lu.cfg is not None:
- # we use lu.cfg and not self.cfg as for init cluster, self.cfg
- # is None but lu.cfg has been recently initialized in the
- # lu.Exec method
- if write_count != lu.cfg.write_count:
- hm.RunConfigUpdate()
+ self.context.glm.release(locking.LEVEL_CLUSTER)
+ self.exclusive_BGL = False
return result
if lu_class is None:
raise errors.OpCodeUnknown("Unknown opcode")
+ if lu_class.REQ_BGL and not self.exclusive_BGL:
+ raise errors.ProgrammerError("LUs which require the BGL cannot"
+ " be chained to granular ones.")
+
if lu_class.REQ_WSSTORE:
sstore = ssconf.WritableSimpleStore()
else:
sstore = ssconf.SimpleStore()
#do_hooks = lu_class.HPATH is not None
- lu = lu_class(self, op, self.context.cfg, sstore)
+ lu = lu_class(self, op, self.context, sstore)
lu.CheckPrereq()
#if do_hooks:
# hm = HooksMaster(rpc.call_hooks_runner, self, lu)