X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/984f7c32208f7834c6d6ae22e98dfa126e175981..ac0930b925974d3bb1174cf3922bafc4fbe50596:/lib/mcpu.py

diff --git a/lib/mcpu.py b/lib/mcpu.py
index be9a372..4bdfb46 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -101,6 +101,51 @@ class Processor(object):
     self._feedback_fn = feedback
     self.exclusive_BGL = False
 
+  def _ExecLU(self, lu):
+    """Logical Unit execution sequence.
+
+    """
+    write_count = self.context.cfg.write_count
+    lu.CheckPrereq()
+    hm = HooksMaster(rpc.call_hooks_runner, self, lu)
+    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
+    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
+                     self._feedback_fn, None)
+    try:
+      result = lu.Exec(self._feedback_fn)
+      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
+      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
+                                self._feedback_fn, result)
+    finally:
+      # FIXME: This needs locks if not lu_class.REQ_BGL
+      if write_count != self.context.cfg.write_count:
+        hm.RunConfigUpdate()
+
+    return result
+
+  def _LockAndExecLU(self, lu, level):
+    """Execute a Logical Unit, with the needed locks.
+
+    This is a recursive function that starts locking the given level, and
+    proceeds up, till there are no more locks to acquire. Then it executes the
+    given LU and its opcodes.
+
+    """
+    if level in lu.needed_locks:
+      # This is always safe to do, as we can't acquire more/less locks than
+      # what was requested.
+      lu.needed_locks[level] = self.context.glm.acquire(level,
+                                                        lu.needed_locks[level])
+      try:
+        result = self._LockAndExecLU(lu, level + 1)
+      finally:
+        if lu.needed_locks[level]:
+          self.context.glm.release(level)
+    else:
+      result = self._ExecLU(lu)
+
+    return result
+
   def ExecOpCode(self, op):
     """Execute an opcode.
 
@@ -121,29 +166,16 @@ class Processor(object):
     else:
       sstore = ssconf.SimpleStore()
 
-    write_count = self.context.cfg.write_count
-
     # Acquire the Big Ganeti Lock exclusively if this LU requires it, and in a
     # shared fashion otherwise (to prevent concurrent run with an exclusive LU.
     self.context.glm.acquire(locking.LEVEL_CLUSTER, [locking.BGL],
                              shared=not lu_class.REQ_BGL)
     try:
       self.exclusive_BGL = lu_class.REQ_BGL
-      lu = lu_class(self, op, self.context.cfg, sstore)
-      lu.CheckPrereq()
-      hm = HooksMaster(rpc.call_hooks_runner, self, lu)
-      h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
-      lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
-                       self._feedback_fn, None)
-      try:
-        result = lu.Exec(self._feedback_fn)
-        h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
-        result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
-                                  self._feedback_fn, result)
-      finally:
-        # FIXME: This needs locks if not lu_class.REQ_BGL
-        if write_count != self.context.cfg.write_count:
-          hm.RunConfigUpdate()
+      lu = lu_class(self, op, self.context, sstore)
+      lu.ExpandNames()
+      assert lu.needed_locks is not None, "needed_locks not set by LU"
+      result = self._LockAndExecLU(lu, locking.LEVEL_NODE)
     finally:
       self.context.glm.release(locking.LEVEL_CLUSTER)
       self.exclusive_BGL = False
@@ -177,7 +209,7 @@ class Processor(object):
       sstore = ssconf.SimpleStore()
 
     #do_hooks = lu_class.HPATH is not None
-    lu = lu_class(self, op, self.context.cfg, sstore)
+    lu = lu_class(self, op, self.context, sstore)
     lu.CheckPrereq()
     #if do_hooks:
     #  hm = HooksMaster(rpc.call_hooks_runner, self, lu)
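
Below is a minimal, standalone sketch of the lock-then-execute recursion that the _LockAndExecLU hunk above introduces. ToyLockManager, DemoLU and the integer lock levels are hypothetical stand-ins invented for illustration; the real code goes through self.context.glm, Ganeti's locking levels, and _ExecLU (which also runs prerequisite checks, hooks and config updates). Only the control flow mirrors the patch: acquire the current level, recurse to the next one, release on the way out, and run the LU once a level is not needed.

# Standalone illustration; ToyLockManager and DemoLU are hypothetical
# stand-ins, not Ganeti's real lock manager or LogicalUnit classes.
import threading

LEVELS = [0, 1]  # simplified lock levels, lowest first

class ToyLockManager(object):
  """Toy manager holding one lock per level."""
  def __init__(self):
    self._locks = dict((level, threading.Lock()) for level in LEVELS)

  def acquire(self, level, names):
    if not names:
      return names   # nothing to lock at this level
    self._locks[level].acquire()
    return names     # the real manager returns the names it acquired

  def release(self, level):
    self._locks[level].release()

def lock_and_exec(lm, lu, level):
  """Mirror of _LockAndExecLU: lock this level, recurse, release."""
  if level in lu.needed_locks:
    lu.needed_locks[level] = lm.acquire(level, lu.needed_locks[level])
    try:
      result = lock_and_exec(lm, lu, level + 1)
    finally:
      if lu.needed_locks[level]:  # only release what was acquired
        lm.release(level)
  else:
    result = lu.Exec()            # no further levels needed: run the LU
  return result

class DemoLU(object):
  """Hypothetical LU wanting locks at level 0 only."""
  def __init__(self):
    self.needed_locks = {0: ["node1"]}

  def Exec(self):
    return "done"

print(lock_and_exec(ToyLockManager(), DemoLU(), 0))

Releasing in the finally block on the way back out of the recursion drops the locks in reverse acquisition order even if Exec raises, which is the same guarantee the patched ExecOpCode relies on.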