from ganeti import errors
from ganeti import rpc
from ganeti import cmdlib
-from ganeti import config
from ganeti import ssconf
from ganeti import logger
+from ganeti import locking
class Processor(object):
"""Object which runs OpCodes"""
DISPATCH_TABLE = {
# Cluster
- opcodes.OpInitCluster: cmdlib.LUInitCluster,
opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
- opcodes.OpClusterCopyFile: cmdlib.LUClusterCopyFile,
- opcodes.OpRunClusterCommand: cmdlib.LURunClusterCommand,
opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
- opcodes.OpMasterFailover: cmdlib.LUMasterFailover,
opcodes.OpDumpClusterConfig: cmdlib.LUDumpClusterConfig,
opcodes.OpRenameCluster: cmdlib.LURenameCluster,
opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
+ opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
# node lu
opcodes.OpAddNode: cmdlib.LUAddNode,
opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
opcodes.OpRebootInstance: cmdlib.LURebootInstance,
opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
- opcodes.OpAddMDDRBDComponent: cmdlib.LUAddMDDRBDComponent,
- opcodes.OpRemoveMDDRBDComponent: cmdlib.LURemoveMDDRBDComponent,
opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
- opcodes.OpSetInstanceParms: cmdlib.LUSetInstanceParms,
+ opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
+ opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
# os lu
opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
# exports lu
opcodes.OpQueryExports: cmdlib.LUQueryExports,
opcodes.OpExportInstance: cmdlib.LUExportInstance,
+ opcodes.OpRemoveExport: cmdlib.LURemoveExport,
# tags lu
opcodes.OpGetTags: cmdlib.LUGetTags,
opcodes.OpSearchTags: cmdlib.LUSearchTags,
opcodes.OpDelTags: cmdlib.LUDelTags,
# test lu
opcodes.OpTestDelay: cmdlib.LUTestDelay,
+ opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
}
- def __init__(self, feedback=None):
+ def __init__(self, context):
"""Constructor for Processor
Args:
- feedback_fn: the feedback function (taking one string) to be run when
interesting events are happening
"""
- self.cfg = None
- self.sstore = None
- self._feedback_fn = feedback
+ self.context = context
+ self._feedback_fn = None
+ self.exclusive_BGL = False
- def ExecOpCode(self, op):
+ def _ExecLU(self, lu):
+ """Logical Unit execution sequence.
+
+ """
+ write_count = self.context.cfg.write_count
+ lu.CheckPrereq()
+ hm = HooksMaster(rpc.call_hooks_runner, self, lu)
+ h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
+ lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
+ self._feedback_fn, None)
+ try:
+ result = lu.Exec(self._feedback_fn)
+ h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
+ result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
+ self._feedback_fn, result)
+ finally:
+ # FIXME: This needs locks if not lu_class.REQ_BGL
+ if write_count != self.context.cfg.write_count:
+ hm.RunConfigUpdate()
+
+ return result
+
+ def _LockAndExecLU(self, lu, level):
+ """Execute a Logical Unit, with the needed locks.
+
+ This is a recursive function that starts locking the given level, and
+ proceeds up, till there are no more locks to acquire. Then it executes the
+ given LU and its opcodes.
+
+ """
+ if level in lu.needed_locks:
+ # This gives a chance to LUs to make last-minute changes after acquiring
+ # locks at any preceding level.
+ lu.DeclareLocks(level)
+ needed_locks = lu.needed_locks[level]
+ share = lu.share_locks[level]
+ # This is always safe to do, as we can't acquire more/less locks than
+ # what was requested.
+ lu.needed_locks[level] = self.context.glm.acquire(level,
+ needed_locks,
+ shared=share)
+ try:
+ result = self._LockAndExecLU(lu, level + 1)
+ finally:
+ if lu.needed_locks[level]:
+ self.context.glm.release(level)
+ else:
+ result = self._ExecLU(lu)
+
+ return result
+
+ def ExecOpCode(self, op, feedback_fn):
"""Execute an opcode.
Args:
raise errors.ProgrammerError("Non-opcode instance passed"
" to ExecOpcode")
+ self._feedback_fn = feedback_fn
lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
if lu_class is None:
raise errors.OpCodeUnknown("Unknown opcode")
- if lu_class.REQ_CLUSTER and self.cfg is None:
- self.cfg = config.ConfigWriter()
- self.sstore = ssconf.SimpleStore()
- if self.cfg is not None:
- write_count = self.cfg.write_count
+ if lu_class.REQ_WSSTORE:
+ sstore = ssconf.WritableSimpleStore()
else:
- write_count = 0
- lu = lu_class(self, op, self.cfg, self.sstore)
- lu.CheckPrereq()
- hm = HooksMaster(rpc.call_hooks_runner, self, lu)
- hm.RunPhase(constants.HOOKS_PHASE_PRE)
- result = lu.Exec(self._feedback_fn)
- hm.RunPhase(constants.HOOKS_PHASE_POST)
- if lu.cfg is not None:
- # we use lu.cfg and not self.cfg as for init cluster, self.cfg
- # is None but lu.cfg has been recently initialized in the
- # lu.Exec method
- if write_count != lu.cfg.write_count:
- hm.RunConfigUpdate()
+ sstore = ssconf.SimpleStore()
+
+ # Acquire the Big Ganeti Lock exclusively if this LU requires it, and in a
+ # shared fashion otherwise (to prevent concurrent run with an exclusive LU).
+ self.context.glm.acquire(locking.LEVEL_CLUSTER, [locking.BGL],
+ shared=not lu_class.REQ_BGL)
+ try:
+ self.exclusive_BGL = lu_class.REQ_BGL
+ lu = lu_class(self, op, self.context, sstore)
+ lu.ExpandNames()
+ assert lu.needed_locks is not None, "needed_locks not set by LU"
+ result = self._LockAndExecLU(lu, locking.LEVEL_INSTANCE)
+ finally:
+ self.context.glm.release(locking.LEVEL_CLUSTER)
+ self.exclusive_BGL = False
return result
if lu_class is None:
raise errors.OpCodeUnknown("Unknown opcode")
- if lu_class.REQ_CLUSTER and self.cfg is None:
- self.cfg = config.ConfigWriter()
- self.sstore = ssconf.SimpleStore()
+ if lu_class.REQ_BGL and not self.exclusive_BGL:
+ raise errors.ProgrammerError("LUs which require the BGL cannot"
+ " be chained to granular ones.")
+
+ assert lu_class.REQ_BGL, "ChainOpCode is still BGL-only"
+
+ if lu_class.REQ_WSSTORE:
+ sstore = ssconf.WritableSimpleStore()
+ else:
+ sstore = ssconf.SimpleStore()
+
#do_hooks = lu_class.HPATH is not None
- lu = lu_class(self, op, self.cfg, self.sstore)
+ lu = lu_class(self, op, self.context, sstore)
lu.CheckPrereq()
#if do_hooks:
# hm = HooksMaster(rpc.call_hooks_runner, self, lu)
- # hm.RunPhase(constants.HOOKS_PHASE_PRE)
+ # h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
+ # lu.HooksCallBack(constants.HOOKS_PHASE_PRE,
+ # h_results, self._feedback_fn, None)
result = lu.Exec(self._feedback_fn)
#if do_hooks:
- # hm.RunPhase(constants.HOOKS_PHASE_POST)
+ # h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
+ # result = lu.HooksCallBack(constants.HOOKS_PHASE_POST,
+ # h_results, self._feedback_fn, result)
return result
def LogStep(self, current, total, message):
This is the main function of the HooksMaster.
+ Args:
+ phase: the hooks phase to run
+
+ Returns:
+ the result of the hooks multi-node rpc call
+
"""
if not self.node_list[phase]:
# empty node list, we should not attempt to run this as either
errs.append((node_name, script, output))
if errs:
raise errors.HooksAbort(errs)
+ return results
def RunConfigUpdate(self):
"""Run the special configuration update hook