+ return None
+
  def SubmitManyJobs(self, jobs):
    """Submits jobs for processing.

    See L{jqueue.JobQueue.SubmitManyJobs}.

    @param jobs: Job definitions to submit (presumably a list of opcode
      lists, one per job — see the referenced job-queue method)
    @raise NotImplementedError: always; this is an abstract callback that
      concrete subclasses must override

    """
    raise NotImplementedError
+
+
def _LUNameForOpName(opname):
  """Computes the LU name for a given OpCode name.

  Replaces the opcode prefix at the start of C{opname} with the LU prefix.

  """
  prefix_len = len(_OP_PREFIX)

  # Opcode class names must follow the "<prefix><Name>" convention
  assert opname[:prefix_len] == _OP_PREFIX, \
    "Invalid OpCode name, doesn't start with %s: %s" % (_OP_PREFIX, opname)

  return _LU_PREFIX + opname[prefix_len:]
+
+
def _ComputeDispatchTable():
  """Computes the opcode-to-lu dispatch table.

  @return: Dictionary mapping opcode classes to their logical unit classes,
    covering only opcodes declaring C{WITH_LU}

  """
  table = {}

  for op in opcodes.OP_MAPPING.values():
    if op.WITH_LU:
      table[op] = getattr(cmdlib, _LUNameForOpName(op.__name__))

  return table
+
+
def _SetBaseOpParams(src, defcomment, dst):
  """Copies basic opcode parameters.

  @type src: L{opcodes.OpCode}
  @param src: Source opcode
  @type defcomment: string
  @param defcomment: Comment to specify if not already given
  @type dst: L{opcodes.OpCode}
  @param dst: Destination opcode

  """
  # Debug level is always propagated when the source has one
  if hasattr(src, "debug_level"):
    dst.debug_level = src.debug_level

  # Priority is only inherited if the destination doesn't set its own
  if hasattr(src, "priority") and getattr(dst, "priority", None) is None:
    dst.priority = src.priority

  # An empty or missing comment is replaced by the default one
  if not getattr(dst, opcodes.COMMENT_ATTR, None):
    dst.comment = defcomment
+
+
def _ProcessResult(submit_fn, op, result):
  """Examines opcode result.

  If necessary, additional processing on the result is done: when the
  result is a L{cmdlib.ResultWithJobs}, the contained job definitions are
  submitted via C{submit_fn} and the returned job IDs are merged into the
  result dictionary.

  @param submit_fn: Callable for submitting jobs, see
    L{OpExecCbBase.SubmitManyJobs}
  @type op: L{opcodes.OpCode}
  @param op: Opcode that produced C{result}
  @param result: Opcode result, possibly a L{cmdlib.ResultWithJobs}
  @return: Processed result

  """
  if isinstance(result, cmdlib.ResultWithJobs):
    # Copy basic parameters (e.g. priority) to every submitted opcode.
    # NOTE: An explicit loop is used instead of map(); map() is lazy under
    # Python 3 and would silently skip these side effects (and under
    # Python 2 it built a useless result list).
    for new_op in itertools.chain(*result.jobs):
      _SetBaseOpParams(op, "Submitted by %s" % op.OP_ID, new_op)

    # Submit jobs
    job_submission = submit_fn(result.jobs)

    # Build dictionary
    result = result.other

    assert constants.JOB_IDS_KEY not in result, \
      "Key '%s' found in additional return values" % constants.JOB_IDS_KEY

    result[constants.JOB_IDS_KEY] = job_submission

  return result
+
+
def _FailingSubmitManyJobs(_):
  """Implementation of L{OpExecCbBase.SubmitManyJobs} to raise an exception.

  Used as the job-submission callback in contexts where submitting jobs is
  a programming error (e.g. when processing opcodes without callbacks).

  @raise errors.ProgrammerError: always; the argument is ignored

  """
  raise errors.ProgrammerError("Opcodes processed without callbacks (e.g."
                               " queries) can not submit jobs")
+
+
def _VerifyLocks(lu, glm, _mode_whitelist=_NODE_ALLOC_MODE_WHITELIST,
                 _nal_whitelist=_NODE_ALLOC_WHITELIST):
  """Performs consistency checks on locks acquired by a logical unit.

  @type lu: L{cmdlib.LogicalUnit}
  @param lu: Logical unit instance
  @type glm: L{locking.GanetiLockManager}
  @param glm: Lock manager
  @param _mode_whitelist: LU classes exempt from the equal-lock-mode check
    (private parameter, presumably only overridden by unit tests)
  @param _nal_whitelist: LU classes exempt from the node-allocation-lock
    requirement (private parameter, presumably only overridden by unit tests)

  """
  # All checks below are assertions, so they are no-ops when running
  # optimized (python -O); bail out early in that case
  if not __debug__:
    return

  # Whether the node allocation lock is currently held (in any mode)
  have_nal = glm.check_owned(locking.LEVEL_NODE_ALLOC, locking.NAL)

  for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
    # TODO: Verify using actual lock mode, not using LU variables
    if level in lu.needed_locks:
      share_node_alloc = lu.share_locks[locking.LEVEL_NODE_ALLOC]
      share_level = lu.share_locks[level]

      if lu.__class__ in _mode_whitelist:
        # Whitelisted LUs are expected to deliberately use differing modes
        assert share_node_alloc != share_level, \
          "LU is whitelisted to use different modes for node allocation lock"
      else:
        # By default, the node allocation lock must be held in the same
        # (shared vs. exclusive) mode as the node/node-resource locks
        assert bool(share_node_alloc) == bool(share_level), \
          ("Node allocation lock must be acquired using the same mode as nodes"
           " and node resources")

      if lu.__class__ in _nal_whitelist:
        # Whitelisted LUs acquire all nodes without the allocation lock
        assert not have_nal, \
          "LU is whitelisted for not acquiring the node allocation lock"
      elif lu.needed_locks[level] == locking.ALL_SET or glm.owning_all(level):
        # Acquiring all nodes (or node resources) requires holding the
        # node allocation lock as well
        assert have_nal, \
          ("Node allocation lock must be used if an LU acquires all nodes"
           " or node resources")