+ """
+ not_marked = True
+ for op in self.ops:
+ if op.status in constants.OPS_FINALIZED:
+ assert not_marked, "Finalized opcodes found after non-finalized ones"
+ continue
+ op.status = status
+ op.result = result
+ not_marked = False
+
+ def Finalize(self):
+ """Marks the job as finalized.
+
+ """
+ self.end_timestamp = TimeStampNow()
+
+ def Cancel(self):
+ """Marks job as canceled/-ing if possible.
+
+ @rtype: tuple; (bool, string)
+ @return: Boolean describing whether job was successfully canceled or marked
+ as canceling and a text message
+
+ """
+ status = self.CalcStatus()
+
+ if status == constants.JOB_STATUS_QUEUED:
+ self.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
+ "Job canceled by request")
+ self.Finalize()
+ return (True, "Job %s canceled" % self.id)
+
+ elif status == constants.JOB_STATUS_WAITING:
+ # The worker will notice the new status and cancel the job
+ self.MarkUnfinishedOps(constants.OP_STATUS_CANCELING, None)
+ return (True, "Job %s will be canceled" % self.id)
+
+ else:
+ logging.debug("Job %s is no longer waiting in the queue", self.id)
+ return (False, "Job %s is no longer waiting in the queue" % self.id)
+
+
+class _OpExecCallbacks(mcpu.OpExecCbBase):
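+ """Callbacks used by L{mcpu.Processor} while executing a single opcode.
+
+ """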
+ def __init__(self, queue, job, op):
+ """Initializes this class.
+
+ @type queue: L{JobQueue}
+ @param queue: Job queue
+ @type job: L{_QueuedJob}
+ @param job: Job object
+ @type op: L{_QueuedOpCode}
+ @param op: OpCode
+
+ """
+ assert queue, "Queue is missing"
+ assert job, "Job is missing"
+ assert op, "Opcode is missing"
+
+ self._queue = queue
+ self._job = job
+ self._op = op
+
+ def _CheckCancel(self):
+ """Raises an exception to cancel the job if asked to.
+
+ """
+ # Cancel here if we were asked to
+ if self._op.status == constants.OP_STATUS_CANCELING:
+ logging.debug("Canceling opcode")
+ raise CancelJob()
+
+ @locking.ssynchronized(_QUEUE, shared=1)
+ def NotifyStart(self):
+ """Mark the opcode as running, not lock-waiting.
+
+ This is called from the mcpu code as a notifier function, when the LU is
+ finally about to start the Exec() method. Of course, to have end-user
+ visible results, the opcode must be initially (before calling into
+ Processor.ExecOpCode) set to OP_STATUS_WAITING.
+
+ """
+ assert self._op in self._job.ops
+ assert self._op.status in (constants.OP_STATUS_WAITING,
+ constants.OP_STATUS_CANCELING)
+
+ # Cancel here if we were asked to
+ self._CheckCancel()
+
+ logging.debug("Opcode is now running")
+
+ self._op.status = constants.OP_STATUS_RUNNING
+ self._op.exec_timestamp = TimeStampNow()
+
+ # And finally replicate the job status
+ self._queue.UpdateJobUnlocked(self._job)
+
+ @locking.ssynchronized(_QUEUE, shared=1)
+ def _AppendFeedback(self, timestamp, log_type, log_msg):
+ """Internal feedback append function, with locks
+
+ """
+ self._job.log_serial += 1
+ self._op.log.append((self._job.log_serial, timestamp, log_type, log_msg))
+ self._queue.UpdateJobUnlocked(self._job, replicate=False)
+
+ def Feedback(self, *args):
+ """Append a log entry.
+
+ """
+ # Callers pass either (message, ) or (log_type, message)
+ assert len(args) in (1, 2), "Invalid number of Feedback() arguments"
+
+ if len(args) == 1:
+ log_type = constants.ELOG_MESSAGE
+ log_msg = args[0]
+ else:
+ (log_type, log_msg) = args
+
+ # The time is split to make serialization easier and not lose
+ # precision.
+ timestamp = utils.SplitTime(time.time())
+ self._AppendFeedback(timestamp, log_type, log_msg)
+
+ def CheckCancel(self):
+ """Check whether job has been cancelled.
+
+ """
+ assert self._op.status in (constants.OP_STATUS_WAITING,
+ constants.OP_STATUS_CANCELING)
+
+ # Cancel here if we were asked to
+ self._CheckCancel()
+
+ def SubmitManyJobs(self, jobs):
+ """Submits jobs for processing.
+
+ See L{JobQueue.SubmitManyJobs}.
+
+ """
+ # Locking is done in job queue
+ return self._queue.SubmitManyJobs(jobs)
+
+
+class _JobChangesChecker(object):
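+ """Callable checking whether a job's status or log has changed.
+
+ """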
+ def __init__(self, fields, prev_job_info, prev_log_serial):
+ """Initializes this class.
+
+ @type fields: list of strings
+ @param fields: Fields requested by LUXI client
+ @type prev_job_info: list or None
+ @param prev_job_info: previous job info, as passed by the LUXI client
+ @type prev_log_serial: int
+ @param prev_log_serial: previous job message serial, as passed by the LUXI
+ client
+
+ """
+ self._fields = fields
+ self._prev_job_info = prev_job_info
+ self._prev_log_serial = prev_log_serial
+
+ def __call__(self, job):
+ """Checks whether job has changed.
+
+ @type job: L{_QueuedJob}
+ @param job: Job object
+
+ """
+ assert not job.writable, "Expected read-only job"
+
+ status = job.CalcStatus()
+ job_info = job.GetInfo(self._fields)
+ log_entries = job.GetLogEntries(self._prev_log_serial)
+
+ # Serializing and deserializing data can cause type changes (e.g. from
+ # tuple to list) or precision loss. We're doing it here so that we get
+ # the same modifications as the data received from the client. Without
+ # this, the comparison afterwards might fail without the data being
+ # significantly different.
+ # TODO: we just deserialized from disk, investigate how to make sure that
+ # the job info and log entries are compatible to avoid this further step.
+ # TODO: Doing something like in testutils.py:UnifyValueType might be more
+ # efficient, though floats will be tricky
+ job_info = serializer.LoadJson(serializer.DumpJson(job_info))
+ log_entries = serializer.LoadJson(serializer.DumpJson(log_entries))
+
+ # Don't even try to wait if the job is no longer running, there will be
+ # no changes.
+ if (status not in (constants.JOB_STATUS_QUEUED,
+ constants.JOB_STATUS_RUNNING,
+ constants.JOB_STATUS_WAITING) or
+ job_info != self._prev_job_info or
+ (log_entries and self._prev_log_serial != log_entries[0][0])):
+ logging.debug("Job %s changed", job.id)
+ return (job_info, log_entries)
+
+ return None
+
+
+class _JobFileChangesWaiter(object):
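+ """Waits for changes to a single job file using inotify.
+
+ """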
+ def __init__(self, filename):
+ """Initializes this class.
+
+ @type filename: string
+ @param filename: Path to job file
+ @raises errors.InotifyError: if the notifier cannot be set up
+
+ """
+ self._wm = pyinotify.WatchManager()
+ self._inotify_handler = \
+ asyncnotifier.SingleFileEventHandler(self._wm, self._OnInotify, filename)
+ self._notifier = \
+ pyinotify.Notifier(self._wm, default_proc_fun=self._inotify_handler)
+ try:
+ self._inotify_handler.enable()
+ except Exception:
+ # pyinotify doesn't close file descriptors automatically
+ self._notifier.stop()
+ raise
+
+ def _OnInotify(self, notifier_enabled):
+ """Callback for inotify.
+
+ """
+ if not notifier_enabled:
+ self._inotify_handler.enable()
+
+ def Wait(self, timeout):
+ """Waits for the job file to change.
+
+ @type timeout: float
+ @param timeout: Timeout in seconds
+ @return: Whether there have been events
+
+ """
+ assert timeout >= 0
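+ # pyinotify expects the timeout in milliseconds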
+ have_events = self._notifier.check_events(timeout * 1000)
+ if have_events:
+ self._notifier.read_events()
+ self._notifier.process_events()
+ return have_events
+
+ def Close(self):
+ """Closes underlying notifier and its file descriptor.
+
+ """
+ self._notifier.stop()
+
+
+class _JobChangesWaiter(object):
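+ """Waits for changes on a job, setting up the inotify watcher lazily.
+
+ """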
+ def __init__(self, filename):
+ """Initializes this class.
+
+ @type filename: string
+ @param filename: Path to job file
+
+ """
+ self._filewaiter = None
+ self._filename = filename
+
+ def Wait(self, timeout):
+ """Waits for a job to change.
+
+ @type timeout: float
+ @param timeout: Timeout in seconds
+ @return: Whether there have been events
+
+ """
+ if self._filewaiter:
+ return self._filewaiter.Wait(timeout)
+
+ # Lazy setup: avoid the inotify setup cost if the job file has already
+ # changed. On this first call, set up the watcher but return immediately,
+ # letting the caller check the job file again for changes made since its
+ # last check. This avoids a race between checking and starting to watch.
+ self._filewaiter = _JobFileChangesWaiter(self._filename)
+
+ return True
+
+ def Close(self):
+ """Closes underlying waiter.
+
+ """
+ if self._filewaiter:
+ self._filewaiter.Close()
+
+
+class _WaitForJobChangesHelper(object):
+ """Helper class using inotify to wait for changes in a job file.
+
+ This class takes a previous job status and serial, and alerts the client when
+ the current job status has changed.
+
+ """
+ @staticmethod
+ def _CheckForChanges(counter, job_load_fn, check_fn):
+ if counter.next() > 0:
+ # If this isn't the first check, the job is given some more time to change
+ # again. This gives better performance for jobs generating many
+ # changes/messages.
+ time.sleep(0.1)
+
+ job = job_load_fn()
+ if not job:
+ raise errors.JobLost()
+
+ result = check_fn(job)
+ if result is None:
+ raise utils.RetryAgain()
+
+ return result
+
+ def __call__(self, filename, job_load_fn,
+ fields, prev_job_info, prev_log_serial, timeout):
+ """Waits for changes on a job.
+
+ @type filename: string
+ @param filename: File on which to wait for changes
+ @type job_load_fn: callable
+ @param job_load_fn: Function to load job
+ @type fields: list of strings
+ @param fields: Which fields to check for changes
+ @type prev_job_info: list or None
+ @param prev_job_info: Last job information returned
+ @type prev_log_serial: int
+ @param prev_log_serial: Last job message serial number
+ @type timeout: float
+ @param timeout: maximum time to wait in seconds
+
+ """
+ counter = itertools.count()
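+ # Counts the number of checks; _CheckForChanges sleeps briefly on all
+ # but the first one to batch updates from fast-changing jobs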
+ try:
+ check_fn = _JobChangesChecker(fields, prev_job_info, prev_log_serial)
+ waiter = _JobChangesWaiter(filename)
+ try:
+ return utils.Retry(compat.partial(self._CheckForChanges,
+ counter, job_load_fn, check_fn),
+ utils.RETRY_REMAINING_TIME, timeout,
+ wait_fn=waiter.Wait)
+ finally:
+ waiter.Close()
+ except (errors.InotifyError, errors.JobLost):
+ return None
+ except utils.RetryTimeout:
+ return constants.JOB_NOTCHANGED
+
+
+def _EncodeOpError(err):
+ """Encodes an error which occurred while processing an opcode.
+
+ """
+ if isinstance(err, errors.GenericError):
+ to_encode = err
+ else:
+ to_encode = errors.OpExecError(str(err))
+
+ return errors.EncodeException(to_encode)
+
+
+class _TimeoutStrategyWrapper:
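+ """Buffers one timeout value from a strategy function so that it can be
+ inspected with L{Peek} before being consumed with L{Next}.
+
+ """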
+ def __init__(self, fn):
+ """Initializes this class.
+
+ """
+ self._fn = fn
+ self._next = None
+
+ def _Advance(self):
+ """Gets the next timeout if necessary.
+
+ """
+ if self._next is None:
+ self._next = self._fn()
+
+ def Peek(self):
+ """Returns the next timeout.
+
+ """
+ self._Advance()
+ return self._next
+
+ def Next(self):
+ """Returns the current timeout and advances the internal state.
+
+ """
+ self._Advance()
+ result = self._next
+ self._next = None
+ return result
+
+
+class _OpExecContext:
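+ """Collects the state needed while a single opcode is being executed.
+
+ """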
+ def __init__(self, op, index, log_prefix, timeout_strategy_factory):
+ """Initializes this class.
+
+ """
+ self.op = op
+ self.index = index
+ self.log_prefix = log_prefix
+ self.summary = op.input.Summary()
+
+ # Create local copy to modify
+ if getattr(op.input, opcodes.DEPEND_ATTR, None):
+ self.jobdeps = op.input.depends[:]
+ else:
+ self.jobdeps = None
+
+ self._timeout_strategy_factory = timeout_strategy_factory
+ self._ResetTimeoutStrategy()
+
+ def _ResetTimeoutStrategy(self):
+ """Creates a new timeout strategy.
+
+ """
+ self._timeout_strategy = \
+ _TimeoutStrategyWrapper(self._timeout_strategy_factory().NextAttempt)
+
+ def CheckPriorityIncrease(self):
+ """Checks whether priority can and should be increased.
+
+ Called when locks couldn't be acquired.
+
+ """
+ op = self.op
+
+ # Exhausted all retries and next round should not use blocking acquire
+ # for locks?
+ if (self._timeout_strategy.Peek() is None and
+ op.priority > constants.OP_PRIO_HIGHEST):
+ logging.debug("Increasing priority")
+ op.priority -= 1
+ self._ResetTimeoutStrategy()
+ return True
+
+ return False
+
+ def GetNextLockTimeout(self):
+ """Returns the next lock acquire timeout.
+
+ """
+ return self._timeout_strategy.Next()
+
+
+class _JobProcessor(object):
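+ """Processes a job's opcodes; each call executes at most one opcode.
+
+ """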
+ (DEFER,
+ WAITDEP,
+ FINISHED) = range(1, 4)
+
+ def __init__(self, queue, opexec_fn, job,
+ _timeout_strategy_factory=mcpu.LockAttemptTimeoutStrategy):
+ """Initializes this class.
+
+ """
+ self.queue = queue
+ self.opexec_fn = opexec_fn
+ self.job = job
+ self._timeout_strategy_factory = _timeout_strategy_factory
+
+ @staticmethod
+ def _FindNextOpcode(job, timeout_strategy_factory):
+ """Locates the next opcode to run.
+
+ @type job: L{_QueuedJob}
+ @param job: Job object
+ @param timeout_strategy_factory: Callable to create new timeout strategy
+
+ """
+ # Cache the enumeration iterator to speed up locating the next opcode in
+ # future lookups
+ # TODO: Consider splitting _QueuedJob.ops into two separate lists, one for
+ # pending and one for processed ops.
+ if job.ops_iter is None:
+ job.ops_iter = enumerate(job.ops)
+
+ # Find next opcode to run
+ while True:
+ try:
+ (idx, op) = job.ops_iter.next()
+ except StopIteration:
+ raise errors.ProgrammerError("Called for a finished job")
+
+ if op.status == constants.OP_STATUS_RUNNING:
+ # Found an opcode already marked as running
+ raise errors.ProgrammerError("Called for job marked as running")
+
+ opctx = _OpExecContext(op, idx, "Op %s/%s" % (idx + 1, len(job.ops)),
+ timeout_strategy_factory)
+
+ if op.status not in constants.OPS_FINALIZED:
+ return opctx
+
+ # This is a job that was partially completed before master daemon
+ # shutdown, so it can be expected that some opcodes are already
+ # completed successfully (if any did error out, then the whole job
+ # should have been aborted and not resubmitted for processing).
+ logging.info("%s: opcode %s already processed, skipping",
+ opctx.log_prefix, opctx.summary)
+
+ @staticmethod
+ def _MarkWaitlock(job, op):
+ """Marks an opcode as waiting for locks.
+
+ The job's start timestamp is also set if necessary.
+
+ @type job: L{_QueuedJob}
+ @param job: Job object
+ @type op: L{_QueuedOpCode}
+ @param op: Opcode object
+
+ """
+ assert op in job.ops
+ assert op.status in (constants.OP_STATUS_QUEUED,
+ constants.OP_STATUS_WAITING)
+
+ update = False
+
+ op.result = None
+
+ if op.status == constants.OP_STATUS_QUEUED:
+ op.status = constants.OP_STATUS_WAITING
+ update = True
+
+ if op.start_timestamp is None:
+ op.start_timestamp = TimeStampNow()
+ update = True
+
+ if job.start_timestamp is None:
+ job.start_timestamp = op.start_timestamp
+ update = True
+
+ assert op.status == constants.OP_STATUS_WAITING
+
+ return update
+
+ @staticmethod
+ def _CheckDependencies(queue, job, opctx):
+ """Checks if an opcode has dependencies and if so, processes them.
+
+ @type queue: L{JobQueue}
+ @param queue: Queue object
+ @type job: L{_QueuedJob}
+ @param job: Job object
+ @type opctx: L{_OpExecContext}
+ @param opctx: Opcode execution context
+ @rtype: bool
+ @return: Whether opcode will be re-scheduled by dependency tracker
+
+ """
+ op = opctx.op
+
+ result = False
+
+ while opctx.jobdeps:
+ (dep_job_id, dep_status) = opctx.jobdeps[0]
+
+ (depresult, depmsg) = queue.depmgr.CheckAndRegister(job, dep_job_id,
+ dep_status)
+ assert ht.TNonEmptyString(depmsg), "No dependency message"
+
+ logging.info("%s: %s", opctx.log_prefix, depmsg)
+
+ if depresult == _JobDependencyManager.CONTINUE:
+ # Remove dependency and continue
+ opctx.jobdeps.pop(0)
+
+ elif depresult == _JobDependencyManager.WAIT:
+ # Need to wait for notification, dependency tracker will re-add job
+ # to workerpool
+ result = True
+ break
+
+ elif depresult == _JobDependencyManager.CANCEL:
+ # The dependency job was canceled, so cancel this job as well
+ job.Cancel()
+ assert op.status == constants.OP_STATUS_CANCELING
+ break
+
+ elif depresult in (_JobDependencyManager.WRONGSTATUS,
+ _JobDependencyManager.ERROR):
+ # Job failed or there was an error, this job must fail
+ op.status = constants.OP_STATUS_ERROR
+ op.result = _EncodeOpError(errors.OpExecError(depmsg))
+ break
+
+ else:
+ raise errors.ProgrammerError("Unknown dependency result '%s'" %
+ depresult)
+
+ return result
+
+ def _ExecOpCodeUnlocked(self, opctx):
+ """Processes one opcode and returns the result.
+
+ """
+ op = opctx.op
+
+ assert op.status == constants.OP_STATUS_WAITING
+
+ timeout = opctx.GetNextLockTimeout()
+
+ try:
+ # Make sure not to hold queue lock while calling ExecOpCode
+ result = self.opexec_fn(op.input,
+ _OpExecCallbacks(self.queue, self.job, op),
+ timeout=timeout, priority=op.priority)
+ except mcpu.LockAcquireTimeout:
+ assert timeout is not None, "Received timeout for blocking acquire"
+ logging.debug("Couldn't acquire locks in %0.6fs", timeout)
+
+ assert op.status in (constants.OP_STATUS_WAITING,
+ constants.OP_STATUS_CANCELING)
+
+ # Was the job canceled while we were waiting for the locks?
+ if op.status == constants.OP_STATUS_CANCELING:
+ return (constants.OP_STATUS_CANCELING, None)
+
+ # Stay in the waiting status while trying to re-acquire the locks
+ return (constants.OP_STATUS_WAITING, None)
+ except CancelJob:
+ logging.exception("%s: Canceling job", opctx.log_prefix)
+ assert op.status == constants.OP_STATUS_CANCELING
+ return (constants.OP_STATUS_CANCELING, None)
+ except Exception, err: # pylint: disable=W0703
+ logging.exception("%s: Caught exception in %s",
+ opctx.log_prefix, opctx.summary)
+ return (constants.OP_STATUS_ERROR, _EncodeOpError(err))
+ else:
+ logging.debug("%s: %s successful",
+ opctx.log_prefix, opctx.summary)
+ return (constants.OP_STATUS_SUCCESS, result)
+
+ def __call__(self, _nextop_fn=None):
+ """Continues execution of a job.
+
+ @param _nextop_fn: Callback function for tests
+ @return: C{FINISHED} if job is fully processed, C{DEFER} if the job should
+ be deferred and C{WAITDEP} if the dependency manager
+ (L{_JobDependencyManager}) will re-schedule the job when appropriate
+
+ """
+ queue = self.queue
+ job = self.job
+
+ logging.debug("Processing job %s", job.id)
+
+ queue.acquire(shared=1)
+ try:
+ opcount = len(job.ops)
+
+ assert job.writable, "Expected writable job"
+
+ # Don't do anything for finalized jobs
+ if job.CalcStatus() in constants.JOBS_FINALIZED:
+ return self.FINISHED
+
+ # Is a previous opcode still pending?
+ if job.cur_opctx:
+ opctx = job.cur_opctx
+ job.cur_opctx = None
+ else:
+ if __debug__ and _nextop_fn:
+ _nextop_fn()
+ opctx = self._FindNextOpcode(job, self._timeout_strategy_factory)
+
+ op = opctx.op
+
+ # Consistency check
+ assert compat.all(i.status in (constants.OP_STATUS_QUEUED,
+ constants.OP_STATUS_CANCELING)
+ for i in job.ops[opctx.index + 1:])
+
+ assert op.status in (constants.OP_STATUS_QUEUED,
+ constants.OP_STATUS_WAITING,
+ constants.OP_STATUS_CANCELING)
+
+ assert (op.priority <= constants.OP_PRIO_LOWEST and
+ op.priority >= constants.OP_PRIO_HIGHEST)
+
+ waitjob = None
+
+ if op.status != constants.OP_STATUS_CANCELING:
+ assert op.status in (constants.OP_STATUS_QUEUED,
+ constants.OP_STATUS_WAITING)
+
+ # Prepare to start opcode
+ if self._MarkWaitlock(job, op):
+ # Write to disk
+ queue.UpdateJobUnlocked(job)
+
+ assert op.status == constants.OP_STATUS_WAITING
+ assert job.CalcStatus() == constants.JOB_STATUS_WAITING
+ assert job.start_timestamp and op.start_timestamp
+ assert waitjob is None
+
+ # Check if waiting for a job is necessary
+ waitjob = self._CheckDependencies(queue, job, opctx)
+
+ assert op.status in (constants.OP_STATUS_WAITING,
+ constants.OP_STATUS_CANCELING,
+ constants.OP_STATUS_ERROR)
+
+ if not (waitjob or op.status in (constants.OP_STATUS_CANCELING,
+ constants.OP_STATUS_ERROR)):
+ logging.info("%s: opcode %s waiting for locks",
+ opctx.log_prefix, opctx.summary)
+
+ assert not opctx.jobdeps, "Not all dependencies were removed"
+
+ queue.release()
+ try:
+ (op_status, op_result) = self._ExecOpCodeUnlocked(opctx)
+ finally:
+ queue.acquire(shared=1)
+
+ op.status = op_status
+ op.result = op_result
+
+ assert not waitjob
+
+ if op.status == constants.OP_STATUS_WAITING:
+ # Couldn't get locks in time
+ assert not op.end_timestamp
+ else:
+ # Finalize opcode
+ op.end_timestamp = TimeStampNow()
+
+ if op.status == constants.OP_STATUS_CANCELING:
+ assert not compat.any(i.status != constants.OP_STATUS_CANCELING
+ for i in job.ops[opctx.index:])
+ else:
+ assert op.status in constants.OPS_FINALIZED
+
+ if op.status == constants.OP_STATUS_WAITING or waitjob:
+ finalize = False
+
+ if not waitjob and opctx.CheckPriorityIncrease():
+ # Priority was changed, need to update on-disk file
+ queue.UpdateJobUnlocked(job)
+
+ # Keep around for another round
+ job.cur_opctx = opctx
+
+ assert (op.priority <= constants.OP_PRIO_LOWEST and
+ op.priority >= constants.OP_PRIO_HIGHEST)
+
+ # The job status must not be finalized at this point
+ assert job.CalcStatus() == constants.JOB_STATUS_WAITING
+
+ else:
+ # Ensure all opcodes so far have been successful
+ assert (opctx.index == 0 or
+ compat.all(i.status == constants.OP_STATUS_SUCCESS
+ for i in job.ops[:opctx.index]))
+
+ # Reset context
+ job.cur_opctx = None
+
+ if op.status == constants.OP_STATUS_SUCCESS:
+ finalize = False
+
+ elif op.status == constants.OP_STATUS_ERROR:
+ # Ensure failed opcode has an exception as its result
+ assert errors.GetEncodedError(job.ops[opctx.index].result)
+
+ to_encode = errors.OpExecError("Preceding opcode failed")
+ job.MarkUnfinishedOps(constants.OP_STATUS_ERROR,
+ _EncodeOpError(to_encode))
+ finalize = True
+
+ # Consistency check
+ assert compat.all(i.status == constants.OP_STATUS_ERROR and
+ errors.GetEncodedError(i.result)
+ for i in job.ops[opctx.index:])
+
+ elif op.status == constants.OP_STATUS_CANCELING:
+ job.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
+ "Job canceled by request")
+ finalize = True
+
+ else:
+ raise errors.ProgrammerError("Unknown status '%s'" % op.status)
+
+ if opctx.index == (opcount - 1):
+ # Finalize on last opcode
+ finalize = True
+
+ if finalize:
+ # All opcodes have been run, finalize job
+ job.Finalize()
+
+ # Write to disk. If the job status is final, this is the final write
+ # allowed. Once the file has been written, it can be archived anytime.
+ queue.UpdateJobUnlocked(job)
+
+ assert not waitjob
+
+ if finalize:
+ logging.info("Finished job %s, status = %s", job.id, job.CalcStatus())
+ return self.FINISHED
+
+ assert not waitjob or queue.depmgr.JobWaiting(job)
+
+ if waitjob:
+ return self.WAITDEP
+ else:
+ return self.DEFER
+ finally:
+ assert job.writable, "Job became read-only while being processed"
+ queue.release()
+
+
+def _EvaluateJobProcessorResult(depmgr, job, result):
+ """Looks at a result from L{_JobProcessor} for a job.
+
+ To be used in a L{_JobQueueWorker}.
+
+ """
+ if result == _JobProcessor.FINISHED:
+ # Notify waiting jobs
+ depmgr.NotifyWaiters(job.id)
+
+ elif result == _JobProcessor.DEFER:
+ # Schedule again
+ raise workerpool.DeferTask(priority=job.CalcPriority())
+
+ elif result == _JobProcessor.WAITDEP:
+ # No-op, dependency manager will re-schedule
+ pass
+
+ else:
+ raise errors.ProgrammerError("Job processor returned unknown status %s" %
+ (result, ))
+
+
+class _JobQueueWorker(workerpool.BaseWorker):
+ """The actual job workers.
+
+ """
+ def RunTask(self, job): # pylint: disable=W0221
+ """Job executor.
+
+ @type job: L{_QueuedJob}
+ @param job: the job to be processed
+
+ """
+ assert job.writable, "Expected writable job"
+
+ # Ensure only one worker is active on a single job. If a job registers for
+ # a dependency job and the dependency notifies before the first worker is
+ # done, the job can end up in the task list more than once.
+ job.processor_lock.acquire()
+ try:
+ return self._RunTaskInner(job)
+ finally:
+ job.processor_lock.release()
+
+ def _RunTaskInner(self, job):
+ """Executes a job.
+
+ Must be called with per-job lock acquired.
+
+ """
+ queue = job.queue
+ assert queue == self.pool.queue
+
+ setname_fn = lambda op: self.SetTaskName(self._GetWorkerName(job, op))
+ setname_fn(None)
+
+ proc = mcpu.Processor(queue.context, job.id)
+
+ # Create wrapper for setting thread name
+ wrap_execop_fn = compat.partial(self._WrapExecOpCode, setname_fn,
+ proc.ExecOpCode)
+
+ _EvaluateJobProcessorResult(queue.depmgr, job,
+ _JobProcessor(queue, wrap_execop_fn, job)())
+
+ @staticmethod
+ def _WrapExecOpCode(setname_fn, execop_fn, op, *args, **kwargs):
+ """Updates the worker thread name to include a short summary of the opcode.
+
+ @param setname_fn: Callable setting worker thread name
+ @param execop_fn: Callable for executing opcode (usually
+ L{mcpu.Processor.ExecOpCode})
+
+ """
+ setname_fn(op)
+ try:
+ return execop_fn(op, *args, **kwargs)
+ finally:
+ setname_fn(None)
+
+ @staticmethod
+ def _GetWorkerName(job, op):
+ """Sets the worker thread name.
+
+ @type job: L{_QueuedJob}
+ @type op: L{opcodes.OpCode}
+
+ """
+ parts = ["Job%s" % job.id]
+
+ if op:
+ parts.append(op.TinySummary())
+
+ return "/".join(parts)
+
+
+class _JobQueueWorkerPool(workerpool.WorkerPool):
+ """Simple class implementing a job-processing workerpool.
+
+ """
+ def __init__(self, queue):
+ super(_JobQueueWorkerPool, self).__init__("Jq",
+ JOBQUEUE_THREADS,
+ _JobQueueWorker)
+ self.queue = queue
+