X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/0aeeb6e3416ef1289cf86235ca6e914658dd5cc5..def6577f00a482c310f4e20bb315fa90290ad5b7:/lib/jqueue.py?ds=sidebyside diff --git a/lib/jqueue.py b/lib/jqueue.py index d137443..110d386 100644 --- a/lib/jqueue.py +++ b/lib/jqueue.py @@ -1,7 +1,7 @@ # # -# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc. +# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -31,12 +31,14 @@ used by all other classes in this module. import logging import errno -import re import time import weakref +import threading +import itertools +import operator try: - # pylint: disable-msg=E0611 + # pylint: disable=E0611 from pyinotify import pyinotify except ImportError: import pyinotify @@ -55,15 +57,22 @@ from ganeti import rpc from ganeti import runtime from ganeti import netutils from ganeti import compat +from ganeti import ht +from ganeti import query +from ganeti import qlang +from ganeti import pathutils +from ganeti import vcluster JOBQUEUE_THREADS = 25 -JOBS_PER_ARCHIVE_DIRECTORY = 10000 # member lock names to be passed to @ssynchronized decorator _LOCK = "_lock" _QUEUE = "_queue" +#: Retrieves "id" attribute +_GetIdAttr = operator.attrgetter("id") + class CancelJob(Exception): """Special exception to cancel a job. @@ -71,6 +80,12 @@ class CancelJob(Exception): """ +class QueueShutdown(Exception): + """Special exception to abort a job when the job queue is shutting down. + + """ + + def TimeStampNow(): """Returns the current timestamp. @@ -81,6 +96,33 @@ def TimeStampNow(): return utils.SplitTime(time.time()) +def _CallJqUpdate(runner, names, file_name, content): + """Updates job queue file after virtualizing filename. + + """ + virt_file_name = vcluster.MakeVirtualPath(file_name) + return runner.call_jobqueue_update(names, virt_file_name, content) + + +class _SimpleJobQuery: + """Wrapper for job queries. + + Instance keeps list of fields cached, useful e.g. in L{_JobChangesChecker}. + + """ + def __init__(self, fields): + """Initializes this class. + + """ + self._query = query.Query(query.JOB_FIELDS, fields) + + def __call__(self, job): + """Executes a job query using cached field list. + + """ + return self._query.OldStyleQuery([(job.id, job)], sort_by_name=False)[0] + + class _QueuedOpCode(object): """Encapsulates an opcode object. @@ -99,7 +141,7 @@ class _QueuedOpCode(object): "__weakref__"] def __init__(self, op): - """Constructor for the _QuededOpCode. + """Initializes instances of this class. @type op: L{opcodes.OpCode} @param op: the opcode we encapsulate @@ -172,14 +214,31 @@ class _QueuedJob(object): @ivar received_timestamp: the timestamp for when the job was received @ivar start_timestmap: the timestamp for start of execution @ivar end_timestamp: the timestamp for end of execution + @ivar writable: Whether the job is allowed to be modified """ - # pylint: disable-msg=W0212 + # pylint: disable=W0212 __slots__ = ["queue", "id", "ops", "log_serial", "ops_iter", "cur_opctx", "received_timestamp", "start_timestamp", "end_timestamp", - "__weakref__"] + "__weakref__", "processor_lock", "writable", "archived"] + + def _AddReasons(self): + """Extend the reason trail - def __init__(self, queue, job_id, ops): + Add the reason for all the opcodes of this job to be executed. 
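+
+    Each opcode's C{reason} attribute gains a (source, text, timestamp)
+    tuple; an illustrative entry for the first opcode of job 123 (the
+    source value is whatever L{opcodes.NameToReasonSrc} returns)::
+
+      (reason_src, "job=123;index=0", utils.EpochNano())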
+ + """ + count = 0 + for queued_op in self.ops: + op = queued_op.input + reason_src = opcodes.NameToReasonSrc(op.__class__.__name__) + reason_text = "job=%d;index=%d" % (self.id, count) + reason = getattr(op, "reason", []) + reason.append((reason_src, reason_text, utils.EpochNano())) + op.reason = reason + count = count + 1 + + def __init__(self, queue, job_id, ops, writable): """Constructor for the _QueuedJob. @type queue: L{JobQueue} @@ -189,29 +248,42 @@ class _QueuedJob(object): @type ops: list @param ops: the list of opcodes we hold, which will be encapsulated in _QueuedOpCodes + @type writable: bool + @param writable: Whether job can be modified """ if not ops: raise errors.GenericError("A job needs at least one opcode") self.queue = queue - self.id = job_id + self.id = int(job_id) self.ops = [_QueuedOpCode(op) for op in ops] + self._AddReasons() self.log_serial = 0 self.received_timestamp = TimeStampNow() self.start_timestamp = None self.end_timestamp = None + self.archived = False - self._InitInMemory(self) + self._InitInMemory(self, writable) + + assert not self.archived, "New jobs can not be marked as archived" @staticmethod - def _InitInMemory(obj): + def _InitInMemory(obj, writable): """Initializes in-memory variables. """ + obj.writable = writable obj.ops_iter = None obj.cur_opctx = None + # Read-only jobs are not processed and therefore don't need a lock + if writable: + obj.processor_lock = threading.Lock() + else: + obj.processor_lock = None + def __repr__(self): status = ["%s.%s" % (self.__class__.__module__, self.__class__.__name__), "id=%s" % self.id, @@ -220,23 +292,28 @@ class _QueuedJob(object): return "<%s at %#x>" % (" ".join(status), id(self)) @classmethod - def Restore(cls, queue, state): + def Restore(cls, queue, state, writable, archived): """Restore a _QueuedJob from serialized state: @type queue: L{JobQueue} @param queue: to which queue the restored job belongs @type state: dict @param state: the serialized state + @type writable: bool + @param writable: Whether job can be modified + @type archived: bool + @param archived: Whether job was already archived @rtype: _JobQueue @return: the restored _JobQueue instance """ obj = _QueuedJob.__new__(cls) obj.queue = queue - obj.id = state["id"] + obj.id = int(state["id"]) obj.received_timestamp = state.get("received_timestamp", None) obj.start_timestamp = state.get("start_timestamp", None) obj.end_timestamp = state.get("end_timestamp", None) + obj.archived = archived obj.ops = [] obj.log_serial = 0 @@ -246,7 +323,7 @@ class _QueuedJob(object): obj.log_serial = max(obj.log_serial, log_entry[0]) obj.ops.append(op) - cls._InitInMemory(obj) + cls._InitInMemory(obj, writable) return obj @@ -298,8 +375,8 @@ class _QueuedJob(object): if op.status == constants.OP_STATUS_QUEUED: pass - elif op.status == constants.OP_STATUS_WAITLOCK: - status = constants.JOB_STATUS_WAITLOCK + elif op.status == constants.OP_STATUS_WAITING: + status = constants.JOB_STATUS_WAITING elif op.status == constants.OP_STATUS_RUNNING: status = constants.JOB_STATUS_RUNNING elif op.status == constants.OP_STATUS_CANCELING: @@ -369,41 +446,7 @@ class _QueuedJob(object): has been passed """ - row = [] - for fname in fields: - if fname == "id": - row.append(self.id) - elif fname == "status": - row.append(self.CalcStatus()) - elif fname == "priority": - row.append(self.CalcPriority()) - elif fname == "ops": - row.append([op.input.__getstate__() for op in self.ops]) - elif fname == "opresult": - row.append([op.result for op in self.ops]) - elif fname == 
"opstatus": - row.append([op.status for op in self.ops]) - elif fname == "oplog": - row.append([op.log for op in self.ops]) - elif fname == "opstart": - row.append([op.start_timestamp for op in self.ops]) - elif fname == "opexec": - row.append([op.exec_timestamp for op in self.ops]) - elif fname == "opend": - row.append([op.end_timestamp for op in self.ops]) - elif fname == "oppriority": - row.append([op.priority for op in self.ops]) - elif fname == "received_ts": - row.append(self.received_timestamp) - elif fname == "start_ts": - row.append(self.start_timestamp) - elif fname == "end_ts": - row.append(self.end_timestamp) - elif fname == "summary": - row.append([op.input.Summary() for op in self.ops]) - else: - raise errors.OpExecError("Invalid self query field '%s'" % fname) - return row + return _SimpleJobQuery(fields)(self) def MarkUnfinishedOps(self, status, result): """Mark unfinished opcodes with a given status and result. @@ -425,6 +468,12 @@ class _QueuedJob(object): op.result = result not_marked = False + def Finalize(self): + """Marks the job as finalized. + + """ + self.end_timestamp = TimeStampNow() + def Cancel(self): """Marks job as canceled/-ing if possible. @@ -438,9 +487,10 @@ class _QueuedJob(object): if status == constants.JOB_STATUS_QUEUED: self.MarkUnfinishedOps(constants.OP_STATUS_CANCELED, "Job canceled by request") + self.Finalize() return (True, "Job %s canceled" % self.id) - elif status == constants.JOB_STATUS_WAITLOCK: + elif status == constants.JOB_STATUS_WAITING: # The worker will notice the new status and cancel the job self.MarkUnfinishedOps(constants.OP_STATUS_CANCELING, None) return (True, "Job %s will be canceled" % self.id) @@ -449,6 +499,50 @@ class _QueuedJob(object): logging.debug("Job %s is no longer waiting in the queue", self.id) return (False, "Job %s is no longer waiting in the queue" % self.id) + def ChangePriority(self, priority): + """Changes the job priority. 
+ + @type priority: int + @param priority: New priority + @rtype: tuple; (bool, string) + @return: Boolean describing whether job's priority was successfully changed + and a text message + + """ + status = self.CalcStatus() + + if status in constants.JOBS_FINALIZED: + return (False, "Job %s is finished" % self.id) + elif status == constants.JOB_STATUS_CANCELING: + return (False, "Job %s is cancelling" % self.id) + else: + assert status in (constants.JOB_STATUS_QUEUED, + constants.JOB_STATUS_WAITING, + constants.JOB_STATUS_RUNNING) + + changed = False + for op in self.ops: + if (op.status == constants.OP_STATUS_RUNNING or + op.status in constants.OPS_FINALIZED): + assert not changed, \ + ("Found opcode for which priority should not be changed after" + " priority has been changed for previous opcodes") + continue + + assert op.status in (constants.OP_STATUS_QUEUED, + constants.OP_STATUS_WAITING) + + changed = True + + # Set new priority (doesn't modify opcode input) + op.priority = priority + + if changed: + return (True, ("Priorities of pending opcodes for job %s have been" + " changed to %s" % (self.id, priority))) + else: + return (False, "Job %s had no pending opcodes" % self.id) + class _OpExecCallbacks(mcpu.OpExecCbBase): def __init__(self, queue, job, op): @@ -479,6 +573,11 @@ class _OpExecCallbacks(mcpu.OpExecCbBase): logging.debug("Canceling opcode") raise CancelJob() + # See if queue is shutting down + if not self._queue.AcceptingJobsUnlocked(): + logging.debug("Queue is shutting down") + raise QueueShutdown() + @locking.ssynchronized(_QUEUE, shared=1) def NotifyStart(self): """Mark the opcode as running, not lock-waiting. @@ -486,11 +585,11 @@ class _OpExecCallbacks(mcpu.OpExecCbBase): This is called from the mcpu code as a notifier function, when the LU is finally about to start the Exec() method. Of course, to have end-user visible results, the opcode must be initially (before calling into - Processor.ExecOpCode) set to OP_STATUS_WAITLOCK. + Processor.ExecOpCode) set to OP_STATUS_WAITING. """ assert self._op in self._job.ops - assert self._op.status in (constants.OP_STATUS_WAITLOCK, + assert self._op.status in (constants.OP_STATUS_WAITING, constants.OP_STATUS_CANCELING) # Cancel here if we were asked to @@ -530,16 +629,18 @@ class _OpExecCallbacks(mcpu.OpExecCbBase): timestamp = utils.SplitTime(time.time()) self._AppendFeedback(timestamp, log_type, log_msg) - def CheckCancel(self): - """Check whether job has been cancelled. + def CurrentPriority(self): + """Returns current priority for opcode. """ - assert self._op.status in (constants.OP_STATUS_WAITLOCK, + assert self._op.status in (constants.OP_STATUS_WAITING, constants.OP_STATUS_CANCELING) # Cancel here if we were asked to self._CheckCancel() + return self._op.priority + def SubmitManyJobs(self, jobs): """Submits jobs for processing. @@ -562,7 +663,7 @@ class _JobChangesChecker(object): @param prev_log_serial: previous job serial, as passed by the LUXI client """ - self._fields = fields + self._squery = _SimpleJobQuery(fields) self._prev_job_info = prev_job_info self._prev_log_serial = prev_log_serial @@ -573,8 +674,10 @@ class _JobChangesChecker(object): @param job: Job object """ + assert not job.writable, "Expected read-only job" + status = job.CalcStatus() - job_info = job.GetInfo(self._fields) + job_info = self._squery(job) log_entries = job.GetLogEntries(self._prev_log_serial) # Serializing and deserializing data can cause type changes (e.g. from @@ -593,7 +696,7 @@ class _JobChangesChecker(object): # no changes. 
if (status not in (constants.JOB_STATUS_QUEUED, constants.JOB_STATUS_RUNNING, - constants.JOB_STATUS_WAITLOCK) or + constants.JOB_STATUS_WAITING) or job_info != self._prev_job_info or (log_entries and self._prev_log_serial != log_entries[0][0])): logging.debug("Job %s changed", job.id) @@ -603,7 +706,7 @@ class _JobChangesChecker(object): class _JobFileChangesWaiter(object): - def __init__(self, filename): + def __init__(self, filename, _inotify_wm_cls=pyinotify.WatchManager): """Initializes this class. @type filename: string @@ -611,7 +714,7 @@ class _JobFileChangesWaiter(object): @raises errors.InotifyError: if the notifier cannot be setup """ - self._wm = pyinotify.WatchManager() + self._wm = _inotify_wm_cls() self._inotify_handler = \ asyncnotifier.SingleFileEventHandler(self._wm, self._OnInotify, filename) self._notifier = \ @@ -653,7 +756,7 @@ class _JobFileChangesWaiter(object): class _JobChangesWaiter(object): - def __init__(self, filename): + def __init__(self, filename, _waiter_cls=_JobFileChangesWaiter): """Initializes this class. @type filename: string @@ -662,6 +765,7 @@ class _JobChangesWaiter(object): """ self._filewaiter = None self._filename = filename + self._waiter_cls = _waiter_cls def Wait(self, timeout): """Waits for a job to change. @@ -678,7 +782,7 @@ class _JobChangesWaiter(object): # If this point is reached, return immediately and let caller check the job # file again in case there were changes since the last check. This avoids a # race condition. - self._filewaiter = _JobFileChangesWaiter(self._filename) + self._filewaiter = self._waiter_cls(self._filename) return True @@ -698,7 +802,13 @@ class _WaitForJobChangesHelper(object): """ @staticmethod - def _CheckForChanges(job_load_fn, check_fn): + def _CheckForChanges(counter, job_load_fn, check_fn): + if counter.next() > 0: + # If this isn't the first check the job is given some more time to change + # again. This gives better performance for jobs generating many + # changes/messages. + time.sleep(0.1) + job = job_load_fn() if not job: raise errors.JobLost() @@ -710,7 +820,8 @@ class _WaitForJobChangesHelper(object): return result def __call__(self, filename, job_load_fn, - fields, prev_job_info, prev_log_serial, timeout): + fields, prev_job_info, prev_log_serial, timeout, + _waiter_cls=_JobChangesWaiter): """Waits for changes on a job. 
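+
+    A minimal sketch of a call, mirroring the use in
+    L{JobQueue.WaitForJobChanges} (argument values are illustrative)::
+
+      helper = _WaitForJobChangesHelper()
+      result = helper(filename, job_load_fn, ["status"], None, None, 30.0)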
@type filename: string @@ -727,17 +838,18 @@ class _WaitForJobChangesHelper(object): @param timeout: maximum time to wait in seconds """ + counter = itertools.count() try: check_fn = _JobChangesChecker(fields, prev_job_info, prev_log_serial) - waiter = _JobChangesWaiter(filename) + waiter = _waiter_cls(filename) try: return utils.Retry(compat.partial(self._CheckForChanges, - job_load_fn, check_fn), + counter, job_load_fn, check_fn), utils.RETRY_REMAINING_TIME, timeout, wait_fn=waiter.Wait) finally: waiter.Close() - except (errors.InotifyError, errors.JobLost): + except errors.JobLost: return None except utils.RetryTimeout: return constants.JOB_NOTCHANGED @@ -797,6 +909,12 @@ class _OpExecContext: self.log_prefix = log_prefix self.summary = op.input.Summary() + # Create local copy to modify + if getattr(op.input, opcodes.DEPEND_ATTR, None): + self.jobdeps = op.input.depends[:] + else: + self.jobdeps = None + self._timeout_strategy_factory = timeout_strategy_factory self._ResetTimeoutStrategy() @@ -834,6 +952,10 @@ class _OpExecContext: class _JobProcessor(object): + (DEFER, + WAITDEP, + FINISHED) = range(1, 4) + def __init__(self, queue, opexec_fn, job, _timeout_strategy_factory=mcpu.LockAttemptTimeoutStrategy): """Initializes this class. @@ -874,21 +996,15 @@ class _JobProcessor(object): opctx = _OpExecContext(op, idx, "Op %s/%s" % (idx + 1, len(job.ops)), timeout_strategy_factory) - if op.status == constants.OP_STATUS_CANCELED: - # Cancelled jobs are handled by the caller - assert not compat.any(i.status != constants.OP_STATUS_CANCELED - for i in job.ops[idx:]) - - elif op.status in constants.OPS_FINALIZED: - # This is a job that was partially completed before master daemon - # shutdown, so it can be expected that some opcodes are already - # completed successfully (if any did error out, then the whole job - # should have been aborted and not resubmitted for processing). - logging.info("%s: opcode %s already processed, skipping", - opctx.log_prefix, opctx.summary) - continue + if op.status not in constants.OPS_FINALIZED: + return opctx - return opctx + # This is a job that was partially completed before master daemon + # shutdown, so it can be expected that some opcodes are already + # completed successfully (if any did error out, then the whole job + # should have been aborted and not resubmitted for processing). + logging.info("%s: opcode %s already processed, skipping", + opctx.log_prefix, opctx.summary) @staticmethod def _MarkWaitlock(job, op): @@ -904,14 +1020,14 @@ class _JobProcessor(object): """ assert op in job.ops assert op.status in (constants.OP_STATUS_QUEUED, - constants.OP_STATUS_WAITLOCK) + constants.OP_STATUS_WAITING) update = False op.result = None if op.status == constants.OP_STATUS_QUEUED: - op.status = constants.OP_STATUS_WAITLOCK + op.status = constants.OP_STATUS_WAITING update = True if op.start_timestamp is None: @@ -922,17 +1038,73 @@ class _JobProcessor(object): job.start_timestamp = op.start_timestamp update = True - assert op.status == constants.OP_STATUS_WAITLOCK + assert op.status == constants.OP_STATUS_WAITING return update + @staticmethod + def _CheckDependencies(queue, job, opctx): + """Checks if an opcode has dependencies and if so, processes them. 
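+
+    Dependencies come from C{opctx.jobdeps}, a list of (job ID, wanted
+    finalized statuses) pairs copied from the opcode's C{depends}
+    attribute, e.g. (illustrative)::
+
+      [(1234, [constants.JOB_STATUS_SUCCESS])]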
+ + @type queue: L{JobQueue} + @param queue: Queue object + @type job: L{_QueuedJob} + @param job: Job object + @type opctx: L{_OpExecContext} + @param opctx: Opcode execution context + @rtype: bool + @return: Whether opcode will be re-scheduled by dependency tracker + + """ + op = opctx.op + + result = False + + while opctx.jobdeps: + (dep_job_id, dep_status) = opctx.jobdeps[0] + + (depresult, depmsg) = queue.depmgr.CheckAndRegister(job, dep_job_id, + dep_status) + assert ht.TNonEmptyString(depmsg), "No dependency message" + + logging.info("%s: %s", opctx.log_prefix, depmsg) + + if depresult == _JobDependencyManager.CONTINUE: + # Remove dependency and continue + opctx.jobdeps.pop(0) + + elif depresult == _JobDependencyManager.WAIT: + # Need to wait for notification, dependency tracker will re-add job + # to workerpool + result = True + break + + elif depresult == _JobDependencyManager.CANCEL: + # Job was cancelled, cancel this job as well + job.Cancel() + assert op.status == constants.OP_STATUS_CANCELING + break + + elif depresult in (_JobDependencyManager.WRONGSTATUS, + _JobDependencyManager.ERROR): + # Job failed or there was an error, this job must fail + op.status = constants.OP_STATUS_ERROR + op.result = _EncodeOpError(errors.OpExecError(depmsg)) + break + + else: + raise errors.ProgrammerError("Unknown dependency result '%s'" % + depresult) + + return result + def _ExecOpCodeUnlocked(self, opctx): """Processes one opcode and returns the result. """ op = opctx.op - assert op.status == constants.OP_STATUS_WAITLOCK + assert op.status == constants.OP_STATUS_WAITING timeout = opctx.GetNextLockTimeout() @@ -940,25 +1112,38 @@ class _JobProcessor(object): # Make sure not to hold queue lock while calling ExecOpCode result = self.opexec_fn(op.input, _OpExecCallbacks(self.queue, self.job, op), - timeout=timeout, priority=op.priority) + timeout=timeout) except mcpu.LockAcquireTimeout: assert timeout is not None, "Received timeout for blocking acquire" logging.debug("Couldn't acquire locks in %0.6fs", timeout) - assert op.status in (constants.OP_STATUS_WAITLOCK, + assert op.status in (constants.OP_STATUS_WAITING, constants.OP_STATUS_CANCELING) # Was job cancelled while we were waiting for the lock? if op.status == constants.OP_STATUS_CANCELING: return (constants.OP_STATUS_CANCELING, None) + # Queue is shutting down, return to queued + if not self.queue.AcceptingJobsUnlocked(): + return (constants.OP_STATUS_QUEUED, None) + # Stay in waitlock while trying to re-acquire lock - return (constants.OP_STATUS_WAITLOCK, None) + return (constants.OP_STATUS_WAITING, None) except CancelJob: logging.exception("%s: Canceling job", opctx.log_prefix) assert op.status == constants.OP_STATUS_CANCELING return (constants.OP_STATUS_CANCELING, None) - except Exception, err: # pylint: disable-msg=W0703 + + except QueueShutdown: + logging.exception("%s: Queue is shutting down", opctx.log_prefix) + + assert op.status == constants.OP_STATUS_WAITING + + # Job hadn't been started yet, so it should return to the queue + return (constants.OP_STATUS_QUEUED, None) + + except Exception, err: # pylint: disable=W0703 logging.exception("%s: Caught exception in %s", opctx.log_prefix, opctx.summary) return (constants.OP_STATUS_ERROR, _EncodeOpError(err)) @@ -971,9 +1156,9 @@ class _JobProcessor(object): """Continues execution of a job. 
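+
+    A fragment of the expected handling of the result (the full version
+    is L{_EvaluateJobProcessorResult}, defined below)::
+
+      result = _JobProcessor(queue, opexec_fn, job)()
+      if result == _JobProcessor.DEFER:
+        raise workerpool.DeferTask(priority=job.CalcPriority())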
@param _nextop_fn: Callback function for tests - @rtype: bool - @return: True if job is finished, False if processor needs to be called - again + @return: C{FINISHED} if job is fully processed, C{DEFER} if the job should + be deferred and C{WAITDEP} if the dependency manager + (L{_JobDependencyManager}) will re-schedule the job when appropriate """ queue = self.queue @@ -985,6 +1170,12 @@ class _JobProcessor(object): try: opcount = len(job.ops) + assert job.writable, "Expected writable job" + + # Don't do anything for finalized jobs + if job.CalcStatus() in constants.JOBS_FINALIZED: + return self.FINISHED + # Is a previous opcode still pending? if job.cur_opctx: opctx = job.cur_opctx @@ -998,46 +1189,61 @@ class _JobProcessor(object): # Consistency check assert compat.all(i.status in (constants.OP_STATUS_QUEUED, - constants.OP_STATUS_CANCELING, - constants.OP_STATUS_CANCELED) + constants.OP_STATUS_CANCELING) for i in job.ops[opctx.index + 1:]) assert op.status in (constants.OP_STATUS_QUEUED, - constants.OP_STATUS_WAITLOCK, - constants.OP_STATUS_CANCELING, - constants.OP_STATUS_CANCELED) + constants.OP_STATUS_WAITING, + constants.OP_STATUS_CANCELING) assert (op.priority <= constants.OP_PRIO_LOWEST and op.priority >= constants.OP_PRIO_HIGHEST) - if op.status not in (constants.OP_STATUS_CANCELING, - constants.OP_STATUS_CANCELED): + waitjob = None + + if op.status != constants.OP_STATUS_CANCELING: assert op.status in (constants.OP_STATUS_QUEUED, - constants.OP_STATUS_WAITLOCK) + constants.OP_STATUS_WAITING) # Prepare to start opcode if self._MarkWaitlock(job, op): # Write to disk queue.UpdateJobUnlocked(job) - assert op.status == constants.OP_STATUS_WAITLOCK - assert job.CalcStatus() == constants.JOB_STATUS_WAITLOCK + assert op.status == constants.OP_STATUS_WAITING + assert job.CalcStatus() == constants.JOB_STATUS_WAITING assert job.start_timestamp and op.start_timestamp + assert waitjob is None - logging.info("%s: opcode %s waiting for locks", - opctx.log_prefix, opctx.summary) + # Check if waiting for a job is necessary + waitjob = self._CheckDependencies(queue, job, opctx) - queue.release() - try: - (op_status, op_result) = self._ExecOpCodeUnlocked(opctx) - finally: - queue.acquire(shared=1) + assert op.status in (constants.OP_STATUS_WAITING, + constants.OP_STATUS_CANCELING, + constants.OP_STATUS_ERROR) + + if not (waitjob or op.status in (constants.OP_STATUS_CANCELING, + constants.OP_STATUS_ERROR)): + logging.info("%s: opcode %s waiting for locks", + opctx.log_prefix, opctx.summary) - op.status = op_status - op.result = op_result + assert not opctx.jobdeps, "Not all dependencies were removed" - if op.status == constants.OP_STATUS_WAITLOCK: - # Couldn't get locks in time + queue.release() + try: + (op_status, op_result) = self._ExecOpCodeUnlocked(opctx) + finally: + queue.acquire(shared=1) + + op.status = op_status + op.result = op_result + + assert not waitjob + + if op.status in (constants.OP_STATUS_WAITING, + constants.OP_STATUS_QUEUED): + # waiting: Couldn't get locks in time + # queued: Queue is shutting down assert not op.end_timestamp else: # Finalize opcode @@ -1049,10 +1255,22 @@ class _JobProcessor(object): else: assert op.status in constants.OPS_FINALIZED - if op.status == constants.OP_STATUS_WAITLOCK: + if op.status == constants.OP_STATUS_QUEUED: + # Queue is shutting down + assert not waitjob + + finalize = False + + # Reset context + job.cur_opctx = None + + # In no case must the status be finalized here + assert job.CalcStatus() == constants.JOB_STATUS_QUEUED + + elif 
op.status == constants.OP_STATUS_WAITING or waitjob: finalize = False - if opctx.CheckPriorityIncrease(): + if not waitjob and opctx.CheckPriorityIncrease(): # Priority was changed, need to update on-disk file queue.UpdateJobUnlocked(job) @@ -1063,7 +1281,7 @@ class _JobProcessor(object): op.priority >= constants.OP_PRIO_HIGHEST) # In no case must the status be finalized here - assert job.CalcStatus() == constants.JOB_STATUS_WAITLOCK + assert job.CalcStatus() == constants.JOB_STATUS_WAITING else: # Ensure all opcodes so far have been successful @@ -1096,44 +1314,89 @@ class _JobProcessor(object): "Job canceled by request") finalize = True - elif op.status == constants.OP_STATUS_CANCELED: - finalize = True - else: raise errors.ProgrammerError("Unknown status '%s'" % op.status) - # Finalizing or last opcode? - if finalize or opctx.index == (opcount - 1): + if opctx.index == (opcount - 1): + # Finalize on last opcode + finalize = True + + if finalize: # All opcodes have been run, finalize job - job.end_timestamp = TimeStampNow() + job.Finalize() # Write to disk. If the job status is final, this is the final write # allowed. Once the file has been written, it can be archived anytime. queue.UpdateJobUnlocked(job) - if finalize or opctx.index == (opcount - 1): + assert not waitjob + + if finalize: logging.info("Finished job %s, status = %s", job.id, job.CalcStatus()) - return True + return self.FINISHED - return False + assert not waitjob or queue.depmgr.JobWaiting(job) + + if waitjob: + return self.WAITDEP + else: + return self.DEFER finally: + assert job.writable, "Job became read-only while being processed" queue.release() +def _EvaluateJobProcessorResult(depmgr, job, result): + """Looks at a result from L{_JobProcessor} for a job. + + To be used in a L{_JobQueueWorker}. + + """ + if result == _JobProcessor.FINISHED: + # Notify waiting jobs + depmgr.NotifyWaiters(job.id) + + elif result == _JobProcessor.DEFER: + # Schedule again + raise workerpool.DeferTask(priority=job.CalcPriority()) + + elif result == _JobProcessor.WAITDEP: + # No-op, dependency manager will re-schedule + pass + + else: + raise errors.ProgrammerError("Job processor returned unknown status %s" % + (result, )) + + class _JobQueueWorker(workerpool.BaseWorker): """The actual job workers. """ - def RunTask(self, job): # pylint: disable-msg=W0221 + def RunTask(self, job): # pylint: disable=W0221 """Job executor. - This functions processes a job. It is closely tied to the L{_QueuedJob} and - L{_QueuedOpCode} classes. - @type job: L{_QueuedJob} @param job: the job to be processed """ + assert job.writable, "Expected writable job" + + # Ensure only one worker is active on a single job. If a job registers for + # a dependency job, and the other job notifies before the first worker is + # done, the job can end up in the tasklist more than once. + job.processor_lock.acquire() + try: + return self._RunTaskInner(job) + finally: + job.processor_lock.release() + + def _RunTaskInner(self, job): + """Executes a job. + + Must be called with per-job lock acquired. 
+ + """ queue = job.queue assert queue == self.pool.queue @@ -1146,9 +1409,8 @@ class _JobQueueWorker(workerpool.BaseWorker): wrap_execop_fn = compat.partial(self._WrapExecOpCode, setname_fn, proc.ExecOpCode) - if not _JobProcessor(queue, wrap_execop_fn, job)(): - # Schedule again - raise workerpool.DeferTask(priority=job.CalcPriority()) + _EvaluateJobProcessorResult(queue.depmgr, job, + _JobProcessor(queue, wrap_execop_fn, job)()) @staticmethod def _WrapExecOpCode(setname_fn, execop_fn, op, *args, **kwargs): @@ -1192,6 +1454,142 @@ class _JobQueueWorkerPool(workerpool.WorkerPool): self.queue = queue +class _JobDependencyManager: + """Keeps track of job dependencies. + + """ + (WAIT, + ERROR, + CANCEL, + CONTINUE, + WRONGSTATUS) = range(1, 6) + + def __init__(self, getstatus_fn, enqueue_fn): + """Initializes this class. + + """ + self._getstatus_fn = getstatus_fn + self._enqueue_fn = enqueue_fn + + self._waiters = {} + self._lock = locking.SharedLock("JobDepMgr") + + @locking.ssynchronized(_LOCK, shared=1) + def GetLockInfo(self, requested): # pylint: disable=W0613 + """Retrieves information about waiting jobs. + + @type requested: set + @param requested: Requested information, see C{query.LQ_*} + + """ + # No need to sort here, that's being done by the lock manager and query + # library. There are no priorities for notifying jobs, hence all show up as + # one item under "pending". + return [("job/%s" % job_id, None, None, + [("job", [job.id for job in waiters])]) + for job_id, waiters in self._waiters.items() + if waiters] + + @locking.ssynchronized(_LOCK, shared=1) + def JobWaiting(self, job): + """Checks if a job is waiting. + + """ + return compat.any(job in jobs + for jobs in self._waiters.values()) + + @locking.ssynchronized(_LOCK) + def CheckAndRegister(self, job, dep_job_id, dep_status): + """Checks if a dependency job has the requested status. + + If the other job is not yet in a finalized status, the calling job will be + notified (re-added to the workerpool) at a later point. 
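+
+    The result is a (status, message) tuple; e.g. while the dependency is
+    still running (values illustrative)::
+
+      (result, msg) = queue.depmgr.CheckAndRegister(
+          job, 1234, [constants.JOB_STATUS_SUCCESS])
+      # result == _JobDependencyManager.WAIT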
+ + @type job: L{_QueuedJob} + @param job: Job object + @type dep_job_id: int + @param dep_job_id: ID of dependency job + @type dep_status: list + @param dep_status: Required status + + """ + assert ht.TJobId(job.id) + assert ht.TJobId(dep_job_id) + assert ht.TListOf(ht.TElemOf(constants.JOBS_FINALIZED))(dep_status) + + if job.id == dep_job_id: + return (self.ERROR, "Job can't depend on itself") + + # Get status of dependency job + try: + status = self._getstatus_fn(dep_job_id) + except errors.JobLost, err: + return (self.ERROR, "Dependency error: %s" % err) + + assert status in constants.JOB_STATUS_ALL + + job_id_waiters = self._waiters.setdefault(dep_job_id, set()) + + if status not in constants.JOBS_FINALIZED: + # Register for notification and wait for job to finish + job_id_waiters.add(job) + return (self.WAIT, + "Need to wait for job %s, wanted status '%s'" % + (dep_job_id, dep_status)) + + # Remove from waiters list + if job in job_id_waiters: + job_id_waiters.remove(job) + + if (status == constants.JOB_STATUS_CANCELED and + constants.JOB_STATUS_CANCELED not in dep_status): + return (self.CANCEL, "Dependency job %s was cancelled" % dep_job_id) + + elif not dep_status or status in dep_status: + return (self.CONTINUE, + "Dependency job %s finished with status '%s'" % + (dep_job_id, status)) + + else: + return (self.WRONGSTATUS, + "Dependency job %s finished with status '%s'," + " not one of '%s' as required" % + (dep_job_id, status, utils.CommaJoin(dep_status))) + + def _RemoveEmptyWaitersUnlocked(self): + """Remove all jobs without actual waiters. + + """ + for job_id in [job_id for (job_id, waiters) in self._waiters.items() + if not waiters]: + del self._waiters[job_id] + + def NotifyWaiters(self, job_id): + """Notifies all jobs waiting for a certain job ID. + + @attention: Do not call until L{CheckAndRegister} returned a status other + than C{WAITDEP} for C{job_id}, or behaviour is undefined + @type job_id: int + @param job_id: Job ID + + """ + assert ht.TJobId(job_id) + + self._lock.acquire() + try: + self._RemoveEmptyWaitersUnlocked() + + jobs = self._waiters.pop(job_id, None) + finally: + self._lock.release() + + if jobs: + # Re-add jobs to workerpool + logging.debug("Re-adding %s jobs which were waiting for job %s", + len(jobs), job_id) + self._enqueue_fn(jobs) + + def _RequireOpenQueue(fn): """Decorator for "public" functions. @@ -1211,20 +1609,41 @@ def _RequireOpenQueue(fn): """ def wrapper(self, *args, **kwargs): - # pylint: disable-msg=W0212 + # pylint: disable=W0212 assert self._queue_filelock is not None, "Queue should be open" return fn(self, *args, **kwargs) return wrapper -class JobQueue(object): - """Queue used to manage the jobs. +def _RequireNonDrainedQueue(fn): + """Decorator checking for a non-drained queue. - @cvar _RE_JOB_FILE: regex matching the valid job file names + To be used with functions submitting new jobs. """ - _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE) + def wrapper(self, *args, **kwargs): + """Wrapper function. + @raise errors.JobQueueDrainError: if the job queue is marked for draining + + """ + # Ok when sharing the big job queue lock, as the drain file is created when + # the lock is exclusive. 
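+    # Note: the drain flag is persistent (a flag file checked via
+    # jstore.CheckDrainFlag), while _accepting_jobs is in-memory state
+    # cleared by PrepareShutdown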
+ # Needs access to protected member, pylint: disable=W0212 + if self._drained: + raise errors.JobQueueDrainError("Job queue is drained, refusing job") + + if not self._accepting_jobs: + raise errors.JobQueueError("Job queue is shutting down, refusing job") + + return fn(self, *args, **kwargs) + return wrapper + + +class JobQueue(object): + """Queue used to manage the jobs. + + """ def __init__(self, context): """Constructor for JobQueue. @@ -1252,6 +1671,9 @@ class JobQueue(object): self.acquire = self._lock.acquire self.release = self._lock.release + # Accept jobs by default + self._accepting_jobs = True + # Initialize the queue, and acquire the filelock. # This ensures no other process is working on the job queue. self._queue_filelock = jstore.InitAndVerifyQueue(must_lock=True) @@ -1271,10 +1693,16 @@ class JobQueue(object): # TODO: Check consistency across nodes - self._queue_size = 0 + self._queue_size = None self._UpdateQueueSizeUnlocked() + assert ht.TInt(self._queue_size) self._drained = jstore.CheckDrainFlag() + # Job dependencies + self.depmgr = _JobDependencyManager(self._GetJobStatusForDependencies, + self._EnqueueJobs) + self.context.glm.AddToLockMonitor(self.depmgr) + # Setup worker pool self._wpool = _JobQueueWorkerPool(self) try: @@ -1319,26 +1747,33 @@ class JobQueue(object): restartjobs.append(job) elif status in (constants.JOB_STATUS_RUNNING, - constants.JOB_STATUS_WAITLOCK, + constants.JOB_STATUS_WAITING, constants.JOB_STATUS_CANCELING): logging.warning("Unfinished job %s found: %s", job.id, job) - if status == constants.JOB_STATUS_WAITLOCK: + if status == constants.JOB_STATUS_WAITING: # Restart job job.MarkUnfinishedOps(constants.OP_STATUS_QUEUED, None) restartjobs.append(job) else: job.MarkUnfinishedOps(constants.OP_STATUS_ERROR, "Unclean master daemon shutdown") + job.Finalize() self.UpdateJobUnlocked(job) if restartjobs: logging.info("Restarting %s jobs", len(restartjobs)) - self._EnqueueJobs(restartjobs) + self._EnqueueJobsUnlocked(restartjobs) logging.info("Job queue inspection finished") + def _GetRpc(self, address_list): + """Gets RPC runner with context. 
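+
+    @type address_list: list or None
+    @param address_list: Node addresses (e.g. from L{_GetNodeIp}) or C{None}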
+ + """ + return rpc.JobQueueRunner(self.context, address_list) + @locking.ssynchronized(_LOCK) @_RequireOpenQueue def AddNode(self, node): @@ -1352,7 +1787,7 @@ class JobQueue(object): assert node_name != self._my_hostname # Clean queue directory on added node - result = rpc.RpcRunner.call_jobqueue_purge(node_name) + result = self._GetRpc(None).call_jobqueue_purge(node_name) msg = result.fail_msg if msg: logging.warning("Cannot cleanup queue directory on node %s: %s", @@ -1368,20 +1803,31 @@ class JobQueue(object): files = [self._GetJobPath(job_id) for job_id in self._GetJobIDsUnlocked()] # Upload current serial file - files.append(constants.JOB_QUEUE_SERIAL_FILE) + files.append(pathutils.JOB_QUEUE_SERIAL_FILE) + + # Static address list + addrs = [node.primary_ip] for file_name in files: # Read file content content = utils.ReadFile(file_name) - result = rpc.RpcRunner.call_jobqueue_update([node_name], - [node.primary_ip], - file_name, content) + result = _CallJqUpdate(self._GetRpc(addrs), [node_name], + file_name, content) msg = result[node_name].fail_msg if msg: logging.error("Failed to upload file %s to node %s: %s", file_name, node_name, msg) + # Set queue drained flag + result = \ + self._GetRpc(addrs).call_jobqueue_set_drain_flag([node_name], + self._drained) + msg = result[node_name].fail_msg + if msg: + logging.error("Failed to set queue drained flag on node %s: %s", + node_name, msg) + self._nodes[node_name] = node.primary_ip @locking.ssynchronized(_LOCK) @@ -1456,11 +1902,12 @@ class JobQueue(object): """ getents = runtime.GetEnts() utils.WriteFile(file_name, data=data, uid=getents.masterd_uid, - gid=getents.masterd_gid) + gid=getents.daemons_gid, + mode=constants.JOB_QUEUE_FILES_PERMS) if replicate: names, addrs = self._GetNodeIp() - result = rpc.RpcRunner.call_jobqueue_update(names, addrs, file_name, data) + result = _CallJqUpdate(self._GetRpc(addrs), names, file_name, data) self._CheckRpcResult(result, self._nodes, "Updating %s" % file_name) def _RenameFilesUnlocked(self, rename): @@ -1479,42 +1926,9 @@ class JobQueue(object): # ... and on all nodes names, addrs = self._GetNodeIp() - result = rpc.RpcRunner.call_jobqueue_rename(names, addrs, rename) + result = self._GetRpc(addrs).call_jobqueue_rename(names, rename) self._CheckRpcResult(result, self._nodes, "Renaming files (%r)" % rename) - @staticmethod - def _FormatJobID(job_id): - """Convert a job ID to string format. - - Currently this just does C{str(job_id)} after performing some - checks, but if we want to change the job id format this will - abstract this change. - - @type job_id: int or long - @param job_id: the numeric job id - @rtype: str - @return: the formatted job id - - """ - if not isinstance(job_id, (int, long)): - raise errors.ProgrammerError("Job ID '%s' not numeric" % job_id) - if job_id < 0: - raise errors.ProgrammerError("Job ID %s is negative" % job_id) - - return str(job_id) - - @classmethod - def _GetArchiveDirectory(cls, job_id): - """Returns the archive directory for a job. - - @type job_id: str - @param job_id: Job identifier - @rtype: str - @return: Directory name - - """ - return str(int(job_id) / JOBS_PER_ARCHIVE_DIRECTORY) - def _NewSerialsUnlocked(self, count): """Generates a new job identifier. @@ -1522,23 +1936,27 @@ class JobQueue(object): @type count: integer @param count: how many serials to return - @rtype: str - @return: a string representing the job identifier. + @rtype: list of int + @return: a list of job identifiers. 
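+
+    Illustrative example: with C{_last_serial} at 10, a call with
+    C{count=3} writes 13 to the serial file and returns the job IDs for
+    serials 11, 12 and 13.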
""" - assert count > 0 + assert ht.TNonNegativeInt(count) + # New number serial = self._last_serial + count # Write to file - self._UpdateJobQueueFile(constants.JOB_QUEUE_SERIAL_FILE, + self._UpdateJobQueueFile(pathutils.JOB_QUEUE_SERIAL_FILE, "%s\n" % serial, True) - result = [self._FormatJobID(v) - for v in range(self._last_serial, serial + 1)] + result = [jstore.FormatJobID(v) + for v in range(self._last_serial + 1, serial + 1)] + # Keep it only if we were able to write the file self._last_serial = serial + assert len(result) == count + return result @staticmethod @@ -1551,10 +1969,10 @@ class JobQueue(object): @return: the path to the job file """ - return utils.PathJoin(constants.QUEUE_DIR, "job-%s" % job_id) + return utils.PathJoin(pathutils.QUEUE_DIR, "job-%s" % job_id) - @classmethod - def _GetArchivedJobPath(cls, job_id): + @staticmethod + def _GetArchivedJobPath(job_id): """Returns the archived job file for a give job id. @type job_id: str @@ -1563,10 +1981,30 @@ class JobQueue(object): @return: the path to the archived job file """ - return utils.PathJoin(constants.JOB_QUEUE_ARCHIVE_DIR, - cls._GetArchiveDirectory(job_id), "job-%s" % job_id) + return utils.PathJoin(pathutils.JOB_QUEUE_ARCHIVE_DIR, + jstore.GetArchiveDirectory(job_id), + "job-%s" % job_id) + + @staticmethod + def _DetermineJobDirectories(archived): + """Build list of directories containing job files. + + @type archived: bool + @param archived: Whether to include directories for archived jobs + @rtype: list + + """ + result = [pathutils.QUEUE_DIR] + + if archived: + archive_path = pathutils.JOB_QUEUE_ARCHIVE_DIR + result.extend(map(compat.partial(utils.PathJoin, archive_path), + utils.ListVisibleFiles(archive_path))) - def _GetJobIDsUnlocked(self, sort=True): + return result + + @classmethod + def _GetJobIDsUnlocked(cls, sort=True, archived=False): """Return all known job IDs. The method only looks at disk because it's a requirement that all @@ -1580,12 +2018,15 @@ class JobQueue(object): """ jlist = [] - for filename in utils.ListVisibleFiles(constants.QUEUE_DIR): - m = self._RE_JOB_FILE.match(filename) - if m: - jlist.append(m.group(1)) + + for path in cls._DetermineJobDirectories(archived): + for filename in utils.ListVisibleFiles(path): + m = constants.JOB_FILE_RE.match(filename) + if m: + jlist.append(int(m.group(1))) + if sort: - jlist = utils.NiceSort(jlist) + jlist.sort() return jlist def _LoadJobUnlocked(self, job_id): @@ -1595,6 +2036,7 @@ class JobQueue(object): existing, or try to load the job from the disk. If loading from disk, it will also add the job to the cache. + @type job_id: int @param job_id: the job id @rtype: L{_QueuedJob} or None @return: either None or the job object @@ -1603,10 +2045,11 @@ class JobQueue(object): job = self._memcache.get(job_id, None) if job: logging.debug("Found job %s in memcache", job_id) + assert job.writable, "Found read-only job in memcache" return job try: - job = self._LoadJobFromDisk(job_id) + job = self._LoadJobFromDisk(job_id, False) if job is None: return job except errors.JobFileCorrupted: @@ -1621,53 +2064,75 @@ class JobQueue(object): self._RenameFilesUnlocked([(old_path, new_path)]) return None + assert job.writable, "Job just loaded is not writable" + self._memcache[job_id] = job logging.debug("Added job %s to the cache", job_id) return job - def _LoadJobFromDisk(self, job_id): + def _LoadJobFromDisk(self, job_id, try_archived, writable=None): """Load the given job file from disk. Given a job file, read, load and restore it in a _QueuedJob format. 
- @type job_id: string + @type job_id: int @param job_id: job identifier + @type try_archived: bool + @param try_archived: Whether to try loading an archived job @rtype: L{_QueuedJob} or None @return: either None or the job object """ - filepath = self._GetJobPath(job_id) - logging.debug("Loading job from %s", filepath) - try: - raw_data = utils.ReadFile(filepath) - except EnvironmentError, err: - if err.errno in (errno.ENOENT, ): - return None - raise + path_functions = [(self._GetJobPath, False)] + + if try_archived: + path_functions.append((self._GetArchivedJobPath, True)) + + raw_data = None + archived = None + + for (fn, archived) in path_functions: + filepath = fn(job_id) + logging.debug("Loading job from %s", filepath) + try: + raw_data = utils.ReadFile(filepath) + except EnvironmentError, err: + if err.errno != errno.ENOENT: + raise + else: + break + + if not raw_data: + return None + + if writable is None: + writable = not archived try: data = serializer.LoadJson(raw_data) - job = _QueuedJob.Restore(self, data) - except Exception, err: # pylint: disable-msg=W0703 + job = _QueuedJob.Restore(self, data, writable, archived) + except Exception, err: # pylint: disable=W0703 raise errors.JobFileCorrupted(err) return job - def SafeLoadJobFromDisk(self, job_id): + def SafeLoadJobFromDisk(self, job_id, try_archived, writable=None): """Load the given job file from disk. Given a job file, read, load and restore it in a _QueuedJob format. In case of error reading the job, it gets returned as None, and the exception is logged. - @type job_id: string + @type job_id: int @param job_id: job identifier + @type try_archived: bool + @param try_archived: Whether to try loading an archived job @rtype: L{_QueuedJob} or None @return: either None or the job object """ try: - return self._LoadJobFromDisk(job_id) + return self._LoadJobFromDisk(job_id, try_archived, writable=writable) except (errors.JobFileCorrupted, EnvironmentError): logging.exception("Can't load/parse job %s", job_id) return None @@ -1687,10 +2152,18 @@ class JobQueue(object): @param drain_flag: Whether to set or unset the drain flag """ + # Change flag locally jstore.SetDrainFlag(drain_flag) self._drained = drain_flag + # ... and on all nodes + (names, addrs) = self._GetNodeIp() + result = \ + self._GetRpc(addrs).call_jobqueue_set_drain_flag(names, drain_flag) + self._CheckRpcResult(result, self._nodes, + "Setting queue drain flag to %s" % drain_flag) + return True @_RequireOpenQueue @@ -1706,28 +2179,30 @@ class JobQueue(object): @param ops: The list of OpCodes that will become the new job. @rtype: L{_QueuedJob} @return: the job object to be queued - @raise errors.JobQueueDrainError: if the job queue is marked for draining @raise errors.JobQueueFull: if the job queue has too many jobs in it @raise errors.GenericError: If an opcode is not valid """ - # Ok when sharing the big job queue lock, as the drain file is created when - # the lock is exclusive. 
- if self._drained: - raise errors.JobQueueDrainError("Job queue is drained, refusing job") - if self._queue_size >= constants.JOB_QUEUE_SIZE_HARD_LIMIT: raise errors.JobQueueFull() - job = _QueuedJob(self, job_id, ops) + job = _QueuedJob(self, job_id, ops, True) - # Check priority for idx, op in enumerate(job.ops): + # Check priority if op.priority not in constants.OP_PRIO_SUBMIT_VALID: allowed = utils.CommaJoin(constants.OP_PRIO_SUBMIT_VALID) raise errors.GenericError("Opcode %s has invalid priority %s, allowed" " are %s" % (idx, op.priority, allowed)) + # Check job dependencies + dependencies = getattr(op.input, opcodes.DEPEND_ATTR, None) + if not opcodes.TNoRelativeJobDependencies(dependencies): + raise errors.GenericError("Opcode %s has invalid dependencies, must" + " match %s: %s" % + (idx, opcodes.TNoRelativeJobDependencies, + dependencies)) + # Write to disk self.UpdateJobUnlocked(job) @@ -1740,42 +2215,115 @@ class JobQueue(object): @locking.ssynchronized(_LOCK) @_RequireOpenQueue + @_RequireNonDrainedQueue def SubmitJob(self, ops): """Create and store a new job. @see: L{_SubmitJobUnlocked} """ - job_id = self._NewSerialsUnlocked(1)[0] - self._EnqueueJobs([self._SubmitJobUnlocked(job_id, ops)]) + (job_id, ) = self._NewSerialsUnlocked(1) + self._EnqueueJobsUnlocked([self._SubmitJobUnlocked(job_id, ops)]) return job_id @locking.ssynchronized(_LOCK) @_RequireOpenQueue + @_RequireNonDrainedQueue def SubmitManyJobs(self, jobs): """Create and store multiple jobs. @see: L{_SubmitJobUnlocked} """ - results = [] - added_jobs = [] all_job_ids = self._NewSerialsUnlocked(len(jobs)) - for job_id, ops in zip(all_job_ids, jobs): - try: - added_jobs.append(self._SubmitJobUnlocked(job_id, ops)) - status = True - data = job_id - except errors.GenericError, err: - data = ("%s; opcodes %s" % - (err, utils.CommaJoin(op.Summary() for op in ops))) - status = False - results.append((status, data)) - self._EnqueueJobs(added_jobs) + (results, added_jobs) = \ + self._SubmitManyJobsUnlocked(jobs, all_job_ids, []) + + self._EnqueueJobsUnlocked(added_jobs) return results + @staticmethod + def _FormatSubmitError(msg, ops): + """Formats errors which occurred while submitting a job. + + """ + return ("%s; opcodes %s" % + (msg, utils.CommaJoin(op.Summary() for op in ops))) + + @staticmethod + def _ResolveJobDependencies(resolve_fn, deps): + """Resolves relative job IDs in dependencies. + + @type resolve_fn: callable + @param resolve_fn: Function to resolve a relative job ID + @type deps: list + @param deps: Dependencies + @rtype: tuple; (boolean, string or list) + @return: If successful (first tuple item), the returned list contains + resolved job IDs along with the requested status; if not successful, + the second element is an error message + + """ + result = [] + + for (dep_job_id, dep_status) in deps: + if ht.TRelativeJobId(dep_job_id): + assert ht.TInt(dep_job_id) and dep_job_id < 0 + try: + job_id = resolve_fn(dep_job_id) + except IndexError: + # Abort + return (False, "Unable to resolve relative job ID %s" % dep_job_id) + else: + job_id = dep_job_id + + result.append((job_id, dep_status)) + + return (True, result) + + def _SubmitManyJobsUnlocked(self, jobs, job_ids, previous_job_ids): + """Create and store multiple jobs. 
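+
+    Relative job IDs in opcode dependencies (negative integers) are
+    resolved against jobs submitted earlier in the same batch, plus
+    C{previous_job_ids}; e.g. an illustrative dependency of the second
+    job on the first finishing successfully::
+
+      op2.depends = [(-1, [constants.JOB_STATUS_SUCCESS])]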
+ + @see: L{_SubmitJobUnlocked} + + """ + results = [] + added_jobs = [] + + def resolve_fn(job_idx, reljobid): + assert reljobid < 0 + return (previous_job_ids + job_ids[:job_idx])[reljobid] + + for (idx, (job_id, ops)) in enumerate(zip(job_ids, jobs)): + for op in ops: + if getattr(op, opcodes.DEPEND_ATTR, None): + (status, data) = \ + self._ResolveJobDependencies(compat.partial(resolve_fn, idx), + op.depends) + if not status: + # Abort resolving dependencies + assert ht.TNonEmptyString(data), "No error message" + break + # Use resolved dependencies + op.depends = data + else: + try: + job = self._SubmitJobUnlocked(job_id, ops) + except errors.GenericError, err: + status = False + data = self._FormatSubmitError(str(err), ops) + else: + status = True + data = job_id + added_jobs.append(job) + + results.append((status, data)) + + return (results, added_jobs) + + @locking.ssynchronized(_LOCK) def _EnqueueJobs(self, jobs): """Helper function to add jobs to worker pool's queue. @@ -1783,8 +2331,39 @@ class JobQueue(object): @param jobs: List of all jobs """ + return self._EnqueueJobsUnlocked(jobs) + + def _EnqueueJobsUnlocked(self, jobs): + """Helper function to add jobs to worker pool's queue. + + @type jobs: list + @param jobs: List of all jobs + + """ + assert self._lock.is_owned(shared=0), "Must own lock in exclusive mode" self._wpool.AddManyTasks([(job, ) for job in jobs], - priority=[job.CalcPriority() for job in jobs]) + priority=[job.CalcPriority() for job in jobs], + task_id=map(_GetIdAttr, jobs)) + + def _GetJobStatusForDependencies(self, job_id): + """Gets the status of a job for dependencies. + + @type job_id: int + @param job_id: Job ID + @raise errors.JobLost: If job can't be found + + """ + # Not using in-memory cache as doing so would require an exclusive lock + + # Try to load from disk + job = self.SafeLoadJobFromDisk(job_id, True, writable=False) + + assert not job.writable, "Got writable job" # pylint: disable=E1101 + + if job: + return job.CalcStatus() + + raise errors.JobLost("Job %s not found" % job_id) @_RequireOpenQueue def UpdateJobUnlocked(self, job, replicate=True): @@ -1800,8 +2379,14 @@ class JobQueue(object): @param replicate: whether to replicate the change to remote nodes """ + if __debug__: + finalized = job.CalcStatus() in constants.JOBS_FINALIZED + assert (finalized ^ (job.end_timestamp is None)) + assert job.writable, "Can't update read-only job" + assert not job.archived, "Can't update archived job" + filename = self._GetJobPath(job.id) - data = serializer.DumpJson(job.Serialize(), indent=False) + data = serializer.DumpJson(job.Serialize()) logging.debug("Writing job %s to %s", job.id, filename) self._UpdateJobQueueFile(filename, data, replicate) @@ -1809,7 +2394,7 @@ class JobQueue(object): timeout): """Waits for changes in a job. - @type job_id: string + @type job_id: int @param job_id: Job identifier @type fields: list of strings @param fields: Which fields to check for changes @@ -1829,7 +2414,8 @@ class JobQueue(object): as such by the clients """ - load_fn = compat.partial(self.SafeLoadJobFromDisk, job_id) + load_fn = compat.partial(self.SafeLoadJobFromDisk, job_id, True, + writable=False) helper = _WaitForJobChangesHelper() @@ -1843,20 +2429,68 @@ class JobQueue(object): This will only succeed if the job has not started yet. - @type job_id: string + @type job_id: int @param job_id: job ID of job to be cancelled. 
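+    @rtype: tuple; (bool, string)
+    @return: Success and a human-readable message, as returned by
+      L{_QueuedJob.Cancel}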
""" logging.info("Cancelling job %s", job_id) + return self._ModifyJobUnlocked(job_id, lambda job: job.Cancel()) + + @locking.ssynchronized(_LOCK) + @_RequireOpenQueue + def ChangeJobPriority(self, job_id, priority): + """Changes a job's priority. + + @type job_id: int + @param job_id: ID of the job whose priority should be changed + @type priority: int + @param priority: New priority + + """ + logging.info("Changing priority of job %s to %s", job_id, priority) + + if priority not in constants.OP_PRIO_SUBMIT_VALID: + allowed = utils.CommaJoin(constants.OP_PRIO_SUBMIT_VALID) + raise errors.GenericError("Invalid priority %s, allowed are %s" % + (priority, allowed)) + + def fn(job): + (success, msg) = job.ChangePriority(priority) + + if success: + try: + self._wpool.ChangeTaskPriority(job.id, job.CalcPriority()) + except workerpool.NoSuchTask: + logging.debug("Job %s is not in workerpool at this time", job.id) + + return (success, msg) + + return self._ModifyJobUnlocked(job_id, fn) + + def _ModifyJobUnlocked(self, job_id, mod_fn): + """Modifies a job. + + @type job_id: int + @param job_id: Job ID + @type mod_fn: callable + @param mod_fn: Modifying function, receiving job object as parameter, + returning tuple of (status boolean, message string) + + """ job = self._LoadJobUnlocked(job_id) if not job: logging.debug("Job %s not found", job_id) return (False, "Job %s not found" % job_id) - (success, msg) = job.Cancel() + assert job.writable, "Can't modify read-only job" + assert not job.archived, "Can't modify archived job" + + (success, msg) = mod_fn(job) if success: + # If the job was finalized (e.g. cancelled), this is the final write + # allowed. The job can be archived anytime. self.UpdateJobUnlocked(job) return (success, msg) @@ -1874,6 +2508,9 @@ class JobQueue(object): archive_jobs = [] rename_files = [] for job in jobs: + assert job.writable, "Can't archive read-only job" + assert not job.archived, "Can't cancel archived job" + if job.CalcStatus() not in constants.JOBS_FINALIZED: logging.debug("Job %s is not yet done", job.id) continue @@ -1904,7 +2541,7 @@ class JobQueue(object): This is just a wrapper over L{_ArchiveJobsUnlocked}. - @type job_id: string + @type job_id: int @param job_id: Job ID of job to be archived. @rtype: bool @return: Whether job was archived @@ -1975,7 +2612,47 @@ class JobQueue(object): return (archived_count, len(all_job_ids) - last_touched) - def QueryJobs(self, job_ids, fields): + def _Query(self, fields, qfilter): + qobj = query.Query(query.JOB_FIELDS, fields, qfilter=qfilter, + namefield="id") + + # Archived jobs are only looked at if the "archived" field is referenced + # either as a requested field or in the filter. By default archived jobs + # are ignored. + include_archived = (query.JQ_ARCHIVED in qobj.RequestedData()) + + job_ids = qobj.RequestedNames() + + list_all = (job_ids is None) + + if list_all: + # Since files are added to/removed from the queue atomically, there's no + # risk of getting the job ids in an inconsistent state. + job_ids = self._GetJobIDsUnlocked(archived=include_archived) + + jobs = [] + + for job_id in job_ids: + job = self.SafeLoadJobFromDisk(job_id, True, writable=False) + if job is not None or not list_all: + jobs.append((job_id, job)) + + return (qobj, jobs, list_all) + + def QueryJobs(self, fields, qfilter): + """Returns a list of jobs in queue. 
+ + @type fields: sequence + @param fields: List of wanted fields + @type qfilter: None or query2 filter (list) + @param qfilter: Query filter + + """ + (qobj, ctx, _) = self._Query(fields, qfilter) + + return query.GetQueryResponse(qobj, ctx, sort_by_name=False) + + def OldStyleQueryJobs(self, job_ids, fields): """Returns a list of jobs in queue. @type job_ids: list @@ -1987,22 +2664,49 @@ class JobQueue(object): the requested fields """ - jobs = [] - list_all = False - if not job_ids: - # Since files are added to/removed from the queue atomically, there's no - # risk of getting the job ids in an inconsistent state. - job_ids = self._GetJobIDsUnlocked() - list_all = True + # backwards compat: + job_ids = [int(jid) for jid in job_ids] + qfilter = qlang.MakeSimpleFilter("id", job_ids) - for job_id in job_ids: - job = self.SafeLoadJobFromDisk(job_id) - if job is not None: - jobs.append(job.GetInfo(fields)) - elif not list_all: - jobs.append(None) + (qobj, ctx, _) = self._Query(fields, qfilter) + + return qobj.OldStyleQuery(ctx, sort_by_name=False) + + @locking.ssynchronized(_LOCK) + def PrepareShutdown(self): + """Prepare to stop the job queue. + + Disables execution of jobs in the workerpool and returns whether there are + any jobs currently running. If the latter is the case, the job queue is not + yet ready for shutdown. Once this function returns C{True} L{Shutdown} can + be called without interfering with any job. Queued and unfinished jobs will + be resumed next time. + + Once this function has been called no new job submissions will be accepted + (see L{_RequireNonDrainedQueue}). - return jobs + @rtype: bool + @return: Whether there are any running jobs + + """ + if self._accepting_jobs: + self._accepting_jobs = False + + # Tell worker pool to stop processing pending tasks + self._wpool.SetActive(False) + + return self._wpool.HasRunningTasks() + + def AcceptingJobsUnlocked(self): + """Returns whether jobs are accepted. + + Once L{PrepareShutdown} has been called, no new jobs are accepted and the + queue is shutting down. + + @rtype: bool + + """ + return self._accepting_jobs @locking.ssynchronized(_LOCK) @_RequireOpenQueue