#
#
-# Copyright (C) 2006, 2007 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# 02110-1301, USA.
-"""Module implementing the job queue handling."""
+"""Module implementing the job queue handling.
+
+Locking: there's a single, large lock in the L{JobQueue} class. It's
+used by all other classes in this module.
+
+@var JOBQUEUE_THREADS: the number of worker threads we start for
+ processing jobs
+
+"""
-import os
import logging
-import threading
import errno
import re
import time
+import weakref
+
+try:
+ # pylint: disable-msg=E0611
+ from pyinotify import pyinotify
+except ImportError:
+ import pyinotify
+from ganeti import asyncnotifier
from ganeti import constants
from ganeti import serializer
from ganeti import workerpool
+from ganeti import locking
from ganeti import opcodes
from ganeti import errors
from ganeti import mcpu
from ganeti import utils
+from ganeti import jstore
from ganeti import rpc
+from ganeti import runtime
+from ganeti import netutils
+from ganeti import compat
-JOBQUEUE_THREADS = 5
+JOBQUEUE_THREADS = 25
+JOBS_PER_ARCHIVE_DIRECTORY = 10000
+# member lock names to be passed to @ssynchronized decorator
+_LOCK = "_lock"
+_QUEUE = "_queue"
-class _QueuedOpCode(object):
- """Encasulates an opcode object.
- Access is synchronized by the '_lock' attribute.
+class CancelJob(Exception):
+ """Special exception to cancel a job.
+
+ """
+
- The 'log' attribute holds the execution log and consists of tuples
- of the form (timestamp, level, message).
+def TimeStampNow():
+ """Returns the current timestamp.
+
+ @rtype: tuple
+ @return: the current time in the (seconds, microseconds) format
+
+ """
+ return utils.SplitTime(time.time())
+
+
+class _QueuedOpCode(object):
+ """Encapsulates an opcode object.
+
+ @ivar log: holds the execution log and consists of tuples
+ of the form C{(log_serial, timestamp, level, message)}
+ @ivar input: the OpCode we encapsulate
+ @ivar status: the current status
+ @ivar result: the result of the LU execution
+ @ivar start_timestamp: timestamp for the start of the execution
+ @ivar exec_timestamp: timestamp for the actual LU Exec() function invocation
+ @ivar end_timestamp: timestamp for the end of the execution
"""
+ __slots__ = ["input", "status", "result", "log", "priority",
+ "start_timestamp", "exec_timestamp", "end_timestamp",
+ "__weakref__"]
+
def __init__(self, op):
- self.__Setup(op, constants.OP_STATUS_QUEUED, None, [])
+ """Constructor for the _QuededOpCode.
- def __Setup(self, input_, status, result, log):
- self._lock = threading.Lock()
- self.input = input_
- self.status = status
- self.result = result
- self.log = log
+ @type op: L{opcodes.OpCode}
+ @param op: the opcode we encapsulate
+
+ """
+ self.input = op
+ self.status = constants.OP_STATUS_QUEUED
+ self.result = None
+ self.log = []
+ self.start_timestamp = None
+ self.exec_timestamp = None
+ self.end_timestamp = None
+
+ # Get initial priority (it might change during the lifetime of this opcode)
+ self.priority = getattr(op, "priority", constants.OP_PRIO_DEFAULT)
@classmethod
def Restore(cls, state):
- obj = object.__new__(cls)
- obj.__Setup(opcodes.OpCode.LoadOpCode(state["input"]),
- state["status"], state["result"], state["log"])
+ """Restore the _QueuedOpCode from the serialized form.
+
+ @type state: dict
+ @param state: the serialized state
+ @rtype: _QueuedOpCode
+ @return: a new _QueuedOpCode instance
+
+ """
+ obj = _QueuedOpCode.__new__(cls)
+ obj.input = opcodes.OpCode.LoadOpCode(state["input"])
+ obj.status = state["status"]
+ obj.result = state["result"]
+ obj.log = state["log"]
+ obj.start_timestamp = state.get("start_timestamp", None)
+ obj.exec_timestamp = state.get("exec_timestamp", None)
+ obj.end_timestamp = state.get("end_timestamp", None)
+ obj.priority = state.get("priority", constants.OP_PRIO_DEFAULT)
return obj
- @utils.LockedMethod
def Serialize(self):
+ """Serializes this _QueuedOpCode.
+
+ @rtype: dict
+ @return: the dictionary holding the serialized state
+
+ """
return {
"input": self.input.__getstate__(),
"status": self.status,
"result": self.result,
"log": self.log,
+ "start_timestamp": self.start_timestamp,
+ "exec_timestamp": self.exec_timestamp,
+ "end_timestamp": self.end_timestamp,
+ "priority": self.priority,
}
- @utils.LockedMethod
- def GetInput(self):
- """Returns the original opcode.
-
- """
- return self.input
-
- @utils.LockedMethod
- def SetStatus(self, status, result):
- """Update the opcode status and result.
- """
- self.status = status
- self.result = result
+class _QueuedJob(object):
+ """In-memory job representation.
- @utils.LockedMethod
- def GetStatus(self):
- """Get the opcode status.
+ This is what we use to track the user-submitted jobs. Locking must
+ be taken care of by users of this class.
- """
- return self.status
+ @type queue: L{JobQueue}
+ @ivar queue: the parent queue
+ @ivar id: the job ID
+ @type ops: list
+ @ivar ops: the list of _QueuedOpCode that constitute the job
+ @type log_serial: int
+ @ivar log_serial: holds the index for the next log entry
+ @ivar received_timestamp: the timestamp for when the job was received
+ @ivar start_timestamp: the timestamp for start of execution
+ @ivar end_timestamp: the timestamp for end of execution
- @utils.LockedMethod
- def GetResult(self):
- """Get the opcode result.
+ """
+ # pylint: disable-msg=W0212
+ __slots__ = ["queue", "id", "ops", "log_serial", "ops_iter", "cur_opctx",
+ "received_timestamp", "start_timestamp", "end_timestamp",
+ "__weakref__"]
+
+ def __init__(self, queue, job_id, ops):
+ """Constructor for the _QueuedJob.
+
+ @type queue: L{JobQueue}
+ @param queue: our parent queue
+ @type job_id: str
+ @param job_id: our job id
+ @type ops: list
+ @param ops: the list of opcodes we hold, which will be encapsulated
+ in _QueuedOpCodes
"""
- return self.result
-
- @utils.LockedMethod
- def Log(self, *args):
- """Append a log entry.
+ if not ops:
+ raise errors.GenericError("A job needs at least one opcode")
- """
- assert len(args) < 2
+ self.queue = queue
+ self.id = job_id
+ self.ops = [_QueuedOpCode(op) for op in ops]
+ self.log_serial = 0
+ self.received_timestamp = TimeStampNow()
+ self.start_timestamp = None
+ self.end_timestamp = None
- if len(args) == 1:
- log_type = constants.ELOG_MESSAGE
- log_msg = args[0]
- else:
- log_type, log_msg = args
- self.log.append((time.time(), log_type, log_msg))
+ self._InitInMemory(self)
- @utils.LockedMethod
- def RetrieveLog(self, start_at=0):
- """Retrieve (a part of) the execution log.
+ @staticmethod
+ def _InitInMemory(obj):
+ """Initializes in-memory variables.
"""
- return self.log[start_at:]
+ obj.ops_iter = None
+ obj.cur_opctx = None
+ def __repr__(self):
+ status = ["%s.%s" % (self.__class__.__module__, self.__class__.__name__),
+ "id=%s" % self.id,
+ "ops=%s" % ",".join([op.input.Summary() for op in self.ops])]
-class _QueuedJob(object):
- """In-memory job representation.
+ return "<%s at %#x>" % (" ".join(status), id(self))
- This is what we use to track the user-submitted jobs.
-
- """
- def __init__(self, storage, job_id, ops):
- if not ops:
- # TODO
- raise Exception("No opcodes")
+ @classmethod
+ def Restore(cls, queue, state):
+ """Restore a _QueuedJob from serialized state:
- self.__Setup(storage, job_id, [_QueuedOpCode(op) for op in ops], -1)
+ @type queue: L{JobQueue}
+ @param queue: to which queue the restored job belongs
+ @type state: dict
+ @param state: the serialized state
+ @rtype: _QueuedJob
+ @return: the restored _QueuedJob instance
- def __Setup(self, storage, job_id, ops, run_op_index):
- self._lock = threading.Lock()
- self.storage = storage
- self.id = job_id
- self._ops = ops
- self.run_op_index = run_op_index
+ """
+ obj = _QueuedJob.__new__(cls)
+ obj.queue = queue
+ obj.id = state["id"]
+ obj.received_timestamp = state.get("received_timestamp", None)
+ obj.start_timestamp = state.get("start_timestamp", None)
+ obj.end_timestamp = state.get("end_timestamp", None)
+
+ obj.ops = []
+ obj.log_serial = 0
+ for op_state in state["ops"]:
+ op = _QueuedOpCode.Restore(op_state)
+ for log_entry in op.log:
+ obj.log_serial = max(obj.log_serial, log_entry[0])
+ obj.ops.append(op)
+
+ cls._InitInMemory(obj)
- @classmethod
- def Restore(cls, storage, state):
- obj = object.__new__(cls)
- op_list = [_QueuedOpCode.Restore(op_state) for op_state in state["ops"]]
- obj.__Setup(storage, state["id"], op_list, state["run_op_index"])
return obj
def Serialize(self):
+ """Serialize the _JobQueue instance.
+
+ @rtype: dict
+ @return: the serialized state
+
+ """
return {
"id": self.id,
- "ops": [op.Serialize() for op in self._ops],
- "run_op_index": self.run_op_index,
+ "ops": [op.Serialize() for op in self.ops],
+ "start_timestamp": self.start_timestamp,
+ "end_timestamp": self.end_timestamp,
+ "received_timestamp": self.received_timestamp,
}
- def SetUnclean(self, msg):
- try:
- for op in self._ops:
- op.SetStatus(constants.OP_STATUS_ERROR, msg)
- finally:
- self.storage.UpdateJob(self)
+ def CalcStatus(self):
+ """Compute the status of this job.
+
+ This function iterates over all the _QueuedOpCodes in the job and
+ based on their status, computes the job status.
+
+ The algorithm is:
+ - if we find a cancelled, or finished with error, the job
+ status will be the same
+ - otherwise, the last opcode with the status one of:
+ - waitlock
+ - canceling
+ - running
- def GetStatus(self):
+ will determine the job status
+
+ - otherwise, it means either all opcodes are queued, or success,
+ and the job status will be the same
+
+ @return: the job status
+
+ """
status = constants.JOB_STATUS_QUEUED
all_success = True
- for op in self._ops:
- op_status = op.GetStatus()
- if op_status == constants.OP_STATUS_SUCCESS:
+ for op in self.ops:
+ if op.status == constants.OP_STATUS_SUCCESS:
continue
all_success = False
- if op_status == constants.OP_STATUS_QUEUED:
+ if op.status == constants.OP_STATUS_QUEUED:
pass
- elif op_status == constants.OP_STATUS_RUNNING:
+ elif op.status == constants.OP_STATUS_WAITLOCK:
+ status = constants.JOB_STATUS_WAITLOCK
+ elif op.status == constants.OP_STATUS_RUNNING:
status = constants.JOB_STATUS_RUNNING
- elif op_status == constants.OP_STATUS_ERROR:
+ elif op.status == constants.OP_STATUS_CANCELING:
+ status = constants.JOB_STATUS_CANCELING
+ break
+ elif op.status == constants.OP_STATUS_ERROR:
status = constants.JOB_STATUS_ERROR
# The whole job fails if one opcode failed
break
+ elif op.status == constants.OP_STATUS_CANCELED:
+ status = constants.JOB_STATUS_CANCELED
+ break
if all_success:
status = constants.JOB_STATUS_SUCCESS
return status
- @utils.LockedMethod
- def GetRunOpIndex(self):
- return self.run_op_index
+ def CalcPriority(self):
+ """Gets the current priority for this job.
- def Run(self, proc):
- """Job executor.
+ Only unfinished opcodes are considered. When all are done, the default
+ priority is used.
+
+ @rtype: int
+
+ """
+ priorities = [op.priority for op in self.ops
+ if op.status not in constants.OPS_FINALIZED]
+
+ if not priorities:
+ # All opcodes are done, assume default priority
+ return constants.OP_PRIO_DEFAULT
+
+ return min(priorities)
- This functions processes a this job in the context of given processor
- instance.
+ def GetLogEntries(self, newer_than):
+ """Selectively returns the log entries.
- Args:
- - proc: Ganeti Processor to run the job with
+ @type newer_than: None or int
+ @param newer_than: if this is None, return all log entries,
+ otherwise return only the log entries with serial higher
+ than this value
+ @rtype: list
+ @return: the list of the log entries selected
"""
- try:
- count = len(self._ops)
- for idx, op in enumerate(self._ops):
- try:
- logging.debug("Op %s/%s: Starting %s", idx + 1, count, op)
-
- self._lock.acquire()
- try:
- self.run_op_index = idx
- finally:
- self._lock.release()
-
- op.SetStatus(constants.OP_STATUS_RUNNING, None)
- self.storage.UpdateJob(self)
-
- result = proc.ExecOpCode(op.input, op.Log)
-
- op.SetStatus(constants.OP_STATUS_SUCCESS, result)
- self.storage.UpdateJob(self)
- logging.debug("Op %s/%s: Successfully finished %s",
- idx + 1, count, op)
- except Exception, err:
- try:
- op.SetStatus(constants.OP_STATUS_ERROR, str(err))
- logging.debug("Op %s/%s: Error in %s", idx + 1, count, op)
- finally:
- self.storage.UpdateJob(self)
- raise
-
- except errors.GenericError, err:
- logging.error("ganeti exception %s", exc_info=err)
- except Exception, err:
- logging.error("unhandled exception %s", exc_info=err)
- except:
- logging.error("unhandled unknown exception %s", exc_info=err)
+ if newer_than is None:
+ serial = -1
+ else:
+ serial = newer_than
+ entries = []
+ for op in self.ops:
+ entries.extend(filter(lambda entry: entry[0] > serial, op.log))
-class _JobQueueWorker(workerpool.BaseWorker):
- def RunTask(self, job):
- logging.debug("Worker %s processing job %s",
- self.worker_id, job.id)
- # TODO: feedback function
- proc = mcpu.Processor(self.pool.context)
- try:
- job.Run(proc)
- finally:
- logging.debug("Worker %s finished job %s, status = %s",
- self.worker_id, job.id, job.GetStatus())
+ return entries
+ def GetInfo(self, fields):
+ """Returns information about a job.
-class _JobQueueWorkerPool(workerpool.WorkerPool):
- def __init__(self, context):
- super(_JobQueueWorkerPool, self).__init__(JOBQUEUE_THREADS,
- _JobQueueWorker)
- self.context = context
+ @type fields: list
+ @param fields: names of fields to return
+ @rtype: list
+ @return: list with one element for each field
+ @raise errors.OpExecError: when an invalid field
+ has been passed
+
+ """
+ row = []
+ for fname in fields:
+ if fname == "id":
+ row.append(self.id)
+ elif fname == "status":
+ row.append(self.CalcStatus())
+ elif fname == "priority":
+ row.append(self.CalcPriority())
+ elif fname == "ops":
+ row.append([op.input.__getstate__() for op in self.ops])
+ elif fname == "opresult":
+ row.append([op.result for op in self.ops])
+ elif fname == "opstatus":
+ row.append([op.status for op in self.ops])
+ elif fname == "oplog":
+ row.append([op.log for op in self.ops])
+ elif fname == "opstart":
+ row.append([op.start_timestamp for op in self.ops])
+ elif fname == "opexec":
+ row.append([op.exec_timestamp for op in self.ops])
+ elif fname == "opend":
+ row.append([op.end_timestamp for op in self.ops])
+ elif fname == "oppriority":
+ row.append([op.priority for op in self.ops])
+ elif fname == "received_ts":
+ row.append(self.received_timestamp)
+ elif fname == "start_ts":
+ row.append(self.start_timestamp)
+ elif fname == "end_ts":
+ row.append(self.end_timestamp)
+ elif fname == "summary":
+ row.append([op.input.Summary() for op in self.ops])
+ else:
+ raise errors.OpExecError("Invalid self query field '%s'" % fname)
+ return row
+ def MarkUnfinishedOps(self, status, result):
+ """Mark unfinished opcodes with a given status and result.
-class JobStorageBase(object):
- def __init__(self, id_prefix):
- self.id_prefix = id_prefix
+ This is a utility function for marking all running or waiting to
+ be run opcodes with a given status. Opcodes which are already
+ finalised are not changed.
- if id_prefix:
- prefix_pattern = re.escape("%s-" % id_prefix)
- else:
- prefix_pattern = ""
+ @param status: a given opcode status
+ @param result: the opcode result
+
+ """
+ not_marked = True
+ for op in self.ops:
+ if op.status in constants.OPS_FINALIZED:
+ assert not_marked, "Finalized opcodes found after non-finalized ones"
+ continue
+ op.status = status
+ op.result = result
+ not_marked = False
- # Apart from the prefix, all job IDs are numeric
- self._re_job_id = re.compile(r"^%s\d+$" % prefix_pattern)
+ def Cancel(self):
+ """Marks job as canceled/-ing if possible.
- def OwnsJobId(self, job_id):
- return self._re_job_id.match(job_id)
+ @rtype: tuple; (bool, string)
+ @return: Boolean describing whether job was successfully canceled or marked
+ as canceling and a text message
- def FormatJobID(self, job_id):
- if not isinstance(job_id, (int, long)):
- raise errors.ProgrammerError("Job ID '%s' not numeric" % job_id)
- if job_id < 0:
- raise errors.ProgrammerError("Job ID %s is negative" % job_id)
+ """
+ status = self.CalcStatus()
+
+ if status == constants.JOB_STATUS_QUEUED:
+ self.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
+ "Job canceled by request")
+ return (True, "Job %s canceled" % self.id)
+
+ elif status == constants.JOB_STATUS_WAITLOCK:
+ # The worker will notice the new status and cancel the job
+ self.MarkUnfinishedOps(constants.OP_STATUS_CANCELING, None)
+ return (True, "Job %s will be canceled" % self.id)
- if self.id_prefix:
- prefix = "%s-" % self.id_prefix
else:
- prefix = ""
+ logging.debug("Job %s is no longer waiting in the queue", self.id)
+ return (False, "Job %s is no longer waiting in the queue" % self.id)
- return "%s%010d" % (prefix, job_id)
- def _ShouldJobBeArchivedUnlocked(self, job):
- if job.GetStatus() not in (constants.JOB_STATUS_CANCELED,
- constants.JOB_STATUS_SUCCESS,
- constants.JOB_STATUS_ERROR):
- logging.debug("Job %s is not yet done", job.id)
- return False
- return True
+class _OpExecCallbacks(mcpu.OpExecCbBase):
+ def __init__(self, queue, job, op):
+ """Initializes this class.
+ @type queue: L{JobQueue}
+ @param queue: Job queue
+ @type job: L{_QueuedJob}
+ @param job: Job object
+ @type op: L{_QueuedOpCode}
+ @param op: OpCode
-class DiskJobStorage(JobStorageBase):
- _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE)
+ """
+ assert queue, "Queue is missing"
+ assert job, "Job is missing"
+ assert op, "Opcode is missing"
- def __init__(self, id_prefix):
- JobStorageBase.__init__(self, id_prefix)
+ self._queue = queue
+ self._job = job
+ self._op = op
- self._lock = threading.Lock()
- self._memcache = {}
- self._my_hostname = utils.HostInfo().name
+ def _CheckCancel(self):
+ """Raises an exception to cancel the job if asked to.
- # Make sure our directories exists
- for path in (constants.QUEUE_DIR, constants.JOB_QUEUE_ARCHIVE_DIR):
- try:
- os.mkdir(path, 0700)
- except OSError, err:
- if err.errno not in (errno.EEXIST, ):
- raise
+ """
+ # Cancel here if we were asked to
+ if self._op.status == constants.OP_STATUS_CANCELING:
+ logging.debug("Canceling opcode")
+ raise CancelJob()
- # Get queue lock
- self.lock_fd = open(constants.JOB_QUEUE_LOCK_FILE, "w")
- try:
- utils.LockFile(self.lock_fd)
- except:
- self.lock_fd.close()
- raise
+ @locking.ssynchronized(_QUEUE, shared=1)
+ def NotifyStart(self):
+ """Mark the opcode as running, not lock-waiting.
- # Read version
- try:
- version_fd = open(constants.JOB_QUEUE_VERSION_FILE, "r")
- except IOError, err:
- if err.errno not in (errno.ENOENT, ):
- raise
+ This is called from the mcpu code as a notifier function, when the LU is
+ finally about to start the Exec() method. Of course, to have end-user
+ visible results, the opcode must be initially (before calling into
+ Processor.ExecOpCode) set to OP_STATUS_WAITLOCK.
+
+ """
+ assert self._op in self._job.ops
+ assert self._op.status in (constants.OP_STATUS_WAITLOCK,
+ constants.OP_STATUS_CANCELING)
- # Setup a new queue
- self._InitQueueUnlocked()
+ # Cancel here if we were asked to
+ self._CheckCancel()
- # Try to open again
- version_fd = open(constants.JOB_QUEUE_VERSION_FILE, "r")
+ logging.debug("Opcode is now running")
- try:
- # Try to read version
- version = int(version_fd.read(128))
+ self._op.status = constants.OP_STATUS_RUNNING
+ self._op.exec_timestamp = TimeStampNow()
- # Verify version
- if version != constants.JOB_QUEUE_VERSION:
- raise errors.JobQueueError("Found version %s, expected %s",
- version, constants.JOB_QUEUE_VERSION)
- finally:
- version_fd.close()
+ # And finally replicate the job status
+ self._queue.UpdateJobUnlocked(self._job)
- self._last_serial = self._ReadSerial()
- if self._last_serial is None:
- raise errors.ConfigurationError("Can't read/parse the job queue serial"
- " file")
+ @locking.ssynchronized(_QUEUE, shared=1)
+ def _AppendFeedback(self, timestamp, log_type, log_msg):
+ """Internal feedback append function, with locks
- @staticmethod
- def _ReadSerial():
- """Try to read the job serial file.
+ """
+ self._job.log_serial += 1
+ self._op.log.append((self._job.log_serial, timestamp, log_type, log_msg))
+ self._queue.UpdateJobUnlocked(self._job, replicate=False)
- @rtype: None or int
- @return: If the serial can be read, then it is returned. Otherwise None
- is returned.
+ def Feedback(self, *args):
+ """Append a log entry.
"""
- try:
- serial_fd = open(constants.JOB_QUEUE_SERIAL_FILE, "r")
- try:
- # Read last serial
- serial = int(serial_fd.read(1024).strip())
- finally:
- serial_fd.close()
- except (ValueError, EnvironmentError):
- serial = None
+ assert len(args) < 3
+
+ if len(args) == 1:
+ log_type = constants.ELOG_MESSAGE
+ log_msg = args[0]
+ else:
+ (log_type, log_msg) = args
- return serial
+ # The time is split to make serialization easier and not lose
+ # precision.
+ timestamp = utils.SplitTime(time.time())
+ self._AppendFeedback(timestamp, log_type, log_msg)
- def Close(self):
- assert self.lock_fd, "Queue should be open"
+ def CheckCancel(self):
+ """Check whether job has been cancelled.
+
+ """
+ assert self._op.status in (constants.OP_STATUS_WAITLOCK,
+ constants.OP_STATUS_CANCELING)
- self.lock_fd.close()
- self.lock_fd = None
+ # Cancel here if we were asked to
+ self._CheckCancel()
- def _InitQueueUnlocked(self):
- assert self.lock_fd, "Queue should be open"
+ def SubmitManyJobs(self, jobs):
+ """Submits jobs for processing.
- utils.WriteFile(constants.JOB_QUEUE_VERSION_FILE,
- data="%s\n" % constants.JOB_QUEUE_VERSION)
- if self._ReadSerial() is None:
- utils.WriteFile(constants.JOB_QUEUE_SERIAL_FILE,
- data="%s\n" % 0)
+ See L{JobQueue.SubmitManyJobs}.
- def _NewSerialUnlocked(self, nodes):
- """Generates a new job identifier.
+ """
+ # Locking is done in job queue
+ return self._queue.SubmitManyJobs(jobs)
- Job identifiers are unique during the lifetime of a cluster.
- Returns: A string representing the job identifier.
+class _JobChangesChecker(object):
+ def __init__(self, fields, prev_job_info, prev_log_serial):
+ """Initializes this class.
+
+ @type fields: list of strings
+ @param fields: Fields requested by LUXI client
+ @type prev_job_info: string
+ @param prev_job_info: previous job info, as passed by the LUXI client
+ @type prev_log_serial: string
+ @param prev_log_serial: previous job serial, as passed by the LUXI client
"""
- assert self.lock_fd, "Queue should be open"
+ self._fields = fields
+ self._prev_job_info = prev_job_info
+ self._prev_log_serial = prev_log_serial
- # New number
- serial = self._last_serial + 1
+ def __call__(self, job):
+ """Checks whether job has changed.
- # Write to file
- utils.WriteFile(constants.JOB_QUEUE_SERIAL_FILE,
- data="%s\n" % serial)
+ @type job: L{_QueuedJob}
+ @param job: Job object
- # Keep it only if we were able to write the file
- self._last_serial = serial
+ """
+ status = job.CalcStatus()
+ job_info = job.GetInfo(self._fields)
+ log_entries = job.GetLogEntries(self._prev_log_serial)
+
+ # Serializing and deserializing data can cause type changes (e.g. from
+ # tuple to list) or precision loss. We're doing it here so that we get
+ # the same modifications as the data received from the client. Without
+ # this, the comparison afterwards might fail without the data being
+ # significantly different.
+ # TODO: we just deserialized from disk, investigate how to make sure that
+ # the job info and log entries are compatible to avoid this further step.
+ # TODO: Doing something like in testutils.py:UnifyValueType might be more
+ # efficient, though floats will be tricky
+ job_info = serializer.LoadJson(serializer.DumpJson(job_info))
+ log_entries = serializer.LoadJson(serializer.DumpJson(log_entries))
+
+ # Don't even try to wait if the job is no longer running, there will be
+ # no changes.
+ if (status not in (constants.JOB_STATUS_QUEUED,
+ constants.JOB_STATUS_RUNNING,
+ constants.JOB_STATUS_WAITLOCK) or
+ job_info != self._prev_job_info or
+ (log_entries and self._prev_log_serial != log_entries[0][0])):
+ logging.debug("Job %s changed", job.id)
+ return (job_info, log_entries)
+
+ return None
+
+
+class _JobFileChangesWaiter(object):
+ def __init__(self, filename):
+ """Initializes this class.
+
+ @type filename: string
+ @param filename: Path to job file
+ @raises errors.InotifyError: if the notifier cannot be set up
- # Distribute the serial to the other nodes
+ """
+ self._wm = pyinotify.WatchManager()
+ self._inotify_handler = \
+ asyncnotifier.SingleFileEventHandler(self._wm, self._OnInotify, filename)
+ self._notifier = \
+ pyinotify.Notifier(self._wm, default_proc_fun=self._inotify_handler)
try:
- nodes.remove(self._my_hostname)
- except ValueError:
- pass
+ self._inotify_handler.enable()
+ except Exception:
+ # pyinotify doesn't close file descriptors automatically
+ self._notifier.stop()
+ raise
- result = rpc.call_upload_file(nodes, constants.JOB_QUEUE_SERIAL_FILE)
- for node in nodes:
- if not result[node]:
- logging.error("copy of job queue file to node %s failed", node)
+ def _OnInotify(self, notifier_enabled):
+ """Callback for inotify.
- return self.FormatJobID(serial)
+ """
+ if not notifier_enabled:
+ self._inotify_handler.enable()
- def _GetJobPath(self, job_id):
- return os.path.join(constants.QUEUE_DIR, "job-%s" % job_id)
+ def Wait(self, timeout):
+ """Waits for the job file to change.
- def _GetArchivedJobPath(self, job_id):
- return os.path.join(constants.JOB_QUEUE_ARCHIVE_DIR, "job-%s" % job_id)
+ @type timeout: float
+ @param timeout: Timeout in seconds
+ @return: Whether there have been events
- def _ExtractJobID(self, name):
- m = self._RE_JOB_FILE.match(name)
- if m:
- return m.group(1)
- else:
- return None
+ """
+ assert timeout >= 0
+ have_events = self._notifier.check_events(timeout * 1000)
+ if have_events:
+ self._notifier.read_events()
+ self._notifier.process_events()
+ return have_events
- def _GetJobIDsUnlocked(self, archived=False):
- """Return all known job IDs.
+ def Close(self):
+ """Closes underlying notifier and its file descriptor.
- If the parameter archived is True, archived jobs IDs will be
- included. Currently this argument is unused.
+ """
+ self._notifier.stop()
- The method only looks at disk because it's a requirement that all
- jobs are present on disk (so in the _memcache we don't have any
- extra IDs).
+
+class _JobChangesWaiter(object):
+ def __init__(self, filename):
+ """Initializes this class.
+
+ @type filename: string
+ @param filename: Path to job file
"""
- jlist = [self._ExtractJobID(name) for name in self._ListJobFiles()]
- jlist.sort()
- return jlist
+ self._filewaiter = None
+ self._filename = filename
- def _ListJobFiles(self):
- assert self.lock_fd, "Queue should be open"
+ def Wait(self, timeout):
+ """Waits for a job to change.
- return [name for name in utils.ListVisibleFiles(constants.QUEUE_DIR)
- if self._RE_JOB_FILE.match(name)]
+ @type timeout: float
+ @param timeout: Timeout in seconds
+ @return: Whether there have been events
- def _LoadJobUnlocked(self, job_id):
- assert self.lock_fd, "Queue should be open"
+ """
+ if self._filewaiter:
+ return self._filewaiter.Wait(timeout)
- if job_id in self._memcache:
- logging.debug("Found job %s in memcache", job_id)
- return self._memcache[job_id]
+ # Lazy setup: Avoid inotify setup cost when job file has already changed.
+ # If this point is reached, return immediately and let caller check the job
+ # file again in case there were changes since the last check. This avoids a
+ # race condition.
+ self._filewaiter = _JobFileChangesWaiter(self._filename)
- filepath = self._GetJobPath(job_id)
- logging.debug("Loading job from %s", filepath)
- try:
- fd = open(filepath, "r")
- except IOError, err:
- if err.errno in (errno.ENOENT, ):
- return None
- raise
- try:
- data = serializer.LoadJson(fd.read())
- finally:
- fd.close()
+ return True
- job = _QueuedJob.Restore(self, data)
- self._memcache[job_id] = job
- logging.debug("Added job %s to the cache", job_id)
- return job
+ def Close(self):
+ """Closes underlying waiter.
- def _GetJobsUnlocked(self, job_ids):
- if not job_ids:
- job_ids = self._GetJobIDsUnlocked()
+ """
+ if self._filewaiter:
+ self._filewaiter.Close()
- return [self._LoadJobUnlocked(job_id) for job_id in job_ids]
- @utils.LockedMethod
- def GetJobs(self, job_ids):
- return self._GetJobsUnlocked(job_ids)
+class _WaitForJobChangesHelper(object):
+ """Helper class using inotify to wait for changes in a job file.
- @utils.LockedMethod
- def AddJob(self, ops, nodes):
- """Create and store on disk a new job.
+ This class takes a previous job status and serial, and alerts the client when
+ the current job status has changed.
- @type ops: list
- @param ops: The list of OpCodes that will become the new job.
- @type nodes: list
- @param nodes: The list of nodes to which the new job serial will be
- distributed.
+ """
+ @staticmethod
+ def _CheckForChanges(job_load_fn, check_fn):
+ job = job_load_fn()
+ if not job:
+ raise errors.JobLost()
+
+ result = check_fn(job)
+ if result is None:
+ raise utils.RetryAgain()
+
+ return result
+
+ def __call__(self, filename, job_load_fn,
+ fields, prev_job_info, prev_log_serial, timeout):
+ """Waits for changes on a job.
+
+ @type filename: string
+ @param filename: File on which to wait for changes
+ @type job_load_fn: callable
+ @param job_load_fn: Function to load job
+ @type fields: list of strings
+ @param fields: Which fields to check for changes
+ @type prev_job_info: list or None
+ @param prev_job_info: Last job information returned
+ @type prev_log_serial: int
+ @param prev_log_serial: Last job message serial number
+ @type timeout: float
+ @param timeout: maximum time to wait in seconds
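+ @return: None if the job was lost or inotify could not be set up,
+ L{constants.JOB_NOTCHANGED} if the timeout expired, otherwise the
+ (job info, log entries) tuple computed by L{_JobChangesChecker}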
"""
- assert self.lock_fd, "Queue should be open"
+ try:
+ check_fn = _JobChangesChecker(fields, prev_job_info, prev_log_serial)
+ waiter = _JobChangesWaiter(filename)
+ try:
+ return utils.Retry(compat.partial(self._CheckForChanges,
+ job_load_fn, check_fn),
+ utils.RETRY_REMAINING_TIME, timeout,
+ wait_fn=waiter.Wait)
+ finally:
+ waiter.Close()
+ except (errors.InotifyError, errors.JobLost):
+ return None
+ except utils.RetryTimeout:
+ return constants.JOB_NOTCHANGED
- # Get job identifier
- job_id = self._NewSerialUnlocked(nodes)
- job = _QueuedJob(self, job_id, ops)
- # Write to disk
- self._UpdateJobUnlocked(job)
+def _EncodeOpError(err):
+ """Encodes an error which occurred while processing an opcode.
- logging.debug("Added new job %s to the cache", job_id)
- self._memcache[job_id] = job
+ """
+ if isinstance(err, errors.GenericError):
+ to_encode = err
+ else:
+ to_encode = errors.OpExecError(str(err))
- return job
+ return errors.EncodeException(to_encode)
- def _UpdateJobUnlocked(self, job):
- assert self.lock_fd, "Queue should be open"
- filename = self._GetJobPath(job.id)
- logging.debug("Writing job %s to %s", job.id, filename)
- utils.WriteFile(filename,
- data=serializer.DumpJson(job.Serialize(), indent=False))
- self._CleanCacheUnlocked([job.id])
+class _TimeoutStrategyWrapper:
+ def __init__(self, fn):
+ """Initializes this class.
- def _CleanCacheUnlocked(self, exclude):
- """Clean the memory cache.
+ """
+ self._fn = fn
+ self._next = None
- The exceptions argument contains job IDs that should not be
- cleaned.
+ def _Advance(self):
+ """Gets the next timeout if necessary.
"""
- assert isinstance(exclude, list)
- for job in self._memcache.values():
- if job.id in exclude:
- continue
- if job.GetStatus() not in (constants.JOB_STATUS_QUEUED,
- constants.JOB_STATUS_RUNNING):
- logging.debug("Cleaning job %s from the cache", job.id)
- try:
- del self._memcache[job.id]
- except KeyError:
- pass
+ if self._next is None:
+ self._next = self._fn()
- @utils.LockedMethod
- def UpdateJob(self, job):
- return self._UpdateJobUnlocked(job)
+ def Peek(self):
+ """Returns the next timeout.
- @utils.LockedMethod
- def ArchiveJob(self, job_id):
- """Archives a job.
+ """
+ self._Advance()
+ return self._next
- @type job_id: string
- @param job_id: Job ID of job to be archived.
+ def Next(self):
+ """Returns the current timeout and advances the internal state.
"""
- logging.debug("Archiving job %s", job_id)
+ self._Advance()
+ result = self._next
+ self._next = None
+ return result
- job = self._LoadJobUnlocked(job_id)
- if not job:
- logging.debug("Job %s not found", job_id)
- return
- if not self._ShouldJobBeArchivedUnlocked(job):
- return
+class _OpExecContext:
+ def __init__(self, op, index, log_prefix, timeout_strategy_factory):
+ """Initializes this class.
- try:
- old = self._GetJobPath(job.id)
- new = self._GetArchivedJobPath(job.id)
+ """
+ self.op = op
+ self.index = index
+ self.log_prefix = log_prefix
+ self.summary = op.input.Summary()
- os.rename(old, new)
+ self._timeout_strategy_factory = timeout_strategy_factory
+ self._ResetTimeoutStrategy()
- logging.debug("Successfully archived job %s", job.id)
- finally:
- # Cleaning the cache because we don't know what os.rename actually did
- # and to be on the safe side.
- self._CleanCacheUnlocked([])
+ def _ResetTimeoutStrategy(self):
+ """Creates a new timeout strategy.
+ """
+ self._timeout_strategy = \
+ _TimeoutStrategyWrapper(self._timeout_strategy_factory().NextAttempt)
-class JobQueue:
- """The job queue.
+ def CheckPriorityIncrease(self):
+ """Checks whether priority can and should be increased.
- """
- def __init__(self, context):
- self._lock = threading.Lock()
- self._jobs = DiskJobStorage("")
- self._wpool = _JobQueueWorkerPool(context)
+ Called when locks couldn't be acquired.
- for job in self._jobs.GetJobs(None):
- status = job.GetStatus()
- if status in (constants.JOB_STATUS_QUEUED, ):
- self._wpool.AddTask(job)
+ """
+ op = self.op
- elif status in (constants.JOB_STATUS_RUNNING, ):
- logging.warning("Unfinished job %s found: %s", job.id, job)
- job.SetUnclean("Unclean master daemon shutdown")
+ # Exhausted all retries and next round should not use blocking acquire
+ # for locks?
+ if (self._timeout_strategy.Peek() is None and
+ op.priority > constants.OP_PRIO_HIGHEST):
+ logging.debug("Increasing priority")
+ op.priority -= 1
+ self._ResetTimeoutStrategy()
+ return True
- @utils.LockedMethod
- def SubmitJob(self, ops, nodes):
- """Add a new job to the queue.
+ return False
- This enters the job into our job queue and also puts it on the new
- queue, in order for it to be picked up by the queue processors.
+ def GetNextLockTimeout(self):
+ """Returns the next lock acquire timeout.
+
+ """
+ return self._timeout_strategy.Next()
- @type ops: list
- @param ops: the sequence of opcodes that will become the new job
- @type nodes: list
- @param nodes: the list of nodes to which the queue should be
- distributed
+
+class _JobProcessor(object):
+ def __init__(self, queue, opexec_fn, job,
+ _timeout_strategy_factory=mcpu.LockAttemptTimeoutStrategy):
+ """Initializes this class.
"""
- job = self._jobs.AddJob(ops, nodes)
+ self.queue = queue
+ self.opexec_fn = opexec_fn
+ self.job = job
+ self._timeout_strategy_factory = _timeout_strategy_factory
- # Add to worker pool
- self._wpool.AddTask(job)
+ @staticmethod
+ def _FindNextOpcode(job, timeout_strategy_factory):
+ """Locates the next opcode to run.
- return job.id
+ @type job: L{_QueuedJob}
+ @param job: Job object
+ @param timeout_strategy_factory: Callable to create new timeout strategy
- def ArchiveJob(self, job_id):
- self._jobs.ArchiveJob(job_id)
+ """
+ # Create some sort of a cache to speed up locating next opcode for future
+ # lookups
+ # TODO: Consider splitting _QueuedJob.ops into two separate lists, one for
+ # pending and one for processed ops.
+ if job.ops_iter is None:
+ job.ops_iter = enumerate(job.ops)
+
+ # Find next opcode to run
+ while True:
+ try:
+ (idx, op) = job.ops_iter.next()
+ except StopIteration:
+ raise errors.ProgrammerError("Called for a finished job")
+
+ if op.status == constants.OP_STATUS_RUNNING:
+ # Found an opcode already marked as running
+ raise errors.ProgrammerError("Called for job marked as running")
+
+ opctx = _OpExecContext(op, idx, "Op %s/%s" % (idx + 1, len(job.ops)),
+ timeout_strategy_factory)
+
+ if op.status == constants.OP_STATUS_CANCELED:
+ # Cancelled jobs are handled by the caller
+ assert not compat.any(i.status != constants.OP_STATUS_CANCELED
+ for i in job.ops[idx:])
+
+ elif op.status in constants.OPS_FINALIZED:
+ # This is a job that was partially completed before master daemon
+ # shutdown, so it can be expected that some opcodes are already
+ # completed successfully (if any did error out, then the whole job
+ # should have been aborted and not resubmitted for processing).
+ logging.info("%s: opcode %s already processed, skipping",
+ opctx.log_prefix, opctx.summary)
+ continue
- def CancelJob(self, job_id):
- raise NotImplementedError()
+ return opctx
- def _GetJobInfo(self, job, fields):
- row = []
- for fname in fields:
- if fname == "id":
- row.append(job.id)
- elif fname == "status":
- row.append(job.GetStatus())
- elif fname == "ops":
- row.append([op.GetInput().__getstate__() for op in job._ops])
- elif fname == "opresult":
- row.append([op.GetResult() for op in job._ops])
- elif fname == "opstatus":
- row.append([op.GetStatus() for op in job._ops])
- elif fname == "ticker":
- ji = job.GetRunOpIndex()
- if ji < 0:
- lmsg = None
- else:
- lmsg = job._ops[ji].RetrieveLog(-1)
- # message might be empty here
- if lmsg:
- lmsg = lmsg[0]
- else:
- lmsg = None
- row.append(lmsg)
- else:
- raise errors.OpExecError("Invalid job query field '%s'" % fname)
- return row
+ @staticmethod
+ def _MarkWaitlock(job, op):
+ """Marks an opcode as waiting for locks.
- def QueryJobs(self, job_ids, fields):
- """Returns a list of jobs in queue.
+ The job's start timestamp is also set if necessary.
- Args:
- - job_ids: Sequence of job identifiers or None for all
- - fields: Names of fields to return
+ @type job: L{_QueuedJob}
+ @param job: Job object
+ @type op: L{_QueuedOpCode}
+ @param op: Opcode object
"""
- self._lock.acquire()
- try:
- jobs = []
+ assert op in job.ops
+ assert op.status in (constants.OP_STATUS_QUEUED,
+ constants.OP_STATUS_WAITLOCK)
- for job in self._jobs.GetJobs(job_ids):
- if job is None:
- jobs.append(None)
- else:
- jobs.append(self._GetJobInfo(job, fields))
+ update = False
- return jobs
- finally:
- self._lock.release()
+ op.result = None
- @utils.LockedMethod
- def Shutdown(self):
- """Stops the job queue.
+ if op.status == constants.OP_STATUS_QUEUED:
+ op.status = constants.OP_STATUS_WAITLOCK
+ update = True
+
+ if op.start_timestamp is None:
+ op.start_timestamp = TimeStampNow()
+ update = True
+
+ if job.start_timestamp is None:
+ job.start_timestamp = op.start_timestamp
+ update = True
+
+ assert op.status == constants.OP_STATUS_WAITLOCK
+
+ return update
+
+ def _ExecOpCodeUnlocked(self, opctx):
+ """Processes one opcode and returns the result.
"""
- self._wpool.TerminateWorkers()
- self._jobs.Close()
+ op = opctx.op
+
+ assert op.status == constants.OP_STATUS_WAITLOCK
+
+ timeout = opctx.GetNextLockTimeout()
+
+ try:
+ # Make sure not to hold queue lock while calling ExecOpCode
+ result = self.opexec_fn(op.input,
+ _OpExecCallbacks(self.queue, self.job, op),
+ timeout=timeout, priority=op.priority)
+ except mcpu.LockAcquireTimeout:
+ assert timeout is not None, "Received timeout for blocking acquire"
+ logging.debug("Couldn't acquire locks in %0.6fs", timeout)
+
+ assert op.status in (constants.OP_STATUS_WAITLOCK,
+ constants.OP_STATUS_CANCELING)
+
+ # Was job cancelled while we were waiting for the lock?
+ if op.status == constants.OP_STATUS_CANCELING:
+ return (constants.OP_STATUS_CANCELING, None)
+
+ # Stay in waitlock while trying to re-acquire lock
+ return (constants.OP_STATUS_WAITLOCK, None)
+ except CancelJob:
+ logging.exception("%s: Canceling job", opctx.log_prefix)
+ assert op.status == constants.OP_STATUS_CANCELING
+ return (constants.OP_STATUS_CANCELING, None)
+ except Exception, err: # pylint: disable-msg=W0703
+ logging.exception("%s: Caught exception in %s",
+ opctx.log_prefix, opctx.summary)
+ return (constants.OP_STATUS_ERROR, _EncodeOpError(err))
+ else:
+ logging.debug("%s: %s successful",
+ opctx.log_prefix, opctx.summary)
+ return (constants.OP_STATUS_SUCCESS, result)
+
+ def __call__(self, _nextop_fn=None):
+ """Continues execution of a job.
+
+ @param _nextop_fn: Callback function for tests
+ @rtype: bool
+ @return: True if job is finished, False if processor needs to be called
+ again
+
+ """
+ queue = self.queue
+ job = self.job
+
+ logging.debug("Processing job %s", job.id)
+
+ queue.acquire(shared=1)
+ try:
+ opcount = len(job.ops)
+
+ # Is a previous opcode still pending?
+ if job.cur_opctx:
+ opctx = job.cur_opctx
+ job.cur_opctx = None
+ else:
+ if __debug__ and _nextop_fn:
+ _nextop_fn()
+ opctx = self._FindNextOpcode(job, self._timeout_strategy_factory)
+
+ op = opctx.op
+
+ # Consistency check
+ assert compat.all(i.status in (constants.OP_STATUS_QUEUED,
+ constants.OP_STATUS_CANCELING,
+ constants.OP_STATUS_CANCELED)
+ for i in job.ops[opctx.index + 1:])
+
+ assert op.status in (constants.OP_STATUS_QUEUED,
+ constants.OP_STATUS_WAITLOCK,
+ constants.OP_STATUS_CANCELING,
+ constants.OP_STATUS_CANCELED)
+
+ assert (op.priority <= constants.OP_PRIO_LOWEST and
+ op.priority >= constants.OP_PRIO_HIGHEST)
+
+ if op.status not in (constants.OP_STATUS_CANCELING,
+ constants.OP_STATUS_CANCELED):
+ assert op.status in (constants.OP_STATUS_QUEUED,
+ constants.OP_STATUS_WAITLOCK)
+
+ # Prepare to start opcode
+ if self._MarkWaitlock(job, op):
+ # Write to disk
+ queue.UpdateJobUnlocked(job)
+
+ assert op.status == constants.OP_STATUS_WAITLOCK
+ assert job.CalcStatus() == constants.JOB_STATUS_WAITLOCK
+ assert job.start_timestamp and op.start_timestamp
+
+ logging.info("%s: opcode %s waiting for locks",
+ opctx.log_prefix, opctx.summary)
+
+ queue.release()
+ try:
+ (op_status, op_result) = self._ExecOpCodeUnlocked(opctx)
+ finally:
+ queue.acquire(shared=1)
+
+ op.status = op_status
+ op.result = op_result
+
+ if op.status == constants.OP_STATUS_WAITLOCK:
+ # Couldn't get locks in time
+ assert not op.end_timestamp
+ else:
+ # Finalize opcode
+ op.end_timestamp = TimeStampNow()
+
+ if op.status == constants.OP_STATUS_CANCELING:
+ assert not compat.any(i.status != constants.OP_STATUS_CANCELING
+ for i in job.ops[opctx.index:])
+ else:
+ assert op.status in constants.OPS_FINALIZED
+
+ if op.status == constants.OP_STATUS_WAITLOCK:
+ finalize = False
+
+ if opctx.CheckPriorityIncrease():
+ # Priority was changed, need to update on-disk file
+ queue.UpdateJobUnlocked(job)
+
+ # Keep around for another round
+ job.cur_opctx = opctx
+
+ assert (op.priority <= constants.OP_PRIO_LOWEST and
+ op.priority >= constants.OP_PRIO_HIGHEST)
+
+ # In no case must the status be finalized here
+ assert job.CalcStatus() == constants.JOB_STATUS_WAITLOCK
+
+ else:
+ # Ensure all opcodes so far have been successful
+ assert (opctx.index == 0 or
+ compat.all(i.status == constants.OP_STATUS_SUCCESS
+ for i in job.ops[:opctx.index]))
+
+ # Reset context
+ job.cur_opctx = None
+
+ if op.status == constants.OP_STATUS_SUCCESS:
+ finalize = False
+
+ elif op.status == constants.OP_STATUS_ERROR:
+ # Ensure failed opcode has an exception as its result
+ assert errors.GetEncodedError(job.ops[opctx.index].result)
+
+ to_encode = errors.OpExecError("Preceding opcode failed")
+ job.MarkUnfinishedOps(constants.OP_STATUS_ERROR,
+ _EncodeOpError(to_encode))
+ finalize = True
+
+ # Consistency check
+ assert compat.all(i.status == constants.OP_STATUS_ERROR and
+ errors.GetEncodedError(i.result)
+ for i in job.ops[opctx.index:])
+
+ elif op.status == constants.OP_STATUS_CANCELING:
+ job.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
+ "Job canceled by request")
+ finalize = True
+
+ elif op.status == constants.OP_STATUS_CANCELED:
+ finalize = True
+
+ else:
+ raise errors.ProgrammerError("Unknown status '%s'" % op.status)
+
+ # Finalizing or last opcode?
+ if finalize or opctx.index == (opcount - 1):
+ # All opcodes have been run, finalize job
+ job.end_timestamp = TimeStampNow()
+
+ # Write to disk. If the job status is final, this is the final write
+ # allowed. Once the file has been written, it can be archived anytime.
+ queue.UpdateJobUnlocked(job)
+
+ if finalize or opctx.index == (opcount - 1):
+ logging.info("Finished job %s, status = %s", job.id, job.CalcStatus())
+ return True
+
+ return False
+ finally:
+ queue.release()
+
+
+class _JobQueueWorker(workerpool.BaseWorker):
+ """The actual job workers.
+
+ """
+ def RunTask(self, job): # pylint: disable-msg=W0221
+ """Job executor.
+
+ This function processes a job. It is closely tied to the L{_QueuedJob} and
+ L{_QueuedOpCode} classes.
+
+ @type job: L{_QueuedJob}
+ @param job: the job to be processed
+
+ """
+ queue = job.queue
+ assert queue == self.pool.queue
+
+ setname_fn = lambda op: self.SetTaskName(self._GetWorkerName(job, op))
+ setname_fn(None)
+
+ proc = mcpu.Processor(queue.context, job.id)
+
+ # Create wrapper for setting thread name
+ wrap_execop_fn = compat.partial(self._WrapExecOpCode, setname_fn,
+ proc.ExecOpCode)
+
+ if not _JobProcessor(queue, wrap_execop_fn, job)():
+ # Schedule again
+ raise workerpool.DeferTask(priority=job.CalcPriority())
+
+ @staticmethod
+ def _WrapExecOpCode(setname_fn, execop_fn, op, *args, **kwargs):
+ """Updates the worker thread name to include a short summary of the opcode.
+
+ @param setname_fn: Callable setting worker thread name
+ @param execop_fn: Callable for executing opcode (usually
+ L{mcpu.Processor.ExecOpCode})
+
+ """
+ setname_fn(op)
+ try:
+ return execop_fn(op, *args, **kwargs)
+ finally:
+ setname_fn(None)
+
+ @staticmethod
+ def _GetWorkerName(job, op):
+ """Sets the worker thread name.
+
+ @type job: L{_QueuedJob}
+ @type op: L{opcodes.OpCode}
+
+ """
+ parts = ["Job%s" % job.id]
+
+ if op:
+ parts.append(op.TinySummary())
+
+ return "/".join(parts)
+
+
+class _JobQueueWorkerPool(workerpool.WorkerPool):
+ """Simple class implementing a job-processing workerpool.
+
+ """
+ def __init__(self, queue):
+ super(_JobQueueWorkerPool, self).__init__("Jq",
+ JOBQUEUE_THREADS,
+ _JobQueueWorker)
+ self.queue = queue
+
+
+def _RequireOpenQueue(fn):
+ """Decorator for "public" functions.
+
+ This function should be used for all 'public' functions. That is,
+ functions usually called from other classes. Note that this should
+ be applied only to methods (not plain functions), since it expects
+ that the decorated function is called with a first argument that has
+ a '_queue_filelock' attribute.
+
+ @warning: Use this decorator only after locking.ssynchronized
+
+ Example::
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def Example(self):
+ pass
+
+ """
+ def wrapper(self, *args, **kwargs):
+ # pylint: disable-msg=W0212
+ assert self._queue_filelock is not None, "Queue should be open"
+ return fn(self, *args, **kwargs)
+ return wrapper
+
+
+class JobQueue(object):
+ """Queue used to manage the jobs.
+
+ @cvar _RE_JOB_FILE: regex matching the valid job file names
+
+ """
+ _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE)
+
+ def __init__(self, context):
+ """Constructor for JobQueue.
+
+ The constructor will initialize the job queue object and then
+ start loading the current jobs from disk, either for starting them
+ (if they were queued) or for aborting them (if they were already
+ running).
+
+ @type context: GanetiContext
+ @param context: the context object for access to the configuration
+ data and other ganeti objects
+
+ """
+ self.context = context
+ self._memcache = weakref.WeakValueDictionary()
+ self._my_hostname = netutils.Hostname.GetSysName()
+
+ # The Big JobQueue lock. If a code block or method acquires it in shared
+ # mode, it must guarantee concurrency with all the code acquiring it in
+ # shared mode, including itself. If the lock is not acquired at all,
+ # concurrency must be guaranteed with all code acquiring it in shared
+ # mode and with all code acquiring it exclusively.
+ self._lock = locking.SharedLock("JobQueue")
+
+ self.acquire = self._lock.acquire
+ self.release = self._lock.release
+
+ # Initialize the queue, and acquire the filelock.
+ # This ensures no other process is working on the job queue.
+ self._queue_filelock = jstore.InitAndVerifyQueue(must_lock=True)
+
+ # Read serial file
+ self._last_serial = jstore.ReadSerial()
+ assert self._last_serial is not None, ("Serial file was modified between"
+ " check in jstore and here")
+
+ # Get initial list of nodes
+ self._nodes = dict((n.name, n.primary_ip)
+ for n in self.context.cfg.GetAllNodesInfo().values()
+ if n.master_candidate)
+
+ # Remove master node
+ self._nodes.pop(self._my_hostname, None)
+
+ # TODO: Check consistency across nodes
+
+ self._queue_size = 0
+ self._UpdateQueueSizeUnlocked()
+ self._drained = jstore.CheckDrainFlag()
+
+ # Setup worker pool
+ self._wpool = _JobQueueWorkerPool(self)
+ try:
+ self._InspectQueue()
+ except:
+ self._wpool.TerminateWorkers()
+ raise
+
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def _InspectQueue(self):
+ """Loads the whole job queue and resumes unfinished jobs.
+
+ This function needs the lock here because WorkerPool.AddTask() may start a
+ job while we're still doing our work.
+
+ """
+ logging.info("Inspecting job queue")
+
+ restartjobs = []
+
+ all_job_ids = self._GetJobIDsUnlocked()
+ jobs_count = len(all_job_ids)
+ lastinfo = time.time()
+ for idx, job_id in enumerate(all_job_ids):
+ # Give an update every 1000 jobs or 10 seconds
+ if (idx % 1000 == 0 or time.time() >= (lastinfo + 10.0) or
+ idx == (jobs_count - 1)):
+ logging.info("Job queue inspection: %d/%d (%0.1f %%)",
+ idx, jobs_count - 1, 100.0 * (idx + 1) / jobs_count)
+ lastinfo = time.time()
+
+ job = self._LoadJobUnlocked(job_id)
+
+ # a failure in loading the job can cause 'None' to be returned
+ if job is None:
+ continue
+
+ status = job.CalcStatus()
+
+ if status == constants.JOB_STATUS_QUEUED:
+ restartjobs.append(job)
+
+ elif status in (constants.JOB_STATUS_RUNNING,
+ constants.JOB_STATUS_WAITLOCK,
+ constants.JOB_STATUS_CANCELING):
+ logging.warning("Unfinished job %s found: %s", job.id, job)
+
+ if status == constants.JOB_STATUS_WAITLOCK:
+ # Restart job
+ job.MarkUnfinishedOps(constants.OP_STATUS_QUEUED, None)
+ restartjobs.append(job)
+ else:
+ job.MarkUnfinishedOps(constants.OP_STATUS_ERROR,
+ "Unclean master daemon shutdown")
+
+ self.UpdateJobUnlocked(job)
+
+ if restartjobs:
+ logging.info("Restarting %s jobs", len(restartjobs))
+ self._EnqueueJobs(restartjobs)
+
+ logging.info("Job queue inspection finished")
+
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def AddNode(self, node):
+ """Register a new node with the queue.
+
+ @type node: L{objects.Node}
+ @param node: the node object to be added
+
+ """
+ node_name = node.name
+ assert node_name != self._my_hostname
+
+ # Clean queue directory on added node
+ result = rpc.RpcRunner.call_jobqueue_purge(node_name)
+ msg = result.fail_msg
+ if msg:
+ logging.warning("Cannot cleanup queue directory on node %s: %s",
+ node_name, msg)
+
+ if not node.master_candidate:
+ # remove if existing, ignoring errors
+ self._nodes.pop(node_name, None)
+ # and skip the replication of the job ids
+ return
+
+ # Upload the whole queue excluding archived jobs
+ files = [self._GetJobPath(job_id) for job_id in self._GetJobIDsUnlocked()]
+
+ # Upload current serial file
+ files.append(constants.JOB_QUEUE_SERIAL_FILE)
+
+ for file_name in files:
+ # Read file content
+ content = utils.ReadFile(file_name)
+
+ result = rpc.RpcRunner.call_jobqueue_update([node_name],
+ [node.primary_ip],
+ file_name, content)
+ msg = result[node_name].fail_msg
+ if msg:
+ logging.error("Failed to upload file %s to node %s: %s",
+ file_name, node_name, msg)
+
+ self._nodes[node_name] = node.primary_ip
+
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def RemoveNode(self, node_name):
+ """Callback called when removing nodes from the cluster.
+
+ @type node_name: str
+ @param node_name: the name of the node to remove
+
+ """
+ self._nodes.pop(node_name, None)
+
+ @staticmethod
+ def _CheckRpcResult(result, nodes, failmsg):
+ """Verifies the status of an RPC call.
+
+ Since we aim to keep consistency should this node (the current
+ master) fail, we will log errors if our rpc calls fail, and especially
+ log the case when more than half of the nodes fail.
+
+ @param result: the data as returned from the rpc call
+ @type nodes: list
+ @param nodes: the list of nodes we made the call to
+ @type failmsg: str
+ @param failmsg: the identifier to be used for logging
+
+ """
+ failed = []
+ success = []
+
+ for node in nodes:
+ msg = result[node].fail_msg
+ if msg:
+ failed.append(node)
+ logging.error("RPC call %s (%s) failed on node %s: %s",
+ result[node].call, failmsg, node, msg)
+ else:
+ success.append(node)
+
+ # +1 for the master node
+ if (len(success) + 1) < len(failed):
+ # TODO: Handle failing nodes
+ logging.error("More than half of the nodes failed")
+
+ def _GetNodeIp(self):
+ """Helper for returning the node name/ip list.
+
+ @rtype: (list, list)
+ @return: a tuple of two lists, the first one with the node
+ names and the second one with the node addresses
+
+ """
+ # TODO: Change to "tuple(map(list, zip(*self._nodes.items())))"?
+ name_list = self._nodes.keys()
+ addr_list = [self._nodes[name] for name in name_list]
+ return name_list, addr_list
+
+ def _UpdateJobQueueFile(self, file_name, data, replicate):
+ """Writes a file locally and then replicates it to all nodes.
+
+ This function will replace the contents of a file on the local
+ node and then replicate it to all the other nodes we have.
+
+ @type file_name: str
+ @param file_name: the path of the file to be replicated
+ @type data: str
+ @param data: the new contents of the file
+ @type replicate: boolean
+ @param replicate: whether to spread the changes to the remote nodes
+
+ """
+ getents = runtime.GetEnts()
+ utils.WriteFile(file_name, data=data, uid=getents.masterd_uid,
+ gid=getents.masterd_gid)
+
+ if replicate:
+ names, addrs = self._GetNodeIp()
+ result = rpc.RpcRunner.call_jobqueue_update(names, addrs, file_name, data)
+ self._CheckRpcResult(result, self._nodes, "Updating %s" % file_name)
+
+ def _RenameFilesUnlocked(self, rename):
+ """Renames a file locally and then replicate the change.
+
+ This function will rename a file in the local queue directory
+ and then replicate this rename to all the other nodes we have.
+
+ @type rename: list of (old, new)
+ @param rename: List containing tuples mapping old to new names
+
+ """
+ # Rename them locally
+ for old, new in rename:
+ utils.RenameFile(old, new, mkdir=True)
+
+ # ... and on all nodes
+ names, addrs = self._GetNodeIp()
+ result = rpc.RpcRunner.call_jobqueue_rename(names, addrs, rename)
+ self._CheckRpcResult(result, self._nodes, "Renaming files (%r)" % rename)
+
+ @staticmethod
+ def _FormatJobID(job_id):
+ """Convert a job ID to string format.
+
+ Currently this just does C{str(job_id)} after performing some
+ checks, but if we want to change the job id format this will
+ abstract this change.
+
+ @type job_id: int or long
+ @param job_id: the numeric job id
+ @rtype: str
+ @return: the formatted job id
+
+ """
+ if not isinstance(job_id, (int, long)):
+ raise errors.ProgrammerError("Job ID '%s' not numeric" % job_id)
+ if job_id < 0:
+ raise errors.ProgrammerError("Job ID %s is negative" % job_id)
+
+ return str(job_id)
+
+ @classmethod
+ def _GetArchiveDirectory(cls, job_id):
+ """Returns the archive directory for a job.
+
+ @type job_id: str
+ @param job_id: Job identifier
+ @rtype: str
+ @return: Directory name
+
+ """
+ return str(int(job_id) / JOBS_PER_ARCHIVE_DIRECTORY)
+
+ def _NewSerialsUnlocked(self, count):
+ """Generates a new job identifier.
+
+ Job identifiers are unique during the lifetime of a cluster.
+
+ @type count: integer
+ @param count: how many serials to return
+ @rtype: list of str
+ @return: a list of strings representing the newly generated job identifiers
+
+ """
+ assert count > 0
+ # New number
+ serial = self._last_serial + count
+
+ # Write to file
+ self._UpdateJobQueueFile(constants.JOB_QUEUE_SERIAL_FILE,
+ "%s\n" % serial, True)
+
+ result = [self._FormatJobID(v)
+ for v in range(self._last_serial + 1, serial + 1)]
+ # Keep it only if we were able to write the file
+ self._last_serial = serial
+
+ return result
+
+ @staticmethod
+ def _GetJobPath(job_id):
+ """Returns the job file for a given job id.
+
+ @type job_id: str
+ @param job_id: the job identifier
+ @rtype: str
+ @return: the path to the job file
+
+ """
+ return utils.PathJoin(constants.QUEUE_DIR, "job-%s" % job_id)
+
+ @classmethod
+ def _GetArchivedJobPath(cls, job_id):
+ """Returns the archived job file for a give job id.
+
+ @type job_id: str
+ @param job_id: the job identifier
+ @rtype: str
+ @return: the path to the archived job file
+
+ """
+ return utils.PathJoin(constants.JOB_QUEUE_ARCHIVE_DIR,
+ cls._GetArchiveDirectory(job_id), "job-%s" % job_id)
+
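+ # Illustrative example: assuming JOB_QUEUE_ARCHIVE_DIR points at the
+ # queue's "archive" subdirectory, job 12345 would be archived as
+ # .../archive/1/job-12345 (the "1" coming from _GetArchiveDirectory).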
+ def _GetJobIDsUnlocked(self, sort=True):
+ """Return all known job IDs.
+
+ The method only looks at disk because it's a requirement that all
+ jobs are present on disk (so in the _memcache we don't have any
+ extra IDs).
+
+ @type sort: boolean
+ @param sort: perform sorting on the returned job ids
+ @rtype: list
+ @return: the list of job IDs
+
+ """
+ jlist = []
+ for filename in utils.ListVisibleFiles(constants.QUEUE_DIR):
+ m = self._RE_JOB_FILE.match(filename)
+ if m:
+ jlist.append(m.group(1))
+ if sort:
+ jlist = utils.NiceSort(jlist)
+ return jlist
+
+ def _LoadJobUnlocked(self, job_id):
+ """Loads a job from the disk or memory.
+
+ Given a job id, this will return the cached job object if
+ existing, or try to load the job from the disk. If loading from
+ disk, it will also add the job to the cache.
+
+ @param job_id: the job id
+ @rtype: L{_QueuedJob} or None
+ @return: either None or the job object
+
+ """
+ job = self._memcache.get(job_id, None)
+ if job:
+ logging.debug("Found job %s in memcache", job_id)
+ return job
+
+ try:
+ job = self._LoadJobFromDisk(job_id)
+ if job is None:
+ return job
+ except errors.JobFileCorrupted:
+ old_path = self._GetJobPath(job_id)
+ new_path = self._GetArchivedJobPath(job_id)
+ if old_path == new_path:
+ # job already archived (future case)
+ logging.exception("Can't parse job %s", job_id)
+ else:
+ # non-archived case
+ logging.exception("Can't parse job %s, will archive.", job_id)
+ self._RenameFilesUnlocked([(old_path, new_path)])
+ return None
+
+ self._memcache[job_id] = job
+ logging.debug("Added job %s to the cache", job_id)
+ return job
+
+ def _LoadJobFromDisk(self, job_id):
+ """Load the given job file from disk.
+
+ Given a job file, read, load and restore it in a _QueuedJob format.
+
+ @type job_id: string
+ @param job_id: job identifier
+ @rtype: L{_QueuedJob} or None
+ @return: either None or the job object
+
+ """
+ filepath = self._GetJobPath(job_id)
+ logging.debug("Loading job from %s", filepath)
+ try:
+ raw_data = utils.ReadFile(filepath)
+ except EnvironmentError, err:
+ if err.errno in (errno.ENOENT, ):
+ return None
+ raise
+
+ try:
+ data = serializer.LoadJson(raw_data)
+ job = _QueuedJob.Restore(self, data)
+ except Exception, err: # pylint: disable-msg=W0703
+ raise errors.JobFileCorrupted(err)
+
+ return job
+
+ def SafeLoadJobFromDisk(self, job_id):
+ """Load the given job file from disk.
+
+ Given a job file, read, load and restore it in a _QueuedJob format.
+ In case of error reading the job, it gets returned as None, and the
+ exception is logged.
+
+ @type job_id: string
+ @param job_id: job identifier
+ @rtype: L{_QueuedJob} or None
+ @return: either None or the job object
+
+ """
+ try:
+ return self._LoadJobFromDisk(job_id)
+ except (errors.JobFileCorrupted, EnvironmentError):
+ logging.exception("Can't load/parse job %s", job_id)
+ return None
+
+ def _UpdateQueueSizeUnlocked(self):
+ """Update the queue size.
+
+ """
+ self._queue_size = len(self._GetJobIDsUnlocked(sort=False))
+
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def SetDrainFlag(self, drain_flag):
+ """Sets the drain flag for the queue.
+
+ @type drain_flag: boolean
+ @param drain_flag: Whether to set or unset the drain flag
+
+ """
+ jstore.SetDrainFlag(drain_flag)
+
+ self._drained = drain_flag
+
+ return True
+
+ @_RequireOpenQueue
+ def _SubmitJobUnlocked(self, job_id, ops):
+ """Create and store a new job.
+
+ This enters the job into our job queue and writes it to disk; the
+ caller is responsible for adding it to the worker pool (see
+ L{_EnqueueJobs}) so that it gets picked up by the queue processors.
+
+ @type job_id: job ID
+ @param job_id: the job ID for the new job
+ @type ops: list
+ @param ops: The list of OpCodes that will become the new job.
+ @rtype: L{_QueuedJob}
+ @return: the job object to be queued
+ @raise errors.JobQueueDrainError: if the job queue is marked for draining
+ @raise errors.JobQueueFull: if the job queue has too many jobs in it
+ @raise errors.GenericError: If an opcode is not valid
+
+ """
+ # Ok when sharing the big job queue lock, as the drain file is created when
+ # the lock is exclusive.
+ if self._drained:
+ raise errors.JobQueueDrainError("Job queue is drained, refusing job")
+
+ if self._queue_size >= constants.JOB_QUEUE_SIZE_HARD_LIMIT:
+ raise errors.JobQueueFull()
+
+ job = _QueuedJob(self, job_id, ops)
+
+ # Check priority
+ for idx, op in enumerate(job.ops):
+ if op.priority not in constants.OP_PRIO_SUBMIT_VALID:
+ allowed = utils.CommaJoin(constants.OP_PRIO_SUBMIT_VALID)
+ raise errors.GenericError("Opcode %s has invalid priority %s, allowed"
+ " are %s" % (idx, op.priority, allowed))
+
+ # Write to disk
+ self.UpdateJobUnlocked(job)
+
+ self._queue_size += 1
+
+ logging.debug("Adding new job %s to the cache", job_id)
+ self._memcache[job_id] = job
+
+ return job
+
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def SubmitJob(self, ops):
+ """Create and store a new job.
+
+ @see: L{_SubmitJobUnlocked}
+
+ """
+ job_id = self._NewSerialsUnlocked(1)[0]
+ self._EnqueueJobs([self._SubmitJobUnlocked(job_id, ops)])
+ return job_id
+
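+ # Usage sketch (illustrative, assuming a test opcode such as
+ # opcodes.OpTestDelay is available):
+ #   job_id = queue.SubmitJob([opcodes.OpTestDelay(duration=10)])
+ # The returned job_id is the string form of the newly allocated serial and
+ # can later be passed to WaitForJobChanges or QueryJobs.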
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def SubmitManyJobs(self, jobs):
+ """Create and store multiple jobs.
+
+ @see: L{_SubmitJobUnlocked}
+
+ """
+ results = []
+ added_jobs = []
+ all_job_ids = self._NewSerialsUnlocked(len(jobs))
+ for job_id, ops in zip(all_job_ids, jobs):
+ try:
+ added_jobs.append(self._SubmitJobUnlocked(job_id, ops))
+ status = True
+ data = job_id
+ except errors.GenericError, err:
+ data = ("%s; opcodes %s" %
+ (err, utils.CommaJoin(op.Summary() for op in ops)))
+ status = False
+ results.append((status, data))
+
+ self._EnqueueJobs(added_jobs)
+
+ return results
+
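+ # Illustrative example of the result format: a list with one (status, data)
+ # tuple per requested job, e.g. [(True, "123"), (False, "<error message>")],
+ # where data is the new job id on success and an error string on failure.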
+ def _EnqueueJobs(self, jobs):
+ """Helper function to add jobs to worker pool's queue.
+
+ @type jobs: list
+ @param jobs: List of all jobs
+
+ """
+ self._wpool.AddManyTasks([(job, ) for job in jobs],
+ priority=[job.CalcPriority() for job in jobs])
+
+ @_RequireOpenQueue
+ def UpdateJobUnlocked(self, job, replicate=True):
+ """Update a job's on disk storage.
+
+ After a job has been modified, this function needs to be called in
+ order to write the changes to disk and replicate them to the other
+ nodes.
+
+ @type job: L{_QueuedJob}
+ @param job: the changed job
+ @type replicate: boolean
+ @param replicate: whether to replicate the change to remote nodes
+
+ """
+ filename = self._GetJobPath(job.id)
+ data = serializer.DumpJson(job.Serialize(), indent=False)
+ logging.debug("Writing job %s to %s", job.id, filename)
+ self._UpdateJobQueueFile(filename, data, replicate)
+
+ def WaitForJobChanges(self, job_id, fields, prev_job_info, prev_log_serial,
+ timeout):
+ """Waits for changes in a job.
+
+ @type job_id: string
+ @param job_id: Job identifier
+ @type fields: list of strings
+ @param fields: Which fields to check for changes
+ @type prev_job_info: list or None
+ @param prev_job_info: Last job information returned
+ @type prev_log_serial: int
+ @param prev_log_serial: Last job message serial number
+ @type timeout: float
+ @param timeout: maximum time to wait in seconds
+ @rtype: tuple (job info, log entries)
+ @return: a tuple of the job information as required via
+ the fields parameter, and the log entries as a list
+
+ If the job has not changed and the timeout has expired, the
+ special value L{constants.JOB_NOTCHANGED} is returned instead,
+ which clients should interpret accordingly.
+
+ """
+ load_fn = compat.partial(self.SafeLoadJobFromDisk, job_id)
+
+ helper = _WaitForJobChangesHelper()
+
+ return helper(self._GetJobPath(job_id), load_fn,
+ fields, prev_job_info, prev_log_serial, timeout)
+
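+ # Behaviour sketch (illustrative): callers typically loop on
+ # WaitForJobChanges(job_id, ["status"], prev_info, prev_serial, timeout),
+ # receiving (job_info, log_entries) when something changed and
+ # constants.JOB_NOTCHANGED when the timeout expired without a change.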
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def CancelJob(self, job_id):
+ """Cancels a job.
+
+ This will only succeed if the job has not started yet.
+
+ @type job_id: string
+ @param job_id: job ID of job to be cancelled.
+
+ """
+ logging.info("Cancelling job %s", job_id)
+
+ job = self._LoadJobUnlocked(job_id)
+ if not job:
+ logging.debug("Job %s not found", job_id)
+ return (False, "Job %s not found" % job_id)
+
+ (success, msg) = job.Cancel()
+
+ if success:
+ self.UpdateJobUnlocked(job)
+
+ return (success, msg)
+
+ @_RequireOpenQueue
+ def _ArchiveJobsUnlocked(self, jobs):
+ """Archives jobs.
+
+ @type jobs: list of L{_QueuedJob}
+ @param jobs: Job objects
+ @rtype: int
+ @return: Number of archived jobs
+
+ """
+ archive_jobs = []
+ rename_files = []
+ for job in jobs:
+ if job.CalcStatus() not in constants.JOBS_FINALIZED:
+ logging.debug("Job %s is not yet done", job.id)
+ continue
+
+ archive_jobs.append(job)
+
+ old = self._GetJobPath(job.id)
+ new = self._GetArchivedJobPath(job.id)
+ rename_files.append((old, new))
+
+ # TODO: What if 1..n files fail to rename?
+ self._RenameFilesUnlocked(rename_files)
+
+ logging.debug("Successfully archived job(s) %s",
+ utils.CommaJoin(job.id for job in archive_jobs))
+
+ # Since we haven't quite checked, above, if we succeeded or failed renaming
+ # the files, we update the cached queue size from the filesystem. When we
+ # get around to fixing the TODO above, we can use the number of actually
+ # archived jobs to fix this.
+ self._UpdateQueueSizeUnlocked()
+ return len(archive_jobs)
+
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def ArchiveJob(self, job_id):
+ """Archives a job.
+
+ This is just a wrapper over L{_ArchiveJobsUnlocked}.
+
+ @type job_id: string
+ @param job_id: Job ID of job to be archived.
+ @rtype: bool
+ @return: Whether job was archived
+
+ """
+ logging.info("Archiving job %s", job_id)
+
+ job = self._LoadJobUnlocked(job_id)
+ if not job:
+ logging.debug("Job %s not found", job_id)
+ return False
+
+ return self._ArchiveJobsUnlocked([job]) == 1
+
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def AutoArchiveJobs(self, age, timeout):
+ """Archives all jobs based on age.
+
+ The method will archive all jobs which are older than the age
+ parameter. For jobs that don't have an end timestamp, the start
+ timestamp will be considered. The special '-1' age will cause
+ archival of all jobs (that are not running or queued).
+
+ @type age: int
+ @param age: the minimum age in seconds
+ @type timeout: int
+ @param timeout: the maximum time (in seconds) to spend archiving jobs
+ @rtype: tuple
+ @return: a tuple of (number of jobs archived, number of jobs not
+ yet inspected)
+
+ """
+ logging.info("Archiving jobs with age more than %s seconds", age)
+
+ now = time.time()
+ end_time = now + timeout
+ archived_count = 0
+ last_touched = 0
+
+ all_job_ids = self._GetJobIDsUnlocked()
+ pending = []
+ for idx, job_id in enumerate(all_job_ids):
+ last_touched = idx + 1
+
+ # Not optimal because jobs could be pending
+ # TODO: Measure average duration for job archival and take number of
+ # pending jobs into account.
+ if time.time() > end_time:
+ break
+
+ # Returns None if the job failed to load
+ job = self._LoadJobUnlocked(job_id)
+ if job:
+ if job.end_timestamp is None:
+ if job.start_timestamp is None:
+ job_age = job.received_timestamp
+ else:
+ job_age = job.start_timestamp
+ else:
+ job_age = job.end_timestamp
+
+ if age == -1 or now - job_age[0] > age:
+ pending.append(job)
+
+ # Archive 10 jobs at a time
+ if len(pending) >= 10:
+ archived_count += self._ArchiveJobsUnlocked(pending)
+ pending = []
+
+ if pending:
+ archived_count += self._ArchiveJobsUnlocked(pending)
+
+ return (archived_count, len(all_job_ids) - last_touched)
+
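+ # Usage sketch (illustrative): AutoArchiveJobs(6 * 3600, 30) archives
+ # finalized jobs older than six hours, spending at most roughly 30 seconds,
+ # and returns (number of jobs archived, number of jobs not yet inspected).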
+ def QueryJobs(self, job_ids, fields):
+ """Returns a list of jobs in queue.
+
+ @type job_ids: list
+ @param job_ids: sequence of job identifiers or None for all
+ @type fields: list
+ @param fields: names of fields to return
+ @rtype: list
+ @return: list with one element per job, each element being a list
+ with the requested fields
+
+ """
+ jobs = []
+ list_all = False
+ if not job_ids:
+ # Since files are added to/removed from the queue atomically, there's no
+ # risk of getting the job ids in an inconsistent state.
+ job_ids = self._GetJobIDsUnlocked()
+ list_all = True
+
+ for job_id in job_ids:
+ job = self.SafeLoadJobFromDisk(job_id)
+ if job is not None:
+ jobs.append(job.GetInfo(fields))
+ elif not list_all:
+ jobs.append(None)
+
+ return jobs
+
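+ # Usage sketch (illustrative): QueryJobs(None, ["id", "status"]) returns one
+ # row per job on disk, while QueryJobs(["123", "999"], ["status"]) returns a
+ # row per requested id, with None for ids that could not be loaded.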
+ @locking.ssynchronized(_LOCK)
+ @_RequireOpenQueue
+ def Shutdown(self):
+ """Stops the job queue.
+
+ This shuts down all the worker threads and closes the queue.
+
+ """
+ self._wpool.TerminateWorkers()
+
+ self._queue_filelock.Close()
+ self._queue_filelock = None