from ganeti import jstore
from ganeti import rpc
+
JOBQUEUE_THREADS = 25
+JOBS_PER_ARCHIVE_DIRECTORY = 10000
+
+
+class CancelJob(Exception):
+ """Special exception to cancel a job.
+
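+ Raised from within a worker thread when the job being processed has been
+ canceled; the worker catches it and marks the whole job as canceled (see
+ CancelJobUnlocked).
+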
+ """
def TimeStampNow():
status will be the same
- otherwise, the last opcode whose status is one of:
- waitlock
+ - canceling
- running
will determine the job status
status = constants.JOB_STATUS_WAITLOCK
elif op.status == constants.OP_STATUS_RUNNING:
status = constants.JOB_STATUS_RUNNING
+ elif op.status == constants.OP_STATUS_CANCELING:
+ status = constants.JOB_STATUS_CANCELING
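+ # A canceling opcode overrides everything after it, so stop scanning here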
+ break
elif op.status == constants.OP_STATUS_ERROR:
status = constants.JOB_STATUS_ERROR
# The whole job fails if one opcode failed
self.queue.acquire()
try:
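+ # While the opcode was waiting for its locks, CancelJob may have switched
+ # its status from WAITLOCK to CANCELING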
+ assert self.opcode.status in (constants.OP_STATUS_WAITLOCK,
+ constants.OP_STATUS_CANCELING)
+
+ # Cancel here if we were asked to
+ if self.opcode.status == constants.OP_STATUS_CANCELING:
+ raise CancelJob()
+
self.opcode.status = constants.OP_STATUS_RUNNING
finally:
self.queue.release()
@param job: the job to be processed
"""
- logging.debug("Worker %s processing job %s",
+ logging.info("Worker %s processing job %s",
self.worker_id, job.id)
proc = mcpu.Processor(self.pool.queue.context)
self.queue = queue = job.queue
try:
count = len(job.ops)
for idx, op in enumerate(job.ops):
+ op_summary = op.input.Summary()
try:
- logging.debug("Op %s/%s: Starting %s", idx + 1, count, op)
+ logging.info("Op %s/%s: Starting opcode %s", idx + 1, count,
+ op_summary)
queue.acquire()
try:
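+ # The job may have been canceled while it was still sitting in the
+ # worker pool queue; in that case stop processing it here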
+ if op.status == constants.OP_STATUS_CANCELED:
+ raise CancelJob()
+ assert op.status == constants.OP_STATUS_QUEUED
job.run_op_index = idx
op.status = constants.OP_STATUS_WAITLOCK
op.result = None
finally:
queue.release()
- logging.debug("Op %s/%s: Successfully finished %s",
- idx + 1, count, op)
+ logging.info("Op %s/%s: Successfully finished opcode %s",
+ idx + 1, count, op_summary)
+ except CancelJob:
+ # Will be handled further up
+ raise
except Exception, err:
queue.acquire()
try:
  try:
op.status = constants.OP_STATUS_ERROR
op.result = str(err)
op.end_timestamp = TimeStampNow()
- logging.debug("Op %s/%s: Error in %s", idx + 1, count, op)
+ logging.info("Op %s/%s: Error in opcode %s: %s",
+ idx + 1, count, op_summary, err)
finally:
queue.UpdateJobUnlocked(job)
finally:
queue.release()
raise
+ except CancelJob:
+ queue.acquire()
+ try:
+ queue.CancelJobUnlocked(job)
+ finally:
+ queue.release()
except errors.GenericError, err:
logging.exception("Ganeti exception")
except:
status = job.CalcStatus()
finally:
queue.release()
- logging.debug("Worker %s finished job %s, status = %s",
- self.worker_id, job_id, status)
+ logging.info("Worker %s finished job %s, status = %s",
+ self.worker_id, job_id, status)
class _JobQueueWorkerPool(workerpool.WorkerPool):
# Get initial list of nodes
self._nodes = dict((n.name, n.primary_ip)
- for n in self.context.cfg.GetAllNodesInfo().values())
+ for n in self.context.cfg.GetAllNodesInfo().values()
+ if n.master_candidate)
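+ # Queue files are only replicated to master candidates (see AddNode)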
# Remove master node
try:
del self._nodes[self._my_hostname]
- except ValueError:
+ except KeyError:
pass
# TODO: Check consistency across nodes
# we're still doing our work.
self.acquire()
try:
- for job in self._GetJobsUnlocked(None):
+ logging.info("Inspecting job queue")
+
+ all_job_ids = self._GetJobIDsUnlocked()
+ jobs_count = len(all_job_ids)
+ lastinfo = time.time()
+ for idx, job_id in enumerate(all_job_ids):
+ # Give an update every 1000 jobs or 10 seconds
+ if (idx % 1000 == 0 or time.time() >= (lastinfo + 10.0) or
+ idx == (jobs_count - 1)):
+ logging.info("Job queue inspection: %d/%d (%0.1f %%)",
+ idx, jobs_count - 1, 100.0 * (idx + 1) / jobs_count)
+ lastinfo = time.time()
+
+ job = self._LoadJobUnlocked(job_id)
+
# a failure in loading the job can cause 'None' to be returned
if job is None:
continue
self._wpool.AddTask(job)
elif status in (constants.JOB_STATUS_RUNNING,
- constants.JOB_STATUS_WAITLOCK):
+ constants.JOB_STATUS_WAITLOCK,
+ constants.JOB_STATUS_CANCELING):
logging.warning("Unfinished job %s found: %s", job.id, job)
try:
for op in job.ops:
op.result = "Unclean master daemon shutdown"
finally:
self.UpdateJobUnlocked(job)
+
+ logging.info("Job queue inspection finished")
finally:
self.release()
except:
# Clean queue directory on added node
rpc.RpcRunner.call_jobqueue_purge(node_name)
+ if not node.master_candidate:
+ # remove if existing, ignoring errors
+ self._nodes.pop(node_name, None)
+ # and skip the replication of the job ids
+ return
+
# Upload the whole queue excluding archived jobs
files = [self._GetJobPath(job_id) for job_id in self._GetJobIDsUnlocked()]
self._CheckRpcResult(result, self._nodes,
"Updating %s" % file_name)
- def _RenameFileUnlocked(self, old, new):
+ def _RenameFilesUnlocked(self, rename):
"""Renames a file locally and then replicate the change.
This function will rename a file in the local queue directory
and then replicate this rename to all the other nodes we have.
- @type old: str
- @param old: the current name of the file
- @type new: str
- @param new: the new name of the file
+ @type rename: list of (old, new)
+ @param rename: List containing tuples mapping old to new names
"""
- os.rename(old, new)
+ # Rename them locally
+ for old, new in rename:
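+ # mkdir=True makes RenameFile create missing target directories, so new
+ # archive subdirectories appear on demand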
+ utils.RenameFile(old, new, mkdir=True)
+ # ... and on all nodes
names, addrs = self._GetNodeIp()
- result = rpc.RpcRunner.call_jobqueue_rename(names, addrs, old, new)
- self._CheckRpcResult(result, self._nodes,
- "Moving %s to %s" % (old, new))
+ result = rpc.RpcRunner.call_jobqueue_rename(names, addrs, rename)
+ self._CheckRpcResult(result, self._nodes, "Renaming files (%r)" % rename)
def _FormatJobID(self, job_id):
"""Convert a job ID to string format.
return str(job_id)
+ @classmethod
+ def _GetArchiveDirectory(cls, job_id):
+ """Returns the archive directory for a job.
+
+ @type job_id: str
+ @param job_id: Job identifier
+ @rtype: str
+ @return: Directory name
+
+ """
+ return str(int(job_id) / JOBS_PER_ARCHIVE_DIRECTORY)
+
def _NewSerialUnlocked(self):
"""Generates a new job identifier.
"""
return os.path.join(constants.QUEUE_DIR, "job-%s" % job_id)
- @staticmethod
- def _GetArchivedJobPath(job_id):
+ @classmethod
+ def _GetArchivedJobPath(cls, job_id):
"""Returns the archived job file for a give job id.
@type job_id: str
@return: the path to the archived job file
"""
- return os.path.join(constants.JOB_QUEUE_ARCHIVE_DIR, "job-%s" % job_id)
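+ # E.g. job 40000 is now archived as <archive dir>/4/job-40000 instead of
+ # sitting directly in the archive directory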
+ path = "%s/job-%s" % (cls._GetArchiveDirectory(job_id), job_id)
+ return os.path.join(constants.JOB_QUEUE_ARCHIVE_DIR, path)
@classmethod
def _ExtractJobID(cls, name):
else:
# non-archived case
logging.exception("Can't parse job %s, will archive.", job_id)
- self._RenameFileUnlocked(filepath, new_path)
+ self._RenameFilesUnlocked([(filepath, new_path)])
return None
self._memcache[job_id] = job
utils.RemoveFile(constants.JOB_QUEUE_DRAIN_FILE)
return True
- @utils.LockedMethod
@_RequireOpenQueue
- def SubmitJob(self, ops):
+ def _SubmitJobUnlocked(self, ops):
"""Create and store a new job.
This enters the job into our job queue and also puts it on the new
"""
if self._IsQueueMarkedDrain():
- raise errors.JobQueueDrainError()
+ raise errors.JobQueueDrainError("Job queue is drained, refusing job")
+
+ # Check job queue size
+ size = len(self._ListJobFiles())
+ if size >= constants.JOB_QUEUE_SIZE_SOFT_LIMIT:
+ # TODO: Autoarchive jobs. Make sure it's not done on every job
+ # submission, though.
+ #size = ...
+ pass
+
+ if size >= constants.JOB_QUEUE_SIZE_HARD_LIMIT:
+ raise errors.JobQueueFull()
+
# Get job identifier
job_id = self._NewSerialUnlocked()
job = _QueuedJob(self, job_id, ops)
return job.id
+ @utils.LockedMethod
+ @_RequireOpenQueue
+ def SubmitJob(self, ops):
+ """Create and store a new job.
+
+ @see: L{_SubmitJobUnlocked}
+
+ """
+ return self._SubmitJobUnlocked(ops)
+
+ @utils.LockedMethod
+ @_RequireOpenQueue
+ def SubmitManyJobs(self, jobs):
+ """Create and store multiple jobs.
+
+ @see: L{_SubmitJobUnlocked}
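+ @rtype: list of (bool, string) tuples
+ @return: Per job, a (status, data) tuple; status is True when the job
+ was submitted successfully and data is its new job id, otherwise data
+ holds the error message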
+
+ """
+ results = []
+ for ops in jobs:
+ try:
+ data = self._SubmitJobUnlocked(ops)
+ status = True
+ except errors.GenericError, err:
+ data = str(err)
+ status = False
+ results.append((status, data))
+
+ return results
+
+
@_RequireOpenQueue
def UpdateJobUnlocked(self, job):
"""Update a job's on disk storage.
@param job_id: job ID of job to be cancelled.
"""
- logging.debug("Cancelling job %s", job_id)
+ logging.info("Cancelling job %s", job_id)
job = self._LoadJobUnlocked(job_id)
if not job:
logging.debug("Job %s not found", job_id)
- return
+ return (False, "Job %s not found" % job_id)
+
+ job_status = job.CalcStatus()
- if job.CalcStatus() not in (constants.JOB_STATUS_QUEUED,):
+ if job_status not in (constants.JOB_STATUS_QUEUED,
+ constants.JOB_STATUS_WAITLOCK):
logging.debug("Job %s is no longer in the queue", job.id)
- return
+ return (False, "Job %s is no longer in the queue" % job.id)
+
+ if job_status == constants.JOB_STATUS_QUEUED:
+ self.CancelJobUnlocked(job)
+ return (True, "Job %s canceled" % job.id)
+
+ elif job_status == constants.JOB_STATUS_WAITLOCK:
+ # The worker will notice the new status and cancel the job
+ try:
+ for op in job.ops:
+ op.status = constants.OP_STATUS_CANCELING
+ finally:
+ self.UpdateJobUnlocked(job)
+ return (True, "Job %s will be canceled" % job.id)
+ @_RequireOpenQueue
+ def CancelJobUnlocked(self, job):
+ """Marks a job as canceled.
+
+ """
try:
for op in job.ops:
- op.status = constants.OP_STATUS_ERROR
- op.result = "Job cancelled by request"
+ op.status = constants.OP_STATUS_CANCELED
+ op.result = "Job canceled by request"
finally:
self.UpdateJobUnlocked(job)
@_RequireOpenQueue
- def _ArchiveJobUnlocked(self, job_id):
- """Archives a job.
+ def _ArchiveJobsUnlocked(self, jobs):
+ """Archives jobs.
- @type job_id: string
- @param job_id: Job ID of job to be archived.
+ @type jobs: list of L{_QueuedJob}
+ @param jobs: Job objects
+ @rtype: int
+ @return: Number of archived jobs
"""
- logging.info("Archiving job %s", job_id)
+ archive_jobs = []
+ rename_files = []
+ for job in jobs:
+ if job.CalcStatus() not in (constants.JOB_STATUS_CANCELED,
+ constants.JOB_STATUS_SUCCESS,
+ constants.JOB_STATUS_ERROR):
+ logging.debug("Job %s is not yet done", job.id)
+ continue
- job = self._LoadJobUnlocked(job_id)
- if not job:
- logging.debug("Job %s not found", job_id)
- return
+ archive_jobs.append(job)
- if job.CalcStatus() not in (constants.JOB_STATUS_CANCELED,
- constants.JOB_STATUS_SUCCESS,
- constants.JOB_STATUS_ERROR):
- logging.debug("Job %s is not yet done", job.id)
- return
+ old = self._GetJobPath(job.id)
+ new = self._GetArchivedJobPath(job.id)
+ rename_files.append((old, new))
- old = self._GetJobPath(job.id)
- new = self._GetArchivedJobPath(job.id)
+ # TODO: What if 1..n files fail to rename?
+ self._RenameFilesUnlocked(rename_files)
- self._RenameFileUnlocked(old, new)
+ logging.debug("Successfully archived job(s) %s",
+ ", ".join(job.id for job in archive_jobs))
- logging.debug("Successfully archived job %s", job.id)
+ return len(archive_jobs)
@utils.LockedMethod
@_RequireOpenQueue
def ArchiveJob(self, job_id):
"""Archives a job.
- This is just a wrapper over L{_ArchiveJobUnlocked}.
+ This is just a wrapper over L{_ArchiveJobsUnlocked}.
@type job_id: string
@param job_id: Job ID of job to be archived.
+ @rtype: bool
+ @return: Whether job was archived
"""
- return self._ArchiveJobUnlocked(job_id)
+ logging.info("Archiving job %s", job_id)
+
+ job = self._LoadJobUnlocked(job_id)
+ if not job:
+ logging.debug("Job %s not found", job_id)
+ return False
+
+ return self._ArchiveJobsUnlocked([job]) == 1
@utils.LockedMethod
@_RequireOpenQueue
- def AutoArchiveJobs(self, age):
+ def AutoArchiveJobs(self, age, timeout):
"""Archives all jobs based on age.
The method will archive all jobs which are older than the age
logging.info("Archiving jobs with age more than %s seconds", age)
now = time.time()
- for jid in self._GetJobIDsUnlocked(archived=False):
- job = self._LoadJobUnlocked(jid)
- if job.CalcStatus() not in (constants.OP_STATUS_SUCCESS,
- constants.OP_STATUS_ERROR,
- constants.OP_STATUS_CANCELED):
- continue
- if job.end_timestamp is None:
- if job.start_timestamp is None:
- job_age = job.received_timestamp
+ end_time = now + timeout
+ archived_count = 0
+ last_touched = 0
+
+ all_job_ids = self._GetJobIDsUnlocked(archived=False)
+ pending = []
+ for idx, job_id in enumerate(all_job_ids):
+ last_touched = idx
+
+ # Not optimal because jobs could be pending
+ # TODO: Measure average duration for job archival and take number of
+ # pending jobs into account.
+ if time.time() > end_time:
+ break
+
+ # Returns None if the job failed to load
+ job = self._LoadJobUnlocked(job_id)
+ if job:
+ if job.end_timestamp is None:
+ if job.start_timestamp is None:
+ job_age = job.received_timestamp
+ else:
+ job_age = job.start_timestamp
else:
- job_age = job.start_timestamp
- else:
- job_age = job.end_timestamp
+ job_age = job.end_timestamp
+
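+ # Timestamps are (seconds, microseconds) tuples, so the age check only
+ # needs the seconds part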
+ if age == -1 or now - job_age[0] > age:
+ pending.append(job)
+
+ # Archive 10 jobs at a time
+ if len(pending) >= 10:
+ archived_count += self._ArchiveJobsUnlocked(pending)
+ pending = []
+
+ if pending:
+ archived_count += self._ArchiveJobsUnlocked(pending)
- if age == -1 or now - job_age[0] > age:
- self._ArchiveJobUnlocked(jid)
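+ # Report how many jobs were archived and how many were left uninspected
+ # so the caller can decide whether to run the archival again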
+ return (archived_count, len(all_job_ids) - last_touched - 1)
def _GetJobInfoUnlocked(self, job, fields):
"""Returns information about a job.