# lib/jqueue.py @ 8e00939c
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Module implementing the job queue handling."""

import os
import logging
import threading
import errno
import re
import time

from ganeti import constants
from ganeti import serializer
from ganeti import workerpool
from ganeti import opcodes
from ganeti import errors
from ganeti import mcpu
from ganeti import utils
from ganeti import jstore
from ganeti import rpc


JOBQUEUE_THREADS = 5

class _QueuedOpCode(object):
  """Encapsulates an opcode object.

  Access to the log is synchronized by the '_log_lock' attribute.

  The 'log' attribute holds the execution log and consists of tuples
  of the form (timestamp, level, message).

  """
  def __new__(cls, *args, **kwargs):
    obj = object.__new__(cls, *args, **kwargs)
    # Create a special lock for logging; done in __new__ rather than
    # __init__ so that instances built via Restore get a lock as well
    obj._log_lock = threading.Lock()
    return obj

  def __init__(self, op):
    self.input = op
    self.status = constants.OP_STATUS_QUEUED
    self.result = None
    self.log = []

  @classmethod
  def Restore(cls, state):
    """Restore an opcode from its serialized state.

    """
    obj = _QueuedOpCode.__new__(cls)
    obj.input = opcodes.OpCode.LoadOpCode(state["input"])
    obj.status = state["status"]
    obj.result = state["result"]
    obj.log = state["log"]
    return obj

  def Serialize(self):
    """Return a dict representation suitable for JSON serialization.

    """
    self._log_lock.acquire()
    try:
      return {
        "input": self.input.__getstate__(),
        "status": self.status,
        "result": self.result,
        "log": self.log,
        }
    finally:
      self._log_lock.release()

  def Log(self, *args):
    """Append a log entry.

    """
    assert len(args) in (1, 2)

    if len(args) == 1:
      log_type = constants.ELOG_MESSAGE
      log_msg = args[0]
    else:
      log_type, log_msg = args

    self._log_lock.acquire()
    try:
      self.log.append((time.time(), log_type, log_msg))
    finally:
      self._log_lock.release()

  def RetrieveLog(self, start_at=0):
    """Retrieve (a part of) the execution log.

    """
    self._log_lock.acquire()
    try:
      return self.log[start_at:]
    finally:
      self._log_lock.release()
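
# A minimal sketch of the _QueuedOpCode serialize/restore round trip used by
# the queue below (illustrative only; "op" stands for any opcodes.OpCode
# instance):
#
#   qop = _QueuedOpCode(op)
#   qop.Log("starting")
#   state = serializer.LoadJson(serializer.DumpJson(qop.Serialize()))
#   clone = _QueuedOpCode.Restore(state)
#   assert clone.status == constants.OP_STATUS_QUEUED
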

class _QueuedJob(object):
  """In-memory job representation.

  This is what we use to track the user-submitted jobs.

  """
  def __init__(self, queue, job_id, ops):
    if not ops:
      # TODO
      raise Exception("No opcodes")

    self.queue = queue
    self.id = job_id
    self.ops = [_QueuedOpCode(op) for op in ops]
    self.run_op_index = -1

  @classmethod
  def Restore(cls, queue, state):
    """Restore a job from its serialized state.

    """
    obj = _QueuedJob.__new__(cls)
    obj.queue = queue
    obj.id = state["id"]
    obj.ops = [_QueuedOpCode.Restore(op_state) for op_state in state["ops"]]
    obj.run_op_index = state["run_op_index"]
    return obj

  def Serialize(self):
    """Return a dict representation suitable for JSON serialization.

    """
    return {
      "id": self.id,
      "ops": [op.Serialize() for op in self.ops],
      "run_op_index": self.run_op_index,
      }

  def CalcStatus(self):
    """Compute the job status from the statuses of its opcodes.

    """
    status = constants.JOB_STATUS_QUEUED

    all_success = True
    for op in self.ops:
      if op.status == constants.OP_STATUS_SUCCESS:
        continue

      all_success = False

      if op.status == constants.OP_STATUS_QUEUED:
        pass
      elif op.status == constants.OP_STATUS_RUNNING:
        status = constants.JOB_STATUS_RUNNING
      elif op.status == constants.OP_STATUS_ERROR:
        status = constants.JOB_STATUS_ERROR
        # The whole job fails if one opcode failed
        break
      elif op.status == constants.OP_STATUS_CANCELED:
        status = constants.JOB_STATUS_CANCELED
        break

    if all_success:
      status = constants.JOB_STATUS_SUCCESS

    return status
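
# Sketch of the aggregation done by _QueuedJob.CalcStatus above (opcode
# statuses on the left, resulting job status on the right):
#
#   all opcodes SUCCESS                  -> JOB_STATUS_SUCCESS
#   any opcode RUNNING (no error seen)   -> JOB_STATUS_RUNNING
#   any opcode ERROR (short-circuits)    -> JOB_STATUS_ERROR
#   any opcode CANCELED (short-circuits) -> JOB_STATUS_CANCELED
#   otherwise (queued/success mix)       -> JOB_STATUS_QUEUED
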

class _JobQueueWorker(workerpool.BaseWorker):
  def RunTask(self, job):
    """Job executor.

    This function processes a job.

    """
    logging.debug("Worker %s processing job %s",
                  self.worker_id, job.id)
    proc = mcpu.Processor(self.pool.queue.context)
    queue = job.queue
    try:
      try:
        count = len(job.ops)
        for idx, op in enumerate(job.ops):
          try:
            logging.debug("Op %s/%s: Starting %s", idx + 1, count, op)

            queue.acquire()
            try:
              job.run_op_index = idx
              op.status = constants.OP_STATUS_RUNNING
              op.result = None
              queue.UpdateJobUnlocked(job)

              input_opcode = op.input
            finally:
              queue.release()

            # The opcode itself is executed without holding the queue lock
            result = proc.ExecOpCode(input_opcode, op.Log)

            queue.acquire()
            try:
              op.status = constants.OP_STATUS_SUCCESS
              op.result = result
              queue.UpdateJobUnlocked(job)
            finally:
              queue.release()

            logging.debug("Op %s/%s: Successfully finished %s",
                          idx + 1, count, op)
          except Exception, err:
            queue.acquire()
            try:
              try:
                op.status = constants.OP_STATUS_ERROR
                op.result = str(err)
                logging.debug("Op %s/%s: Error in %s", idx + 1, count, op)
              finally:
                queue.UpdateJobUnlocked(job)
            finally:
              queue.release()
            raise

      except errors.GenericError, err:
        logging.exception("Ganeti exception")
      except:
        logging.exception("Unhandled exception")
    finally:
      queue.acquire()
      try:
        job_id = job.id
        status = job.CalcStatus()
      finally:
        queue.release()
      logging.debug("Worker %s finished job %s, status = %s",
                    self.worker_id, job_id, status)
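
# Note on locking in RunTask above: the queue lock is held only while job
# state is mutated and flushed to disk; proc.ExecOpCode itself runs with the
# lock released, so a long-running opcode does not block job queries.
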

class _JobQueueWorkerPool(workerpool.WorkerPool):
  """Simple class implementing a job-processing worker pool.

  """
  def __init__(self, queue):
    super(_JobQueueWorkerPool, self).__init__(JOBQUEUE_THREADS,
                                              _JobQueueWorker)
    self.queue = queue

class JobQueue(object):
  """Implements the queueing and execution of cluster jobs.

  """
  _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE)

  def _RequireOpenQueue(fn):
    """Decorator for "public" functions.

    This decorator should be used for all "public" functions, that is,
    functions usually called from other classes.

    Important: Use this decorator only after utils.LockedMethod!

    Example:
      @utils.LockedMethod
      @_RequireOpenQueue
      def Example(self):
        pass

    """
    def wrapper(self, *args, **kwargs):
      assert self._queue_lock is not None, "Queue should be open"
      return fn(self, *args, **kwargs)
    return wrapper
  def __init__(self, context):
    self.context = context
    self._memcache = {}
    self._my_hostname = utils.HostInfo().name

    # Locking
    self._lock = threading.Lock()
    self.acquire = self._lock.acquire
    self.release = self._lock.release

    # Initialize
    self._queue_lock = jstore.InitAndVerifyQueue(exclusive=True)

    # Read serial file
    self._last_serial = jstore.ReadSerial()
    assert self._last_serial is not None, ("Serial file was modified between"
                                           " check in jstore and here")

    # Get initial list of nodes
    self._nodes = set(self.context.cfg.GetNodeList())

    # Remove master node; discard() is a no-op if it is not in the set
    # (set.remove would raise KeyError, not ValueError)
    self._nodes.discard(self._my_hostname)

    # TODO: Check consistency across nodes

    # Setup worker pool
    self._wpool = _JobQueueWorkerPool(self)

    # We need to lock here because WorkerPool.AddTask() may start a job while
    # we're still doing our work.
    self.acquire()
    try:
      for job in self._GetJobsUnlocked(None):
        status = job.CalcStatus()

        if status in (constants.JOB_STATUS_QUEUED, ):
          self._wpool.AddTask(job)

        elif status in (constants.JOB_STATUS_RUNNING, ):
          logging.warning("Unfinished job %s found: %s", job.id, job)
          try:
            for op in job.ops:
              op.status = constants.OP_STATUS_ERROR
              op.result = "Unclean master daemon shutdown"
          finally:
            self.UpdateJobUnlocked(job)
    finally:
      self.release()
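
  # Note on the recovery pass in __init__ above: jobs found QUEUED are simply
  # handed back to the worker pool, while jobs found RUNNING cannot be safely
  # resumed and are failed with "Unclean master daemon shutdown".
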
  def _WriteAndReplicateFileUnlocked(self, file_name, data):
    """Writes a file locally and then replicates it to all nodes.

    """
    utils.WriteFile(file_name, data=data)

    failed_nodes = 0
    result = rpc.call_upload_file(self._nodes, file_name)
    for node in self._nodes:
      if not result[node]:
        failed_nodes += 1
        logging.error("Copy of job queue file to node %s failed", node)

    # TODO: check failed_nodes
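
  # The result of rpc.call_upload_file above is treated as a mapping from
  # node name to a truthy/falsy success flag; failed nodes are only counted
  # and logged for now (see the TODO above).
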
  def _FormatJobID(self, job_id):
    """Convert a numeric job ID to a string.

    """
    if not isinstance(job_id, (int, long)):
      raise errors.ProgrammerError("Job ID '%s' not numeric" % job_id)
    if job_id < 0:
      raise errors.ProgrammerError("Job ID %s is negative" % job_id)

    return str(job_id)

  def _NewSerialUnlocked(self, nodes):
    """Generates a new job identifier.

    Job identifiers are unique during the lifetime of a cluster. The
    nodes argument is currently unused.

    Returns: A string representing the job identifier.

    """
    # New number
    serial = self._last_serial + 1

    # Write to file
    self._WriteAndReplicateFileUnlocked(constants.JOB_QUEUE_SERIAL_FILE,
                                        "%s\n" % serial)

    # Keep it only if we were able to write the file
    self._last_serial = serial

    return self._FormatJobID(serial)
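
  # The serial file holds just the last allocated job number plus a newline:
  # after three submissions it contains "3\n", and the next job allocated
  # becomes "job-4" on disk (see _GetJobPath below).
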
  @staticmethod
  def _GetJobPath(job_id):
    return os.path.join(constants.QUEUE_DIR, "job-%s" % job_id)

  @staticmethod
  def _GetArchivedJobPath(job_id):
    return os.path.join(constants.JOB_QUEUE_ARCHIVE_DIR, "job-%s" % job_id)

  @classmethod
  def _ExtractJobID(cls, name):
    m = cls._RE_JOB_FILE.match(name)
    if m:
      return m.group(1)
    else:
      return None

  def _GetJobIDsUnlocked(self, archived=False):
    """Return all known job IDs.

    If the parameter archived is True, archived job IDs will be
    included. Currently this argument is unused.

    The method only looks at disk because it's a requirement that all
    jobs are present on disk (so in the _memcache we don't have any
    extra IDs).

    """
    jlist = [self._ExtractJobID(name) for name in self._ListJobFiles()]
    jlist.sort()
    return jlist
  def _ListJobFiles(self):
    return [name for name in utils.ListVisibleFiles(constants.QUEUE_DIR)
            if self._RE_JOB_FILE.match(name)]

  def _LoadJobUnlocked(self, job_id):
    if job_id in self._memcache:
      logging.debug("Found job %s in memcache", job_id)
      return self._memcache[job_id]

    filepath = self._GetJobPath(job_id)
    logging.debug("Loading job from %s", filepath)
    try:
      fd = open(filepath, "r")
    except IOError, err:
      if err.errno in (errno.ENOENT, ):
        return None
      raise
    try:
      data = serializer.LoadJson(fd.read())
    finally:
      fd.close()

    job = _QueuedJob.Restore(self, data)
    self._memcache[job_id] = job
    logging.debug("Added job %s to the cache", job_id)
    return job
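
  # _LoadJobUnlocked gives the memcache priority over disk: a job file is
  # parsed only on a cache miss, and the resulting _QueuedJob is then cached
  # so that all callers keep operating on one shared instance.
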
  def _GetJobsUnlocked(self, job_ids):
    if not job_ids:
      job_ids = self._GetJobIDsUnlocked()

    return [self._LoadJobUnlocked(job_id) for job_id in job_ids]

  @utils.LockedMethod
  @_RequireOpenQueue
  def SubmitJob(self, ops, nodes):
    """Create and store a new job.

    This enters the job into our job queue and also puts it on the new
    queue, in order for it to be picked up by the queue processors.

    @type ops: list
    @param ops: The list of OpCodes that will become the new job.
    @type nodes: list
    @param nodes: The list of nodes to which the new job serial will be
      distributed.

    """
    # Get job identifier
    job_id = self._NewSerialUnlocked(nodes)
    job = _QueuedJob(self, job_id, ops)

    # Write to disk
    self.UpdateJobUnlocked(job)

    logging.debug("Added new job %s to the cache", job_id)
    self._memcache[job_id] = job

    # Add to worker pool
    self._wpool.AddTask(job)

    return job.id
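
  # A minimal submission sketch (illustrative only; assumes "queue" is an
  # open JobQueue and that opcodes.OpTestDelay exists as in this codebase):
  #
  #   job_id = queue.SubmitJob([opcodes.OpTestDelay(duration=1)], [])
  #   queue.QueryJobs([job_id], ["id", "status"])
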
  @_RequireOpenQueue
  def UpdateJobUnlocked(self, job):
    """Write a job to disk and replicate it to the other nodes.

    """
    filename = self._GetJobPath(job.id)
    data = serializer.DumpJson(job.Serialize(), indent=False)
    logging.debug("Writing job %s to %s", job.id, filename)
    self._WriteAndReplicateFileUnlocked(filename, data)
    self._CleanCacheUnlocked([job.id])
  def _CleanCacheUnlocked(self, exclude):
    """Clean the memory cache.

    The exclude argument contains job IDs that should not be
    cleaned.

    """
    assert isinstance(exclude, list)

    for job in self._memcache.values():
      if job.id in exclude:
        continue
      if job.CalcStatus() not in (constants.JOB_STATUS_QUEUED,
                                  constants.JOB_STATUS_RUNNING):
        logging.debug("Cleaning job %s from the cache", job.id)
        try:
          del self._memcache[job.id]
        except KeyError:
          pass
  @utils.LockedMethod
  @_RequireOpenQueue
  def CancelJob(self, job_id):
    """Cancels a job.

    @type job_id: string
    @param job_id: Job ID of job to be cancelled.

    """
    logging.debug("Cancelling job %s", job_id)

    job = self._LoadJobUnlocked(job_id)
    if not job:
      logging.debug("Job %s not found", job_id)
      return

    if job.CalcStatus() not in (constants.JOB_STATUS_QUEUED,):
      logging.debug("Job %s is no longer in the queue", job.id)
      return

    try:
      for op in job.ops:
        op.status = constants.OP_STATUS_ERROR
        op.result = "Job cancelled by request"
    finally:
      self.UpdateJobUnlocked(job)
  @utils.LockedMethod
  @_RequireOpenQueue
  def ArchiveJob(self, job_id):
    """Archives a job.

    @type job_id: string
    @param job_id: Job ID of job to be archived.

    """
    logging.debug("Archiving job %s", job_id)

    job = self._LoadJobUnlocked(job_id)
    if not job:
      logging.debug("Job %s not found", job_id)
      return

    if job.CalcStatus() not in (constants.JOB_STATUS_CANCELED,
                                constants.JOB_STATUS_SUCCESS,
                                constants.JOB_STATUS_ERROR):
      logging.debug("Job %s is not yet done", job.id)
      return

    try:
      old = self._GetJobPath(job.id)
      new = self._GetArchivedJobPath(job.id)

      os.rename(old, new)

      logging.debug("Successfully archived job %s", job.id)
    finally:
      # Cleaning the cache because we don't know what os.rename actually did
      # and to be on the safe side.
      self._CleanCacheUnlocked([])
  def _GetJobInfoUnlocked(self, job, fields):
    """Compute one row of job query output for the given fields.

    """
    row = []
    for fname in fields:
      if fname == "id":
        row.append(job.id)
      elif fname == "status":
        row.append(job.CalcStatus())
      elif fname == "ops":
        row.append([op.input.__getstate__() for op in job.ops])
      elif fname == "opresult":
        row.append([op.result for op in job.ops])
      elif fname == "opstatus":
        row.append([op.status for op in job.ops])
      elif fname == "ticker":
        ji = job.run_op_index
        if ji < 0:
          lmsg = None
        else:
          lmsg = job.ops[ji].RetrieveLog(-1)
          # message might be empty here
          if lmsg:
            lmsg = lmsg[0]
          else:
            lmsg = None
        row.append(lmsg)
      else:
        raise errors.OpExecError("Invalid job query field '%s'" % fname)
    return row
  @utils.LockedMethod
  @_RequireOpenQueue
  def QueryJobs(self, job_ids, fields):
    """Returns a list of jobs in queue.

    Args:
    - job_ids: Sequence of job identifiers or None for all
    - fields: Names of fields to return

    """
    jobs = []

    for job in self._GetJobsUnlocked(job_ids):
      if job is None:
        jobs.append(None)
      else:
        jobs.append(self._GetJobInfoUnlocked(job, fields))

    return jobs
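
  # Valid field names for QueryJobs are the branches of _GetJobInfoUnlocked
  # above: "id", "status", "ops", "opresult", "opstatus" and "ticker"; any
  # other name raises OpExecError.
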
  @utils.LockedMethod
  @_RequireOpenQueue
  def Shutdown(self):
    """Stops the job queue.

    """
    self._wpool.TerminateWorkers()

    self._queue_lock.Close()
    self._queue_lock = None
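
# Shutdown order matters: TerminateWorkers() first waits for the worker
# threads, and only afterwards is the exclusive queue lock closed, so no
# other process can take over the queue while jobs are still being written.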