lib/jqueue.py @ 4c848b18

#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the job queue handling."""

import os
import logging
import threading
import errno
import re
import time

from ganeti import constants
from ganeti import serializer
from ganeti import workerpool
from ganeti import opcodes
from ganeti import errors
from ganeti import mcpu
from ganeti import utils
from ganeti import jstore
from ganeti import rpc


JOBQUEUE_THREADS = 5


class _QueuedOpCode(object):
  """Encasulates an opcode object.
47

48
  Access is synchronized by the '_lock' attribute.
49

50
  The 'log' attribute holds the execution log and consists of tuples
51
  of the form (timestamp, level, message).
52

53
  """
  def __new__(cls, *args, **kwargs):
    obj = object.__new__(cls, *args, **kwargs)
    # Create a special lock for logging
    obj._log_lock = threading.Lock()
    return obj

  def __init__(self, op):
    self.input = op
    self.status = constants.OP_STATUS_QUEUED
    self.result = None
    self.log = []

  @classmethod
  def Restore(cls, state):
    obj = _QueuedOpCode.__new__(cls)
    obj.input = opcodes.OpCode.LoadOpCode(state["input"])
    obj.status = state["status"]
    obj.result = state["result"]
    obj.log = state["log"]
    return obj

  def Serialize(self):
    self._log_lock.acquire()
    try:
      return {
        "input": self.input.__getstate__(),
        "status": self.status,
        "result": self.result,
        "log": self.log,
        }
    finally:
      self._log_lock.release()

  def Log(self, *args):
    """Append a log entry.

    """
    assert len(args) < 3

    if len(args) == 1:
      log_type = constants.ELOG_MESSAGE
      log_msg = args[0]
    else:
      log_type, log_msg = args

    self._log_lock.acquire()
    try:
      self.log.append((time.time(), log_type, log_msg))
    finally:
      self._log_lock.release()
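
  # A hedged usage sketch (not part of the original source): Log()
  # accepts either a bare message or a (log_type, message) pair:
  #
  #   op.Log("copying disks")
  #   op.Log(constants.ELOG_MESSAGE, "copying disks")
  #
  # Both append an entry of the form (timestamp, log_type, message) to
  # self.log, e.g. (1199145600.0, constants.ELOG_MESSAGE,
  # "copying disks").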

  def RetrieveLog(self, start_at=0):
    """Retrieve (a part of) the execution log.

    """
    self._log_lock.acquire()
    try:
      return self.log[start_at:]
    finally:
      self._log_lock.release()


class _QueuedJob(object):
  """In-memory job representation.

  This is what we use to track the user-submitted jobs.

  """
  def __init__(self, queue, job_id, ops):
    if not ops:
      raise errors.GenericError("A job needs at least one opcode")

    self.queue = queue
    self.id = job_id
    self.ops = [_QueuedOpCode(op) for op in ops]
    self.run_op_index = -1

  @classmethod
  def Restore(cls, queue, state):
    obj = _QueuedJob.__new__(cls)
    obj.queue = queue
    obj.id = state["id"]
    obj.ops = [_QueuedOpCode.Restore(op_state) for op_state in state["ops"]]
    obj.run_op_index = state["run_op_index"]
    return obj

  def Serialize(self):
    return {
      "id": self.id,
      "ops": [op.Serialize() for op in self.ops],
      "run_op_index": self.run_op_index,
      }
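
  # A hedged sketch of the serialized form (all field values are
  # illustrative): Serialize() returns a plain dict suitable for
  # serializer.DumpJson, and Restore() rebuilds an equivalent job
  # from it:
  #
  #   {
  #     "id": "42",
  #     "ops": [{"input": ..., "status": "queued",
  #              "result": None, "log": []}],
  #     "run_op_index": -1,
  #   }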

  def CalcStatus(self):
    status = constants.JOB_STATUS_QUEUED

    all_success = True
    for op in self.ops:
      if op.status == constants.OP_STATUS_SUCCESS:
        continue

      all_success = False

      if op.status == constants.OP_STATUS_QUEUED:
        pass
      elif op.status == constants.OP_STATUS_RUNNING:
        status = constants.JOB_STATUS_RUNNING
      elif op.status == constants.OP_STATUS_ERROR:
        status = constants.JOB_STATUS_ERROR
        # The whole job fails if one opcode failed
        break
      elif op.status == constants.OP_STATUS_CANCELED:
        status = constants.JOB_STATUS_CANCELED
        break

    if all_success:
      status = constants.JOB_STATUS_SUCCESS

    return status
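
  # A worked example (informal): with opcode statuses
  # [SUCCESS, RUNNING, QUEUED], all_success becomes False and the
  # RUNNING branch sets the job status, so CalcStatus() returns
  # JOB_STATUS_RUNNING; a single ERROR or CANCELED opcode breaks out
  # of the loop and decides the status of the whole job.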


class _JobQueueWorker(workerpool.BaseWorker):
  def RunTask(self, job):
    """Job executor.

    This function processes a job.

    """
    logging.debug("Worker %s processing job %s",
                  self.worker_id, job.id)
    proc = mcpu.Processor(self.pool.queue.context)
    queue = job.queue
    try:
      try:
        count = len(job.ops)
        for idx, op in enumerate(job.ops):
          try:
            logging.debug("Op %s/%s: Starting %s", idx + 1, count, op)

            queue.acquire()
            try:
              job.run_op_index = idx
              op.status = constants.OP_STATUS_RUNNING
              op.result = None
              queue.UpdateJobUnlocked(job)

              input_opcode = op.input
            finally:
              queue.release()

            result = proc.ExecOpCode(input_opcode, op.Log)

            queue.acquire()
            try:
              op.status = constants.OP_STATUS_SUCCESS
              op.result = result
              queue.UpdateJobUnlocked(job)
            finally:
              queue.release()

            logging.debug("Op %s/%s: Successfully finished %s",
                          idx + 1, count, op)
          except Exception, err:
            queue.acquire()
            try:
              try:
                op.status = constants.OP_STATUS_ERROR
                op.result = str(err)
                logging.debug("Op %s/%s: Error in %s", idx + 1, count, op)
              finally:
                queue.UpdateJobUnlocked(job)
            finally:
              queue.release()
            raise

      except errors.GenericError, err:
        logging.exception("Ganeti exception")
      except:
        logging.exception("Unhandled exception")
    finally:
      queue.acquire()
      try:
        job_id = job.id
        status = job.CalcStatus()
      finally:
        queue.release()
      logging.debug("Worker %s finished job %s, status = %s",
                    self.worker_id, job_id, status)


class _JobQueueWorkerPool(workerpool.WorkerPool):
  def __init__(self, queue):
    super(_JobQueueWorkerPool, self).__init__(JOBQUEUE_THREADS,
                                              _JobQueueWorker)
    self.queue = queue


class JobQueue(object):
  _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE)

  def _RequireOpenQueue(fn):
    """Decorator for "public" functions.

    This decorator should be used for all "public" functions, that is,
    functions usually called from other classes.

    Important: Use this decorator only after utils.LockedMethod!

    Example:
      @utils.LockedMethod
      @_RequireOpenQueue
      def Example(self):
        pass

    """
    def wrapper(self, *args, **kwargs):
      assert self._queue_lock is not None, "Queue should be open"
      return fn(self, *args, **kwargs)
    return wrapper

  def __init__(self, context):
    self.context = context
    self._memcache = {}
    self._my_hostname = utils.HostInfo().name

    # Locking
    self._lock = threading.Lock()
    self.acquire = self._lock.acquire
    self.release = self._lock.release

    # Initialize
    self._queue_lock = jstore.InitAndVerifyQueue(exclusive=True)

    # Read serial file
    self._last_serial = jstore.ReadSerial()
    assert self._last_serial is not None, ("Serial file was modified between"
                                           " check in jstore and here")

    # Get initial list of nodes
    self._nodes = set(self.context.cfg.GetNodeList())

    # Remove master node (discard is a no-op if it's not in the set)
    self._nodes.discard(self._my_hostname)

    # TODO: Check consistency across nodes

    # Setup worker pool
    self._wpool = _JobQueueWorkerPool(self)

    # We need to lock here because WorkerPool.AddTask() may start a job while
    # we're still doing our work.
    self.acquire()
    try:
      for job in self._GetJobsUnlocked(None):
        status = job.CalcStatus()

        if status in (constants.JOB_STATUS_QUEUED, ):
          self._wpool.AddTask(job)

        elif status in (constants.JOB_STATUS_RUNNING, ):
          logging.warning("Unfinished job %s found: %s", job.id, job)
          try:
            for op in job.ops:
              op.status = constants.OP_STATUS_ERROR
              op.result = "Unclean master daemon shutdown"
          finally:
            self.UpdateJobUnlocked(job)
    finally:
      self.release()

  def _WriteAndReplicateFileUnlocked(self, file_name, data):
    """Writes a file locally and then replicates it to all nodes.

    """
    utils.WriteFile(file_name, data=data)

    failed_nodes = 0
    result = rpc.call_upload_file(self._nodes, file_name)
    for node in self._nodes:
      if not result[node]:
        failed_nodes += 1
        logging.error("Copy of job queue file to node %s failed", node)

    # TODO: check failed_nodes

  def _FormatJobID(self, job_id):
    if not isinstance(job_id, (int, long)):
      raise errors.ProgrammerError("Job ID '%s' not numeric" % job_id)
    if job_id < 0:
      raise errors.ProgrammerError("Job ID %s is negative" % job_id)

    return str(job_id)

  def _NewSerialUnlocked(self):
    """Generates a new job identifier.

    Job identifiers are unique during the lifetime of a cluster.

    Returns: A string representing the job identifier.

    """
    # New number
    serial = self._last_serial + 1

    # Write to file
    self._WriteAndReplicateFileUnlocked(constants.JOB_QUEUE_SERIAL_FILE,
                                        "%s\n" % serial)

    # Keep it only if we were able to write the file
    self._last_serial = serial

    return self._FormatJobID(serial)
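
  # A worked example (informal): if the serial file currently holds
  # "42\n", _NewSerialUnlocked() replicates "43\n" to all nodes,
  # remembers 43 as the last serial and returns the job identifier
  # "43".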

  @staticmethod
  def _GetJobPath(job_id):
    return os.path.join(constants.QUEUE_DIR, "job-%s" % job_id)

  @staticmethod
  def _GetArchivedJobPath(job_id):
    return os.path.join(constants.JOB_QUEUE_ARCHIVE_DIR, "job-%s" % job_id)

  @classmethod
  def _ExtractJobID(cls, name):
    m = cls._RE_JOB_FILE.match(name)
    if m:
      return m.group(1)
    else:
      return None

  def _GetJobIDsUnlocked(self, archived=False):
    """Return all known job IDs.

    If the parameter archived is True, archived job IDs will be
    included. Currently this argument is unused.

    The method only looks at disk because it's a requirement that all
    jobs are present on disk (so in the _memcache we don't have any
    extra IDs).

    """
    jlist = [self._ExtractJobID(name) for name in self._ListJobFiles()]
    jlist.sort()
    return jlist

  def _ListJobFiles(self):
    return [name for name in utils.ListVisibleFiles(constants.QUEUE_DIR)
            if self._RE_JOB_FILE.match(name)]

  def _LoadJobUnlocked(self, job_id):
    if job_id in self._memcache:
      logging.debug("Found job %s in memcache", job_id)
      return self._memcache[job_id]

    filepath = self._GetJobPath(job_id)
    logging.debug("Loading job from %s", filepath)
    try:
      fd = open(filepath, "r")
    except IOError, err:
      if err.errno in (errno.ENOENT, ):
        return None
      raise
    try:
      data = serializer.LoadJson(fd.read())
    finally:
      fd.close()

    job = _QueuedJob.Restore(self, data)
    self._memcache[job_id] = job
    logging.debug("Added job %s to the cache", job_id)
    return job

  def _GetJobsUnlocked(self, job_ids):
    if not job_ids:
      job_ids = self._GetJobIDsUnlocked()

    return [self._LoadJobUnlocked(job_id) for job_id in job_ids]

  @utils.LockedMethod
  @_RequireOpenQueue
  def SubmitJob(self, ops):
    """Create and store a new job.

    This enters the job into our job queue and also puts it on the new
    queue, in order for it to be picked up by the queue processors.

    @type ops: list
    @param ops: The list of OpCodes that will become the new job.

    """
    # Get job identifier
    job_id = self._NewSerialUnlocked()
    job = _QueuedJob(self, job_id, ops)

    # Write to disk
    self.UpdateJobUnlocked(job)

    logging.debug("Added new job %s to the cache", job_id)
    self._memcache[job_id] = job

    # Add to worker pool
    self._wpool.AddTask(job)

    return job.id
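
  # A hedged usage sketch (`queue` and `op` are placeholders for a
  # JobQueue instance and any opcodes.OpCode instance; not part of the
  # original source):
  #
  #   job_id = queue.SubmitJob([op])
  #   queue.QueryJobs([job_id], ["id", "status"])
  #
  # The job is written to disk and handed to the worker pool before
  # SubmitJob returns, so it may already be running by the time the
  # caller sees the job ID.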

  @_RequireOpenQueue
  def UpdateJobUnlocked(self, job):
    filename = self._GetJobPath(job.id)
    data = serializer.DumpJson(job.Serialize(), indent=False)
    logging.debug("Writing job %s to %s", job.id, filename)
    self._WriteAndReplicateFileUnlocked(filename, data)
    self._CleanCacheUnlocked([job.id])

  def _CleanCacheUnlocked(self, exclude):
    """Clean the memory cache.

    The exclude argument contains job IDs that should not be
    cleaned.

    """
    assert isinstance(exclude, list)

    for job in self._memcache.values():
      if job.id in exclude:
        continue
      if job.CalcStatus() not in (constants.JOB_STATUS_QUEUED,
                                  constants.JOB_STATUS_RUNNING):
        logging.debug("Cleaning job %s from the cache", job.id)
        try:
          del self._memcache[job.id]
        except KeyError:
          pass

  @utils.LockedMethod
  @_RequireOpenQueue
  def CancelJob(self, job_id):
    """Cancels a job.

    @type job_id: string
    @param job_id: Job ID of job to be cancelled.

    """
    logging.debug("Cancelling job %s", job_id)

    job = self._LoadJobUnlocked(job_id)
    if not job:
      logging.debug("Job %s not found", job_id)
      return

    if job.CalcStatus() not in (constants.JOB_STATUS_QUEUED,):
      logging.debug("Job %s is no longer in the queue", job.id)
      return

    try:
      for op in job.ops:
        op.status = constants.OP_STATUS_ERROR
        op.result = "Job cancelled by request"
    finally:
      self.UpdateJobUnlocked(job)
530

    
531
  @utils.LockedMethod
532
  @_RequireOpenQueue
533
  def ArchiveJob(self, job_id):
534
    """Archives a job.
535

536
    @type job_id: string
537
    @param job_id: Job ID of job to be archived.
538

539
    """
540
    logging.debug("Archiving job %s", job_id)
541

    
542
    job = self._LoadJobUnlocked(job_id)
543
    if not job:
544
      logging.debug("Job %s not found", job_id)
545
      return
546

    
547
    if job.CalcStatus() not in (constants.JOB_STATUS_CANCELED,
548
                                constants.JOB_STATUS_SUCCESS,
549
                                constants.JOB_STATUS_ERROR):
550
      logging.debug("Job %s is not yet done", job.id)
551
      return
552

    
553
    try:
554
      old = self._GetJobPath(job.id)
555
      new = self._GetArchivedJobPath(job.id)
556

    
557
      os.rename(old, new)
558

    
559
      logging.debug("Successfully archived job %s", job.id)
560
    finally:
561
      # Cleaning the cache because we don't know what os.rename actually did
562
      # and to be on the safe side.
563
      self._CleanCacheUnlocked([])

  def _GetJobInfoUnlocked(self, job, fields):
    row = []
    for fname in fields:
      if fname == "id":
        row.append(job.id)
      elif fname == "status":
        row.append(job.CalcStatus())
      elif fname == "ops":
        row.append([op.input.__getstate__() for op in job.ops])
      elif fname == "opresult":
        row.append([op.result for op in job.ops])
      elif fname == "opstatus":
        row.append([op.status for op in job.ops])
      elif fname == "ticker":
        ji = job.run_op_index
        if ji < 0:
          lmsg = None
        else:
          lmsg = job.ops[ji].RetrieveLog(-1)
          # message might be empty here
          if lmsg:
            lmsg = lmsg[0]
          else:
            lmsg = None
        row.append(lmsg)
      else:
        raise errors.OpExecError("Invalid job query field '%s'" % fname)
    return row
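
  # A worked example (informal): for fields ["id", "status", "opstatus"]
  # and a freshly submitted two-opcode job, the returned row would look
  # like ["42", JOB_STATUS_QUEUED, [OP_STATUS_QUEUED, OP_STATUS_QUEUED]]
  # (with the constants' actual string values in place of their names).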

  @utils.LockedMethod
  @_RequireOpenQueue
  def QueryJobs(self, job_ids, fields):
    """Returns a list of jobs in queue.

    Args:
    - job_ids: Sequence of job identifiers or None for all
    - fields: Names of fields to return

    """
    jobs = []

    for job in self._GetJobsUnlocked(job_ids):
      if job is None:
        jobs.append(None)
      else:
        jobs.append(self._GetJobInfoUnlocked(job, fields))

    return jobs

  @utils.LockedMethod
  @_RequireOpenQueue
  def Shutdown(self):
    """Stops the job queue.

    """
    self._wpool.TerminateWorkers()

    self._queue_lock.Close()
    self._queue_lock = None