jqueue: Implement {Add,Remove}Node
diff --git a/lib/jqueue.py b/lib/jqueue.py
index e8a2bd5..a969478 100644
@@ -35,6 +35,8 @@ from ganeti import opcodes
 from ganeti import errors
 from ganeti import mcpu
 from ganeti import utils
+from ganeti import jstore
+from ganeti import rpc
 
 
 JOBQUEUE_THREADS = 5
@@ -49,81 +51,66 @@ class _QueuedOpCode(object):
   of the form (timestamp, level, message).
 
   """
-  def __init__(self, op):
-    self.__Setup(op, constants.OP_STATUS_QUEUED, None, [])
+  def __new__(cls, *args, **kwargs):
+    obj = object.__new__(cls, *args, **kwargs)
+    # Create a special lock for logging
+    obj._log_lock = threading.Lock()
+    return obj
 
-  def __Setup(self, input_, status, result, log):
-    self._lock = threading.Lock()
-    self.input = input_
-    self.status = status
-    self.result = result
-    self.log = log
+  def __init__(self, op):
+    self.input = op
+    self.status = constants.OP_STATUS_QUEUED
+    self.result = None
+    self.log = []
 
   @classmethod
   def Restore(cls, state):
-    obj = object.__new__(cls)
-    obj.__Setup(opcodes.OpCode.LoadOpCode(state["input"]),
-                state["status"], state["result"], state["log"])
+    obj = _QueuedOpCode.__new__(cls)
+    obj.input = opcodes.OpCode.LoadOpCode(state["input"])
+    obj.status = state["status"]
+    obj.result = state["result"]
+    obj.log = state["log"]
     return obj
 
-  @utils.LockedMethod
   def Serialize(self):
-    return {
-      "input": self.input.__getstate__(),
-      "status": self.status,
-      "result": self.result,
-      "log": self.log,
-      }
-
-  @utils.LockedMethod
-  def GetInput(self):
-    """Returns the original opcode.
-
-    """
-    return self.input
-
-  @utils.LockedMethod
-  def SetStatus(self, status, result):
-    """Update the opcode status and result.
-
-    """
-    self.status = status
-    self.result = result
-
-  @utils.LockedMethod
-  def GetStatus(self):
-    """Get the opcode status.
-
-    """
-    return self.status
-
-  @utils.LockedMethod
-  def GetResult(self):
-    """Get the opcode result.
-
-    """
-    return self.result
+    self._log_lock.acquire()
+    try:
+      return {
+        "input": self.input.__getstate__(),
+        "status": self.status,
+        "result": self.result,
+        "log": self.log,
+        }
+    finally:
+      self._log_lock.release()
 
-  @utils.LockedMethod
   def Log(self, *args):
     """Append a log entry.
 
     """
-    assert len(args) < 2
+    assert len(args) in (1, 2)
 
     if len(args) == 1:
       log_type = constants.ELOG_MESSAGE
       log_msg = args[0]
     else:
       log_type, log_msg = args
-    self.log.append((time.time(), log_type, log_msg))
 
-  @utils.LockedMethod
+    self._log_lock.acquire()
+    try:
+      self.log.append((time.time(), log_type, log_msg))
+    finally:
+      self._log_lock.release()
+
   def RetrieveLog(self, start_at=0):
     """Retrieve (a part of) the execution log.
 
     """
-    return self.log[start_at:]
+    self._log_lock.acquire()
+    try:
+      return self.log[start_at:]
+    finally:
+      self._log_lock.release()
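+
+    # Note on the lock: it replaces the old catch-all per-object _lock and is
+    # created in __new__ rather than __init__ so that Restore, which builds
+    # instances via __new__ and never runs __init__, still gets one. A
+    # stripped-down sketch of the pattern; Record and payload are hypothetical
+    # stand-ins, not Ganeti classes:
+    #
+    #   import threading
+    #
+    #   class Record(object):
+    #     def __new__(cls, *args, **kwargs):
+    #       obj = object.__new__(cls)
+    #       # The lock exists even when __init__ is never run
+    #       obj._log_lock = threading.Lock()
+    #       return obj
+    #
+    #     def __init__(self, payload):
+    #       self.payload = payload
+    #       self.log = []
+    #
+    #     @classmethod
+    #     def Restore(cls, state):
+    #       obj = Record.__new__(cls)  # deliberately bypasses __init__
+    #       obj.payload = state["payload"]
+    #       obj.log = state["log"]
+    #       return obj
+    #
+    #     def Log(self, msg):
+    #       self._log_lock.acquire()
+    #       try:
+    #         self.log.append(msg)
+    #       finally:
+    #         self._log_lock.release()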
 
 
 class _QueuedJob(object):
@@ -132,203 +119,263 @@ class _QueuedJob(object):
   This is what we use to track the user-submitted jobs.
 
   """
-  def __init__(self, storage, job_id, ops):
+  def __init__(self, queue, job_id, ops):
     if not ops:
       # TODO
       raise Exception("No opcodes")
 
-    self.__Setup(storage, job_id, [_QueuedOpCode(op) for op in ops], -1)
-
-  def __Setup(self, storage, job_id, ops, run_op_index):
-    self._lock = threading.Lock()
-    self.storage = storage
+    self.queue = queue
     self.id = job_id
-    self._ops = ops
-    self.run_op_index = run_op_index
+    self.ops = [_QueuedOpCode(op) for op in ops]
+    self.run_op_index = -1
 
   @classmethod
-  def Restore(cls, storage, state):
-    obj = object.__new__(cls)
-    op_list = [_QueuedOpCode.Restore(op_state) for op_state in state["ops"]]
-    obj.__Setup(storage, state["id"], op_list, state["run_op_index"])
+  def Restore(cls, queue, state):
+    obj = _QueuedJob.__new__(cls)
+    obj.queue = queue
+    obj.id = state["id"]
+    obj.ops = [_QueuedOpCode.Restore(op_state) for op_state in state["ops"]]
+    obj.run_op_index = state["run_op_index"]
     return obj
 
   def Serialize(self):
     return {
       "id": self.id,
-      "ops": [op.Serialize() for op in self._ops],
+      "ops": [op.Serialize() for op in self.ops],
       "run_op_index": self.run_op_index,
       }
 
-  def SetUnclean(self, msg):
-    try:
-      for op in self._ops:
-        op.SetStatus(constants.OP_STATUS_ERROR, msg)
-    finally:
-      self.storage.UpdateJob(self)
-
-  def GetStatus(self):
+  def CalcStatus(self):
     status = constants.JOB_STATUS_QUEUED
 
     all_success = True
-    for op in self._ops:
-      op_status = op.GetStatus()
-      if op_status == constants.OP_STATUS_SUCCESS:
+    for op in self.ops:
+      if op.status == constants.OP_STATUS_SUCCESS:
         continue
 
       all_success = False
 
-      if op_status == constants.OP_STATUS_QUEUED:
+      if op.status == constants.OP_STATUS_QUEUED:
         pass
-      elif op_status == constants.OP_STATUS_RUNNING:
+      elif op.status == constants.OP_STATUS_RUNNING:
         status = constants.JOB_STATUS_RUNNING
-      elif op_status == constants.OP_STATUS_ERROR:
+      elif op.status == constants.OP_STATUS_ERROR:
         status = constants.JOB_STATUS_ERROR
         # The whole job fails if one opcode failed
         break
+      elif op.status == constants.OP_STATUS_CANCELED:
+        status = constants.JOB_STATUS_CANCELED
+        break
 
     if all_success:
       status = constants.JOB_STATUS_SUCCESS
 
     return status
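+
+    # The aggregation rule: a job is SUCCESS only when every opcode
+    # succeeded, a single ERROR or CANCELED opcode decides the whole job
+    # immediately, and otherwise any RUNNING opcode makes the job RUNNING.
+    # A condensed sketch with plain strings standing in for the constants
+    # module (assumed values, for illustration only):
+    #
+    #   def calc_status(op_statuses):
+    #     status = "queued"
+    #     all_success = True
+    #     for op_status in op_statuses:
+    #       if op_status == "success":
+    #         continue
+    #       all_success = False
+    #       if op_status == "running":
+    #         status = "running"
+    #       elif op_status in ("error", "canceled"):
+    #         status = op_status  # one failed/canceled opcode decides the job
+    #         break
+    #     if all_success:
+    #       status = "success"
+    #     return status
+    #
+    #   assert calc_status(["success", "success"]) == "success"
+    #   assert calc_status(["success", "running", "queued"]) == "running"
+    #   assert calc_status(["success", "error", "queued"]) == "error"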
 
-  @utils.LockedMethod
-  def GetRunOpIndex(self):
-    return self.run_op_index
 
-  def Run(self, proc):
+class _JobQueueWorker(workerpool.BaseWorker):
+  def RunTask(self, job):
     """Job executor.
 
-    This functions processes a this job in the context of given processor
-    instance.
-
-    Args:
-    - proc: Ganeti Processor to run the job with
+    This function processes a job.
 
     """
-    try:
-      count = len(self._ops)
-      for idx, op in enumerate(self._ops):
-        try:
-          logging.debug("Op %s/%s: Starting %s", idx + 1, count, op)
-
-          self._lock.acquire()
-          try:
-            self.run_op_index = idx
-          finally:
-            self._lock.release()
-
-          op.SetStatus(constants.OP_STATUS_RUNNING, None)
-          self.storage.UpdateJob(self)
-
-          result = proc.ExecOpCode(op.input, op.Log)
-
-          op.SetStatus(constants.OP_STATUS_SUCCESS, result)
-          self.storage.UpdateJob(self)
-          logging.debug("Op %s/%s: Successfully finished %s",
-                        idx + 1, count, op)
-        except Exception, err:
-          try:
-            op.SetStatus(constants.OP_STATUS_ERROR, str(err))
-            logging.debug("Op %s/%s: Error in %s", idx + 1, count, op)
-          finally:
-            self.storage.UpdateJob(self)
-          raise
-
-    except errors.GenericError, err:
-      logging.error("ganeti exception %s", exc_info=err)
-    except Exception, err:
-      logging.error("unhandled exception %s", exc_info=err)
-    except:
-      logging.error("unhandled unknown exception %s", exc_info=err)
-
-
-class _JobQueueWorker(workerpool.BaseWorker):
-  def RunTask(self, job):
     logging.debug("Worker %s processing job %s",
                   self.worker_id, job.id)
-    # TODO: feedback function
-    proc = mcpu.Processor(self.pool.context)
+    proc = mcpu.Processor(self.pool.queue.context)
+    queue = job.queue
     try:
-      job.Run(proc)
+      try:
+        count = len(job.ops)
+        for idx, op in enumerate(job.ops):
+          try:
+            logging.debug("Op %s/%s: Starting %s", idx + 1, count, op)
+
+            queue.acquire()
+            try:
+              job.run_op_index = idx
+              op.status = constants.OP_STATUS_RUNNING
+              op.result = None
+              queue.UpdateJobUnlocked(job)
+
+              input_opcode = op.input
+            finally:
+              queue.release()
+
+            result = proc.ExecOpCode(input_opcode, op.Log)
+
+            queue.acquire()
+            try:
+              op.status = constants.OP_STATUS_SUCCESS
+              op.result = result
+              queue.UpdateJobUnlocked(job)
+            finally:
+              queue.release()
+
+            logging.debug("Op %s/%s: Successfully finished %s",
+                          idx + 1, count, op)
+          except Exception, err:
+            queue.acquire()
+            try:
+              try:
+                op.status = constants.OP_STATUS_ERROR
+                op.result = str(err)
+                logging.debug("Op %s/%s: Error in %s", idx + 1, count, op)
+              finally:
+                queue.UpdateJobUnlocked(job)
+            finally:
+              queue.release()
+            raise
+
+      except errors.GenericError, err:
+        logging.exception("Ganeti exception")
+      except:
+        logging.exception("Unhandled exception")
     finally:
+      queue.acquire()
+      try:
+        job_id = job.id
+        status = job.CalcStatus()
+      finally:
+        queue.release()
       logging.debug("Worker %s finished job %s, status = %s",
-                    self.worker_id, job.id, job.GetStatus())
+                    self.worker_id, job_id, status)
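+
+  # The locking discipline above in skeleton form: the queue lock is held
+  # while job and opcode state is mutated and written out, but not while the
+  # opcode itself executes. A sketch under that assumption; execute() stands
+  # in for proc.ExecOpCode and the string statuses for the constants:
+  #
+  #   def run_op(queue, job, idx, op, execute):
+  #     queue.acquire()
+  #     try:
+  #       job.run_op_index = idx
+  #       op.status = "running"
+  #       queue.UpdateJobUnlocked(job)
+  #       input_opcode = op.input
+  #     finally:
+  #       queue.release()
+  #
+  #     # Potentially long-running; the queue lock is NOT held here
+  #     result = execute(input_opcode, op.Log)
+  #
+  #     queue.acquire()
+  #     try:
+  #       op.status = "success"
+  #       op.result = result
+  #       queue.UpdateJobUnlocked(job)
+  #     finally:
+  #       queue.release()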
 
 
 class _JobQueueWorkerPool(workerpool.WorkerPool):
-  def __init__(self, context):
+  def __init__(self, queue):
     super(_JobQueueWorkerPool, self).__init__(JOBQUEUE_THREADS,
                                               _JobQueueWorker)
-    self.context = context
+    self.queue = queue
 
 
-class JobStorage(object):
+class JobQueue(object):
   _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE)
 
-  def __init__(self):
-    self._lock = threading.Lock()
+  def _RequireOpenQueue(fn):
+    """Decorator for "public" functions.
+
+    This decorator should be applied to all "public" functions, i.e.
+    functions usually called from other classes.
+
+    Important: Use this decorator only after utils.LockedMethod!
+
+    Example:
+      @utils.LockedMethod
+      @_RequireOpenQueue
+      def Example(self):
+        pass
+
+    """
+    def wrapper(self, *args, **kwargs):
+      assert self._queue_lock is not None, "Queue should be open"
+      return fn(self, *args, **kwargs)
+    return wrapper
+
+  def __init__(self, context):
+    self.context = context
     self._memcache = {}
+    self._my_hostname = utils.HostInfo().name
 
-    # Make sure our directory exists
-    try:
-      os.mkdir(constants.QUEUE_DIR, 0700)
-    except OSError, err:
-      if err.errno not in (errno.EEXIST, ):
-        raise
+    # Locking
+    self._lock = threading.Lock()
+    self.acquire = self._lock.acquire
+    self.release = self._lock.release
 
-    # Get queue lock
-    self.lock_fd = open(constants.JOB_QUEUE_LOCK_FILE, "w")
-    try:
-      utils.LockFile(self.lock_fd)
-    except:
-      self.lock_fd.close()
-      raise
+    # Initialize
+    self._queue_lock = jstore.InitAndVerifyQueue(exclusive=True)
 
-    # Read version
+    # Read serial file
+    self._last_serial = jstore.ReadSerial()
+    assert self._last_serial is not None, ("Serial file was modified between"
+                                           " check in jstore and here")
+
+    # Get initial list of nodes
+    self._nodes = set(self.context.cfg.GetNodeList())
+
+    # Remove master node
     try:
-      version_fd = open(constants.JOB_QUEUE_VERSION_FILE, "r")
-    except IOError, err:
-      if err.errno not in (errno.ENOENT, ):
-        raise
+      self._nodes.remove(self._my_hostname)
+    except KeyError:
+      pass
 
-      # Setup a new queue
-      self._InitQueueUnlocked()
+    # TODO: Check consistency across nodes
 
-      # Try to open again
-      version_fd = open(constants.JOB_QUEUE_VERSION_FILE, "r")
+    # Setup worker pool
+    self._wpool = _JobQueueWorkerPool(self)
 
+    # We need to lock here because WorkerPool.AddTask() may start a job while
+    # we're still doing our work.
+    self.acquire()
     try:
-      # Try to read version
-      version = int(version_fd.read(128))
+      for job in self._GetJobsUnlocked(None):
+        status = job.CalcStatus()
+
+        if status in (constants.JOB_STATUS_QUEUED, ):
+          self._wpool.AddTask(job)
 
-      # Verify version
-      if version != constants.JOB_QUEUE_VERSION:
-        raise errors.JobQueueError("Found version %s, expected %s",
-                                   version, constants.JOB_QUEUE_VERSION)
+        elif status in (constants.JOB_STATUS_RUNNING, ):
+          logging.warning("Unfinished job %s found: %s", job.id, job)
+          try:
+            for op in job.ops:
+              op.status = constants.OP_STATUS_ERROR
+              op.result = "Unclean master daemon shutdown"
+          finally:
+            self.UpdateJobUnlocked(job)
     finally:
-      version_fd.close()
+      self.release()
+
+  @utils.LockedMethod
+  @_RequireOpenQueue
+  def AddNode(self, node_name):
+    assert node_name != self._my_hostname
+
+    # TODO: Clean queue directory on added node
+
+    # Upload the whole queue excluding archived jobs
+    files = [self._GetJobPath(job_id) for job_id in self._GetJobIDsUnlocked()]
+
+    # Upload current serial file
+    files.append(constants.JOB_QUEUE_SERIAL_FILE)
+
+    for file_name in files:
+      result = rpc.call_upload_file([node_name], file_name)
+      if not result[node_name]:
+        logging.error("Failed to upload %s to %s", file_name, node_name)
+
+    self._nodes.add(node_name)
 
-    serial_fd = open(constants.JOB_QUEUE_SERIAL_FILE, "r")
+  @utils.LockedMethod
+  @_RequireOpenQueue
+  def RemoveNode(self, node_name):
     try:
-      # Read last serial
-      self._last_serial = int(serial_fd.read(1024).strip())
-    finally:
-      serial_fd.close()
+      # The queue is removed by the "leave node" RPC call.
+      self._nodes.remove(node_name)
+    except KeyError:
+      pass
+
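+  # A hedged sketch of the intended call sites; the surrounding masterd
+  # wiring and the context object are assumptions, not part of this patch:
+  #
+  #   queue = JobQueue(context)              # context: assumed GanetiContext
+  #   queue.AddNode("node2.example.com")     # replicate queue files to it
+  #   queue.RemoveNode("node2.example.com")  # stop replicating to it
+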
+  def _WriteAndReplicateFileUnlocked(self, file_name, data):
+    """Writes a file locally and then replicates it to all nodes.
+
+    """
+    utils.WriteFile(file_name, data=data)
 
-  def Close(self):
-    assert self.lock_fd, "Queue should be open"
+    failed_nodes = 0
+    result = rpc.call_upload_file(self._nodes, file_name)
+    for node in self._nodes:
+      if not result[node]:
+        failed_nodes += 1
+        logging.error("Copy of job queue file to node %s failed", node)
 
-    self.lock_fd.close()
-    self.lock_fd = None
+    # TODO: check failed_nodes
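+
+    # The write-and-replicate pattern in isolation, as a sketch; write_file
+    # and upload are stand-ins for utils.WriteFile and rpc.call_upload_file,
+    # and the error policy is still the TODO above:
+    #
+    #   import logging
+    #
+    #   def write_and_replicate(path, data, nodes, write_file, upload):
+    #     write_file(path, data=data)    # the local copy is written first
+    #     results = upload(nodes, path)  # then pushed to all other nodes
+    #     failed = [node for node in nodes if not results[node]]
+    #     for node in failed:
+    #       logging.error("Copy of %s to node %s failed", path, node)
+    #     return failed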
 
-  def _InitQueueUnlocked(self):
-    assert self.lock_fd, "Queue should be open"
+  def _FormatJobID(self, job_id):
+    if not isinstance(job_id, (int, long)):
+      raise errors.ProgrammerError("Job ID '%s' not numeric" % job_id)
+    if job_id < 0:
+      raise errors.ProgrammerError("Job ID %s is negative" % job_id)
 
-    utils.WriteFile(constants.JOB_QUEUE_VERSION_FILE,
-                    data="%s\n" % constants.JOB_QUEUE_VERSION)
-    utils.WriteFile(constants.JOB_QUEUE_SERIAL_FILE,
-                    data="%s\n" % 0)
+    return str(job_id)
 
   def _NewSerialUnlocked(self):
     """Generates a new job identifier.
@@ -338,23 +385,34 @@ class JobStorage(object):
     Returns: A string representing the job identifier.
 
     """
-    assert self.lock_fd, "Queue should be open"
-
     # New number
     serial = self._last_serial + 1
 
     # Write to file
-    utils.WriteFile(constants.JOB_QUEUE_SERIAL_FILE,
-                    data="%s\n" % serial)
+    self._WriteAndReplicateFileUnlocked(constants.JOB_QUEUE_SERIAL_FILE,
+                                        "%s\n" % serial)
 
     # Keep it only if we were able to write the file
     self._last_serial = serial
 
-    return serial
+    return self._FormatJobID(serial)
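+
+    # Condensed view of how a job ID is minted: bump the serial, persist
+    # (and replicate) it, and only then advance the in-memory copy;
+    # _FormatJobID turns the integer into the string form used everywhere
+    # else. A sketch, with persist standing in for
+    # _WriteAndReplicateFileUnlocked:
+    #
+    #   def new_job_id(last_serial, persist):
+    #     serial = last_serial + 1
+    #     persist("%s\n" % serial)    # replicate before trusting the value
+    #     return serial, str(serial) # str() is what _FormatJobID returns
+    #
+    #   last_serial, job_id = new_job_id(41, lambda data: None)
+    #   assert job_id == "42"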
 
-  def _GetJobPath(self, job_id):
+  @staticmethod
+  def _GetJobPath(job_id):
     return os.path.join(constants.QUEUE_DIR, "job-%s" % job_id)
 
+  @staticmethod
+  def _GetArchivedJobPath(job_id):
+    return os.path.join(constants.JOB_QUEUE_ARCHIVE_DIR, "job-%s" % job_id)
+
+  @classmethod
+  def _ExtractJobID(cls, name):
+    m = cls._RE_JOB_FILE.match(name)
+    if m:
+      return m.group(1)
+    else:
+      return None
+
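+  # What _ExtractJobID does, assuming constants.JOB_ID_TEMPLATE matches the
+  # decimal serials produced by _NewSerialUnlocked (a stand-in pattern is
+  # used below):
+  #
+  #   import re
+  #
+  #   _RE_JOB_FILE = re.compile(r"^job-(\d+)$")  # stand-in pattern
+  #
+  #   assert _RE_JOB_FILE.match("job-17").group(1) == "17"
+  #   assert _RE_JOB_FILE.match("job-17.tmp") is None
+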
   def _GetJobIDsUnlocked(self, archived=False):
     """Return all known job IDs.
 
@@ -366,23 +424,17 @@ class JobStorage(object):
     extra IDs).
 
     """
-    jfiles = self._ListJobFiles()
-    jlist = [int(m.group(1)) for m in
-             [self._RE_JOB_FILE.match(name) for name in jfiles]]
+    jlist = [self._ExtractJobID(name) for name in self._ListJobFiles()]
     jlist.sort()
     return jlist
 
   def _ListJobFiles(self):
-    assert self.lock_fd, "Queue should be open"
-
     return [name for name in utils.ListVisibleFiles(constants.QUEUE_DIR)
             if self._RE_JOB_FILE.match(name)]
 
   def _LoadJobUnlocked(self, job_id):
-    assert self.lock_fd, "Queue should be open"
-
     if job_id in self._memcache:
-      logging.debug("Found job %d in memcache", job_id)
+      logging.debug("Found job %s in memcache", job_id)
       return self._memcache[job_id]
 
     filepath = self._GetJobPath(job_id)
@@ -400,7 +452,7 @@ class JobStorage(object):
 
     job = _QueuedJob.Restore(self, data)
     self._memcache[job_id] = job
-    logging.debug("Added job %d to the cache", job_id)
+    logging.debug("Added job %s to the cache", job_id)
     return job
 
   def _GetJobsUnlocked(self, job_ids):
@@ -410,32 +462,38 @@ class JobStorage(object):
     return [self._LoadJobUnlocked(job_id) for job_id in job_ids]
 
   @utils.LockedMethod
-  def GetJobs(self, job_ids):
-    return self._GetJobsUnlocked(job_ids)
+  @_RequireOpenQueue
+  def SubmitJob(self, ops):
+    """Create and store a new job.
 
-  @utils.LockedMethod
-  def AddJob(self, ops):
-    assert self.lock_fd, "Queue should be open"
+    This enters the job into our job queue and also hands it to the worker
+    pool, in order for it to be picked up by the queue processors.
+
+    @type ops: list
+    @param ops: The list of OpCodes that will become the new job.
 
+    """
     # Get job identifier
     job_id = self._NewSerialUnlocked()
     job = _QueuedJob(self, job_id, ops)
 
     # Write to disk
-    self._UpdateJobUnlocked(job)
+    self.UpdateJobUnlocked(job)
 
-    logging.debug("Added new job %d to the cache", job_id)
+    logging.debug("Added new job %s to the cache", job_id)
     self._memcache[job_id] = job
 
-    return job
+    # Add to worker pool
+    self._wpool.AddTask(job)
 
-  def _UpdateJobUnlocked(self, job):
-    assert self.lock_fd, "Queue should be open"
+    return job.id
 
+  @_RequireOpenQueue
+  def UpdateJobUnlocked(self, job):
     filename = self._GetJobPath(job.id)
+    data = serializer.DumpJson(job.Serialize(), indent=False)
     logging.debug("Writing job %s to %s", job.id, filename)
-    utils.WriteFile(filename,
-                    data=serializer.DumpJson(job.Serialize(), indent=False))
+    self._WriteAndReplicateFileUnlocked(filename, data)
     self._CleanCacheUnlocked([job.id])
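+
+    # Serialize and Restore are designed as a round trip through JSON. A
+    # sketch, assuming job and queue are live objects and using the json
+    # module as a stand-in for ganeti.serializer:
+    #
+    #   import json  # stand-in for serializer.DumpJson/LoadJson
+    #
+    #   state = job.Serialize()   # {"id": ..., "ops": [...], "run_op_index": ...}
+    #   data = json.dumps(state)  # what lands in the job file on every node
+    #   restored = _QueuedJob.Restore(queue, json.loads(data))
+    #   assert restored.id == job.id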
 
   def _CleanCacheUnlocked(self, exclude):
@@ -446,86 +504,98 @@ class JobStorage(object):
 
     """
     assert isinstance(exclude, list)
+
     for job in self._memcache.values():
       if job.id in exclude:
         continue
-      if job.GetStatus() not in (constants.JOB_STATUS_QUEUED,
-                                 constants.JOB_STATUS_RUNNING):
-        logging.debug("Cleaning job %d from the cache", job.id)
+      if job.CalcStatus() not in (constants.JOB_STATUS_QUEUED,
+                                  constants.JOB_STATUS_RUNNING):
+        logging.debug("Cleaning job %s from the cache", job.id)
         try:
           del self._memcache[job.id]
         except KeyError:
           pass
 
   @utils.LockedMethod
-  def UpdateJob(self, job):
-    return self._UpdateJobUnlocked(job)
-
-  def ArchiveJob(self, job_id):
-    raise NotImplementedError()
+  @_RequireOpenQueue
+  def CancelJob(self, job_id):
+    """Cancels a job.
 
+    @type job_id: string
+    @param job_id: Job ID of job to be cancelled.
 
-class JobQueue:
-  """The job queue.
+    """
+    logging.debug("Cancelling job %s", job_id)
 
-   """
-  def __init__(self, context):
-    self._lock = threading.Lock()
-    self._jobs = JobStorage()
-    self._wpool = _JobQueueWorkerPool(context)
+    job = self._LoadJobUnlocked(job_id)
+    if not job:
+      logging.debug("Job %s not found", job_id)
+      return
 
-    for job in self._jobs.GetJobs(None):
-      status = job.GetStatus()
-      if status in (constants.JOB_STATUS_QUEUED, ):
-        self._wpool.AddTask(job)
+    if job.CalcStatus() not in (constants.JOB_STATUS_QUEUED,):
+      logging.debug("Job %s is no longer in the queue", job.id)
+      return
 
-      elif status in (constants.JOB_STATUS_RUNNING, ):
-        logging.warning("Unfinished job %s found: %s", job.id, job)
-        job.SetUnclean("Unclean master daemon shutdown")
+    try:
+      for op in job.ops:
+        op.status = constants.OP_STATUS_ERROR
+        op.result = "Job cancelled by request"
+    finally:
+      self.UpdateJobUnlocked(job)
 
   @utils.LockedMethod
-  def SubmitJob(self, ops):
-    """Add a new job to the queue.
-
-    This enters the job into our job queue and also puts it on the new
-    queue, in order for it to be picked up by the queue processors.
+  @_RequireOpenQueue
+  def ArchiveJob(self, job_id):
+    """Archives a job.
 
-    Args:
-    - ops: Sequence of opcodes
+    @type job_id: string
+    @param job_id: Job ID of job to be archived.
 
     """
-    job = self._jobs.AddJob(ops)
+    logging.debug("Archiving job %s", job_id)
 
-    # Add to worker pool
-    self._wpool.AddTask(job)
+    job = self._LoadJobUnlocked(job_id)
+    if not job:
+      logging.debug("Job %s not found", job_id)
+      return
 
-    return job.id
+    if job.CalcStatus() not in (constants.JOB_STATUS_CANCELED,
+                                constants.JOB_STATUS_SUCCESS,
+                                constants.JOB_STATUS_ERROR):
+      logging.debug("Job %s is not yet done", job.id)
+      return
 
-  def ArchiveJob(self, job_id):
-    raise NotImplementedError()
+    try:
+      old = self._GetJobPath(job.id)
+      new = self._GetArchivedJobPath(job.id)
 
-  def CancelJob(self, job_id):
-    raise NotImplementedError()
+      os.rename(old, new)
 
-  def _GetJobInfo(self, job, fields):
+      logging.debug("Successfully archived job %s", job.id)
+    finally:
+      # Clean the cache in any case; we can't be sure what state os.rename
+      # left things in, so err on the safe side.
+      self._CleanCacheUnlocked([])
+
+  def _GetJobInfoUnlocked(self, job, fields):
     row = []
     for fname in fields:
       if fname == "id":
         row.append(job.id)
       elif fname == "status":
-        row.append(job.GetStatus())
+        row.append(job.CalcStatus())
       elif fname == "ops":
-        row.append([op.GetInput().__getstate__() for op in job._ops])
+        row.append([op.input.__getstate__() for op in job.ops])
       elif fname == "opresult":
-        row.append([op.GetResult() for op in job._ops])
+        row.append([op.result for op in job.ops])
       elif fname == "opstatus":
-        row.append([op.GetStatus() for op in job._ops])
+        row.append([op.status for op in job.ops])
       elif fname == "ticker":
-        ji = job.GetRunOpIndex()
+        ji = job.run_op_index
         if ji < 0:
           lmsg = None
         else:
-          lmsg = job._ops[ji].RetrieveLog(-1)
+          lmsg = job.ops[ji].RetrieveLog(-1)
           # message might be empty here
           if lmsg:
             lmsg = lmsg[0]
@@ -536,6 +606,8 @@ class JobQueue:
         raise errors.OpExecError("Invalid job query field '%s'" % fname)
     return row
 
+  @utils.LockedMethod
+  @_RequireOpenQueue
   def QueryJobs(self, job_ids, fields):
     """Returns a list of jobs in queue.
 
@@ -544,24 +616,23 @@ class JobQueue:
     - fields: Names of fields to return
 
     """
-    self._lock.acquire()
-    try:
-      jobs = []
+    jobs = []
 
-      for job in self._jobs.GetJobs(job_ids):
-        if job is None:
-          jobs.append(None)
-        else:
-          jobs.append(self._GetJobInfo(job, fields))
+    for job in self._GetJobsUnlocked(job_ids):
+      if job is None:
+        jobs.append(None)
+      else:
+        jobs.append(self._GetJobInfoUnlocked(job, fields))
 
-      return jobs
-    finally:
-      self._lock.release()
+    return jobs
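+
+    # Usage sketch for the query interface; passing None as job_ids selects
+    # every known job, and unknown IDs come back as None rows:
+    #
+    #   rows = queue.QueryJobs(None, ["id", "status", "opstatus"])
+    #   for row in rows:
+    #     if row is None:  # unknown job ID
+    #       continue
+    #     job_id, job_status, op_statuses = row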
 
   @utils.LockedMethod
+  @_RequireOpenQueue
   def Shutdown(self):
     """Stops the job queue.
 
     """
     self._wpool.TerminateWorkers()
-    self._jobs.Close()
+
+    self._queue_lock.Close()
+    self._queue_lock = None