Revision e9a81214 lib/mcpu.py

--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -48,6 +48,19 @@
 _OP_PREFIX = "Op"
 _LU_PREFIX = "LU"
 
+#: LU classes which don't need to acquire the node allocation lock
+#: (L{locking.NAL}) when they acquire all node or node resource locks
+_NODE_ALLOC_WHITELIST = frozenset()
+
+#: LU classes which don't need to acquire the node allocation lock
+#: (L{locking.NAL}) in the same mode (shared/exclusive) as the node
+#: or node resource locks
+_NODE_ALLOC_MODE_WHITELIST = frozenset([
+  cmdlib.LUBackupExport,
+  cmdlib.LUBackupRemove,
+  cmdlib.LUOobCommand,
+  ])
+
 
 class LockAcquireTimeout(Exception):
   """Exception to report timeouts on acquiring locks.
@@ -246,6 +259,44 @@
               for (node, rpc_res) in rpc_results.items())
 
 
+def _VerifyLocks(lu, glm, _mode_whitelist=_NODE_ALLOC_MODE_WHITELIST,
+                 _nal_whitelist=_NODE_ALLOC_WHITELIST):
+  """Performs consistency checks on locks acquired by a logical unit.
+
+  @type lu: L{cmdlib.LogicalUnit}
+  @param lu: Logical unit instance
+  @type glm: L{locking.GanetiLockManager}
+  @param glm: Lock manager
+
+  """
+  if not __debug__:
+    return
+
+  have_nal = glm.check_owned(locking.LEVEL_NODE_ALLOC, locking.NAL)
+
+  for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
+    # TODO: Verify using actual lock mode, not using LU variables
+    if level in lu.needed_locks:
+      share_node_alloc = lu.share_locks[locking.LEVEL_NODE_ALLOC]
+      share_level = lu.share_locks[level]
+
+      if lu.__class__ in _mode_whitelist:
+        assert share_node_alloc != share_level, \
+          "LU is whitelisted to use different modes for node allocation lock"
+      else:
+        assert bool(share_node_alloc) == bool(share_level), \
+          ("Node allocation lock must be acquired using the same mode as nodes"
+           " and node resources")
+
+      if lu.__class__ in _nal_whitelist:
+        assert not have_nal, \
+          "LU is whitelisted for not acquiring the node allocation lock"
+      elif lu.needed_locks[level] == locking.ALL_SET or glm.owning_all(level):
+        assert have_nal, \
+          ("Node allocation lock must be used if an LU acquires all nodes"
+           " or node resources")
+
+
 class Processor(object):
   """Object which runs OpCodes"""
   DISPATCH_TABLE = _ComputeDispatchTable()
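
Note on the invariants being added: _VerifyLocks asserts that the node
allocation lock (L{locking.NAL}) is held in the same mode (shared/exclusive)
as the node and node-resource locks, and that it is held at all whenever an
LU acquires all locks at those levels; the two whitelists introduced above
exempt specific LU classes from one rule each. The following self-contained
sketch demonstrates the same two rules. FakeLU, verify and the plain-string
level names are illustrative stand-ins, not Ganeti APIs, and the sketch
simplifies by checking only the ALL_SET case (the real helper also consults
glm.owning_all):

# Standalone sketch of the invariants _VerifyLocks asserts; FakeLU and
# these level names are illustrative stand-ins, not Ganeti APIs.

ALL_SET = None  # sentinel for "every lock at this level", as in Ganeti

class FakeLU(object):
  def __init__(self, needed_locks, share_locks):
    self.needed_locks = needed_locks  # level -> lock names, or ALL_SET
    self.share_locks = share_locks    # level -> 1 (shared) / 0 (exclusive)

def verify(lu, have_nal, mode_whitelist=frozenset(),
           nal_whitelist=frozenset()):
  for level in ("node", "node-res"):
    if level not in lu.needed_locks:
      continue
    share_nal = lu.share_locks["node-alloc"]
    share_level = lu.share_locks[level]
    if type(lu) in mode_whitelist:
      assert share_nal != share_level
    else:
      # Rule 1: NAL mode must match the node/node-resource lock mode
      assert bool(share_nal) == bool(share_level)
    if type(lu) in nal_whitelist:
      assert not have_nal
    elif lu.needed_locks[level] == ALL_SET:
      # Rule 2: acquiring all locks at this level requires the NAL
      assert have_nal, "NAL required when acquiring all node locks"

lu = FakeLU({"node": ALL_SET}, {"node": 1, "node-alloc": 1})
verify(lu, have_nal=True)     # consistent: passes silently
try:
  verify(lu, have_nal=False)  # all node locks without the NAL: trips
except AssertionError as err:
  print("caught:", err)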
@@ -356,9 +407,13 @@
     given LU and its opcodes.
 
     """
+    glm = self.context.glm
     adding_locks = level in lu.add_locks
     acquiring_locks = level in lu.needed_locks
+
     if level not in locking.LEVELS:
+      _VerifyLocks(lu, glm)
+
       if self._cbs:
         self._cbs.NotifyStart()
 
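Call-site note: _LockAndExecLU recurses through the lock levels, and the
"level not in locking.LEVELS" branch is the base case reached once every
level has been processed, so the new _VerifyLocks call inspects the final
lock state right before the LU body runs. A toy model of that recursion
shape (the names here are illustrative, not Ganeti's):

# Toy model of the recursion in _LockAndExecLU; LEVELS and the function
# name are illustrative stand-ins, not Ganeti's actual identifiers.
LEVELS = [0, 1, 2]

def lock_and_exec(level, held):
  if level not in LEVELS:
    # Base case: all levels acquired. This is where the change inserts
    # the _VerifyLocks(lu, glm) call in the real code.
    print("verify, then run LU with locks held:", held)
    return "result"
  held.append(level)  # acquire this level's locks
  try:
    return lock_and_exec(level + 1, held)
  finally:
    held.pop()        # release on the way out

print(lock_and_exec(0, []))
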
@@ -405,7 +460,7 @@
           lu.remove_locks[level] = add_locks
 
           try:
-            self.context.glm.add(level, add_locks, acquired=1, shared=share)
+            glm.add(level, add_locks, acquired=1, shared=share)
           except errors.LockError:
             logging.exception("Detected lock error in level %s for locks"
                               " %s, shared=%s", level, add_locks, share)
@@ -418,10 +473,10 @@
           result = self._LockAndExecLU(lu, level + 1, calc_timeout)
         finally:
           if level in lu.remove_locks:
-            self.context.glm.remove(level, lu.remove_locks[level])
+            glm.remove(level, lu.remove_locks[level])
       finally:
-        if self.context.glm.is_owned(level):
-          self.context.glm.release(level)
+        if glm.is_owned(level):
+          glm.release(level)
 
     else:
       result = self._LockAndExecLU(lu, level + 1, calc_timeout)
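
One more property worth noting: _VerifyLocks returns immediately when
__debug__ is false and otherwise relies on assert, so the whole verification
disappears when Python runs with optimizations. That is standard Python
behavior, shown by this self-contained snippet (not Ganeti code):

# Run as "python snippet.py" and the assert fires; run as
# "python -O snippet.py" and both the __debug__ guard and the assert
# statements are disabled, so the check is skipped entirely.
def _verify():
  if not __debug__:
    return
  assert False, "consistency check failed"

try:
  _verify()
  print("checks skipped (optimized run)")
except AssertionError as err:
  print("check ran:", err)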
