Statistics
| Branch: | Tag: | Revision:

root / lib / locking.py @ 6605411d

History | View | Annotate | Download (29.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21
"""Module implementing the Ganeti locking code."""
22

    
23
# pylint: disable-msg=W0613,W0201
24

    
25
import threading
26
# Wouldn't it be better to define LockingError in the locking module?
27
# Well, for now that's how the rest of the code does it...
28
from ganeti import errors
29
from ganeti import utils
30

    
31

    
32
def ssynchronized(lock, shared=0):
  """Shared Synchronization decorator.

  Calls the function holding the given lock, either in exclusive or shared
  mode. It requires the passed lock to be a SharedLock (or support its
  semantics).

  Args:
    lock: the lock to hold around the call (must support
          acquire(shared=...)/release())
    shared: non-zero to acquire the lock in shared mode, 0 (default) for
            exclusive mode

  """
  def wrap(fn):
    def sync_function(*args, **kwargs):
      lock.acquire(shared=shared)
      try:
        return fn(*args, **kwargs)
      finally:
        # Release even if fn() raises, so the lock can never leak.
        lock.release()
    # Preserve the wrapped function's metadata so introspection, logging
    # and tracebacks report the original name/docstring instead of
    # "sync_function".
    sync_function.__name__ = fn.__name__
    sync_function.__doc__ = fn.__doc__
    return sync_function
  return wrap
49

    
50

    
51
class SharedLock:
  """Implements a shared lock.

  Multiple threads can acquire the lock in a shared way, calling
  acquire_shared().  In order to acquire the lock in an exclusive way threads
  can call acquire_exclusive().

  The lock prevents starvation but does not guarantee that threads will acquire
  the shared lock in the order they queued for it, just that they will
  eventually do so.

  """
  def __init__(self):
    """Construct a new SharedLock"""
    # we have two conditions, c_shr and c_exc, sharing the same lock.
    self.__lock = threading.Lock()
    self.__turn_shr = threading.Condition(self.__lock)
    self.__turn_exc = threading.Condition(self.__lock)

    # current lock holders: the set of sharer threads, and the single
    # exclusive holder (None when nobody holds it exclusively)
    self.__shr = set()
    self.__exc = None

    # lock waiters: counts of threads currently blocked in acquire().
    # __npass_shr tracks sharers that an exclusive release() has scheduled
    # to pass but that have not yet registered themselves as holders; it is
    # used by the internal fairness assertions.
    self.__nwait_exc = 0
    self.__nwait_shr = 0
    self.__npass_shr = 0

    # is this lock in the deleted state?
    self.__deleted = False

  def __is_sharer(self):
    """Is the current thread sharing the lock at this time?"""
    return threading.currentThread() in self.__shr

  def __is_exclusive(self):
    """Is the current thread holding the lock exclusively at this time?"""
    return threading.currentThread() == self.__exc

  def __is_owned(self, shared=-1):
    """Is the current thread somehow owning the lock at this time?

    This is a private version of the function, which presumes you're holding
    the internal lock.

    """
    if shared < 0:
      return self.__is_sharer() or self.__is_exclusive()
    elif shared:
      return self.__is_sharer()
    else:
      return self.__is_exclusive()

  def _is_owned(self, shared=-1):
    """Is the current thread somehow owning the lock at this time?

    Args:
      shared:
        < 0: check for any type of ownership (default)
        0: check for exclusive ownership
        > 0: check for shared ownership

    """
    # Take the internal lock so the answer is consistent with concurrent
    # acquire()/release() calls by other threads.
    self.__lock.acquire()
    try:
      result = self.__is_owned(shared=shared)
    finally:
      self.__lock.release()

    return result

  def __wait(self, c):
    """Wait on the given condition, and raise an exception if the current lock
    is declared deleted in the meantime.

    Must be called while holding the internal lock (as Condition.wait()
    requires).

    Args:
      c: condition to wait on

    Raises:
      errors.LockError: if the lock was delete()d while we were waiting

    """
    c.wait()
    if self.__deleted:
      raise errors.LockError('deleted lock')

  def __exclusive_acquire(self):
    """Acquire the lock exclusively.

    This is a private function that presumes you are already holding the
    internal lock. It's defined separately to avoid code duplication between
    acquire() and delete()

    """
    self.__nwait_exc += 1
    try:
      # This is to save ourselves from a nasty race condition that could
      # theoretically make the sharers starve.
      if self.__nwait_shr > 0 or self.__nwait_exc > 1:
        self.__wait(self.__turn_exc)

      # Wait until there are neither sharers nor another exclusive holder.
      while len(self.__shr) > 0 or self.__exc is not None:
        self.__wait(self.__turn_exc)

      self.__exc = threading.currentThread()
    finally:
      self.__nwait_exc -= 1

    # No sharer scheduled to pass may still be pending once we got the
    # exclusive ownership.
    assert self.__npass_shr == 0, "SharedLock: internal fairness violation"

  def acquire(self, blocking=1, shared=0):
    """Acquire a shared lock.

    Args:
      shared: whether to acquire in shared mode. By default an exclusive lock
              will be acquired.
      blocking: whether to block while trying to acquire or to operate in
                try-lock mode. this locking mode is not supported yet.

    Returns:
      True (always, since only blocking mode is implemented)

    Raises:
      errors.LockError: if the lock is (or gets) deleted

    """
    if not blocking:
      # We don't have non-blocking mode for now
      raise NotImplementedError

    self.__lock.acquire()
    try:
      if self.__deleted:
        raise errors.LockError('deleted lock')

      # We cannot acquire the lock if we already have it
      assert not self.__is_owned(), "double acquire() on a non-recursive lock"
      assert self.__npass_shr >= 0, "Internal fairness condition weirdness"

      if shared:
        self.__nwait_shr += 1
        try:
          wait = False
          # If there is an exclusive holder waiting we have to wait.  We'll
          # only do this once, though, when we start waiting for the lock. Then
          # we'll just wait while there are no exclusive holders.
          if self.__nwait_exc > 0:
            # TODO: if !blocking...
            wait = True
            self.__wait(self.__turn_shr)

          while self.__exc is not None:
            wait = True
            # TODO: if !blocking...
            self.__wait(self.__turn_shr)

          self.__shr.add(threading.currentThread())

          # If we were waiting note that we passed
          if wait:
            self.__npass_shr -= 1

        finally:
          self.__nwait_shr -= 1

        assert self.__npass_shr >= 0, "Internal fairness condition weirdness"
      else:
        # TODO: if !blocking...
        # (or modify __exclusive_acquire for non-blocking mode)
        self.__exclusive_acquire()

    finally:
      self.__lock.release()

    return True

  def release(self):
    """Release a Shared Lock.

    You must have acquired the lock, either in shared or in exclusive mode,
    before calling this function.

    """
    self.__lock.acquire()
    try:
      assert self.__npass_shr >= 0, "Internal fairness condition weirdness"
      # Autodetect release type
      if self.__is_exclusive():
        self.__exc = None

        # An exclusive holder has just had the lock, time to put it in shared
        # mode if there are shared holders waiting. Otherwise wake up the next
        # exclusive holder.
        if self.__nwait_shr > 0:
          # Make sure at least the ones which were blocked pass.
          self.__npass_shr = self.__nwait_shr
          self.__turn_shr.notifyAll()
        elif self.__nwait_exc > 0:
          self.__turn_exc.notify()

      elif self.__is_sharer():
        self.__shr.remove(threading.currentThread())

        # If there are shared holders waiting (and not just scheduled to pass)
        # there *must* be an exclusive holder waiting as well; otherwise what
        # were they waiting for?
        assert (self.__nwait_exc > 0 or self.__npass_shr == self.__nwait_shr), \
               "Lock sharers waiting while no exclusive is queueing"

        # If there are no more shared holders either in or scheduled to pass,
        # and some exclusive holders are waiting let's wake one up.
        if (len(self.__shr) == 0 and
            self.__nwait_exc > 0 and
            not self.__npass_shr > 0):
          self.__turn_exc.notify()

      else:
        assert False, "Cannot release non-owned lock"

    finally:
      self.__lock.release()

  def delete(self, blocking=1):
    """Delete a Shared Lock.

    This operation will declare the lock for removal. First the lock will be
    acquired in exclusive mode if you don't already own it, then the lock
    will be put in a state where any future and pending acquire() fail.

    Args:
      blocking: whether to block while trying to acquire or to operate in
                try-lock mode.  this locking mode is not supported yet unless
                you are already holding exclusively the lock.

    Raises:
      errors.LockError: if the lock was already deleted

    """
    self.__lock.acquire()
    try:
      assert not self.__is_sharer(), "cannot delete() a lock while sharing it"

      if self.__deleted:
        raise errors.LockError('deleted lock')

      if not self.__is_exclusive():
        if not blocking:
          # We don't have non-blocking mode for now
          raise NotImplementedError
        self.__exclusive_acquire()

      self.__deleted = True
      self.__exc = None
      # Wake up everybody, they will fail acquiring the lock and
      # raise an exception instead.
      self.__turn_exc.notifyAll()
      self.__turn_shr.notifyAll()

    finally:
      self.__lock.release()
299

    
300

    
301
# Whenever we want to acquire a full LockSet we pass None as the value to acquire.
302
# Hide this behing this nicely named constant.
303
ALL_SET = None
304

    
305

    
306
class LockSet:
  """Implements a set of locks.

  This abstraction implements a set of shared locks for the same resource type,
  distinguished by name. The user can lock a subset of the resources and the
  LockSet will take care of acquiring the locks always in the same order, thus
  preventing deadlock.

  All the locks needed in the same set must be acquired together, though.

  """
  def __init__(self, members=None):
    """Constructs a new LockSet.

    Args:
      members: initial members of the set

    """
    # Used internally to guarantee coherency.
    self.__lock = SharedLock()

    # The lockdict indexes the relationship name -> lock
    # The order-of-locking is implied by the alphabetical order of names
    self.__lockdict = {}

    if members is not None:
      for name in members:
        self.__lockdict[name] = SharedLock()

    # The owner dict contains the set of locks each thread owns. For
    # performance each thread can access its own key without a global lock on
    # this structure. It is paramount though that *no* other type of access is
    # done to this structure (eg. no looping over its keys). *_owner helper
    # function are defined to guarantee access is correct, but in general never
    # do anything different than __owners[threading.currentThread()], or there
    # will be trouble.
    self.__owners = {}

  def _is_owned(self):
    """Is the current thread a current level owner?"""
    return threading.currentThread() in self.__owners

  def _add_owned(self, name=None):
    """Note the current thread owns the given lock.

    With name=None only mark that the thread owns the set-level lock.

    """
    if name is None:
      if not self._is_owned():
        self.__owners[threading.currentThread()] = set()
    else:
      if self._is_owned():
        self.__owners[threading.currentThread()].add(name)
      else:
        self.__owners[threading.currentThread()] = set([name])

  def _del_owned(self, name=None):
    """Note the current thread no longer owns the given lock.

    With name=None only drop the set-level ownership note.

    """
    if name is not None:
      self.__owners[threading.currentThread()].remove(name)

    # Only remove the key if we don't hold the set-lock as well
    if (not self.__lock._is_owned() and
        not self.__owners[threading.currentThread()]):
      del self.__owners[threading.currentThread()]

  def _list_owned(self):
    """Get the set of resource names owned by the current thread"""
    if self._is_owned():
      return self.__owners[threading.currentThread()].copy()
    else:
      return set()

  def __names(self):
    """Return the current set of names.

    Only call this function while holding __lock and don't iterate on the
    result after releasing the lock.

    """
    return self.__lockdict.keys()

  def _names(self):
    """Return a copy of the current set of elements.

    Used only for debugging purposes.

    """
    # If we don't already own the set-level lock acquired
    # we'll get it and note we need to release it later.
    release_lock = False
    if not self.__lock._is_owned():
      release_lock = True
      self.__lock.acquire(shared=1)
    try:
      result = self.__names()
    finally:
      if release_lock:
        self.__lock.release()
    return set(result)

  def acquire(self, names, blocking=1, shared=0):
    """Acquire a set of resource locks.

    Args:
      names: the names of the locks which shall be acquired.
             (special lock names, or instance/node names)
      shared: whether to acquire in shared mode. By default an exclusive lock
              will be acquired.
      blocking: whether to block while trying to acquire or to operate in
                try-lock mode.  this locking mode is not supported yet.

    Returns:
      The set of lock names effectively acquired.

    Raises:
      errors.LockError: when any lock we try to acquire has been deleted
      before we succeed. In this case none of the locks requested will be
      acquired.

    """
    if not blocking:
      # We don't have non-blocking mode for now
      raise NotImplementedError

    # Check we don't already own locks at this level
    assert not self._is_owned(), "Cannot acquire locks in the same set twice"

    if names is None:
      # If no names are given acquire the whole set by not letting new names
      # being added before we release, and getting the current list of names.
      # Some of them may then be deleted later, but we'll cope with this.
      #
      # We'd like to acquire this lock in a shared way, as it's nice if
      # everybody else can use the instances at the same time. If we are
      # acquiring them exclusively though they won't be able to do this
      # anyway, so we'll get the list lock exclusively as well in order to
      # be able to do add() on the set while owning it.
      self.__lock.acquire(shared=shared)
      try:
        # note we own the set-lock
        self._add_owned()
        names = self.__names()
      except:
        # We shouldn't have problems adding the lock to the owners list, but
        # if we did we'll try to release this lock and re-raise exception.
        # Of course something is going to be really wrong, after this.
        self.__lock.release()
        raise

    try:
      # Support passing in a single resource to acquire rather than many
      if isinstance(names, basestring):
        names = [names]
      else:
        # Sort to guarantee a global locking order and thus prevent deadlock.
        names.sort()

      acquire_list = []
      # First we look the locks up on __lockdict. We have no way of being sure
      # they will still be there after, but this makes it a lot faster should
      # just one of them be already missing.
      for lname in utils.UniqueSequence(names):
        try:
          lock = self.__lockdict[lname] # raises KeyError if lock is not there
          acquire_list.append((lname, lock))
        except KeyError:
          if self.__lock._is_owned():
            # We are acquiring all the set, it doesn't matter if this
            # particular element is not there anymore.
            continue
          else:
            raise errors.LockError('non-existing lock in set (%s)' % lname)

      # This will hold the locknames we effectively acquired.
      acquired = set()
      # Now acquire_list contains a sorted list of resources and locks we want.
      # In order to get them we loop on this (private) list and acquire() them.
      # We gave no real guarantee they will still exist till this is done but
      # .acquire() itself is safe and will alert us if the lock gets deleted.
      for (lname, lock) in acquire_list:
        try:
          lock.acquire(shared=shared) # raises LockError if the lock is deleted
          # now the lock cannot be deleted, we have it!
          self._add_owned(name=lname)
          acquired.add(lname)
        except errors.LockError:
          if self.__lock._is_owned():
            # We are acquiring all the set, it doesn't matter if this
            # particular element is not there anymore.
            continue
          else:
            name_fail = lname
            # Roll back: release everything acquired so far, so that on
            # failure none of the requested locks remain held.
            for lname in self._list_owned():
              self.__lockdict[lname].release()
              self._del_owned(name=lname)
            raise errors.LockError('non-existing lock in set (%s)' % name_fail)
        except:
          # We shouldn't have problems adding the lock to the owners list, but
          # if we did we'll try to release this lock and re-raise exception.
          # Of course something is going to be really wrong, after this.
          if lock._is_owned():
            lock.release()
          # BUGFIX: re-raise unconditionally. Previously the raise was inside
          # the "if lock._is_owned()" branch, so when the lock was not owned
          # the exception was silently swallowed and the loop went on.
          raise

    except:
      # If something went wrong and we had the set-lock let's release it...
      if self.__lock._is_owned():
        self.__lock.release()
      raise

    return acquired

  def release(self, names=None):
    """Release a set of resource locks, at the same level.

    You must have acquired the locks, either in shared or in exclusive mode,
    before releasing them.

    Args:
      names: the names of the locks which shall be released.
             (defaults to all the locks acquired at that level).

    """
    assert self._is_owned(), "release() on lock set while not owner"

    # Support passing in a single resource to release rather than many
    if isinstance(names, basestring):
      names = [names]

    if names is None:
      names = self._list_owned()
    else:
      names = set(names)
      assert self._list_owned().issuperset(names), (
               "release() on unheld resources %s" %
               names.difference(self._list_owned()))

    # First of all let's release the "all elements" lock, if set.
    # After this 'add' can work again
    if self.__lock._is_owned():
      self.__lock.release()
      self._del_owned()

    for lockname in names:
      # If we are sure the lock doesn't leave __lockdict without being
      # exclusively held we can do this...
      self.__lockdict[lockname].release()
      self._del_owned(name=lockname)

  def add(self, names, acquired=0, shared=0):
    """Add a new set of elements to the set

    Args:
      names: names of the new elements to add
      acquired: pre-acquire the new resource?
      shared: is the pre-acquisition shared?

    Returns:
      True (on success)

    Raises:
      errors.LockError: if any of the names is already present

    """
    # Check we don't already own locks at this level
    assert not self._is_owned() or self.__lock._is_owned(shared=0), \
      "Cannot add locks if the set is only partially owned, or shared"

    # Support passing in a single resource to add rather than many
    if isinstance(names, basestring):
      names = [names]

    # If we don't already own the set-level lock acquired in an exclusive way
    # we'll get it and note we need to release it later.
    release_lock = False
    if not self.__lock._is_owned():
      release_lock = True
      self.__lock.acquire()

    try:
      invalid_names = set(self.__names()).intersection(names)
      if invalid_names:
        # This must be an explicit raise, not an assert, because assert is
        # turned off when using optimization, and this can happen because of
        # concurrency even if the user doesn't want it.
        raise errors.LockError("duplicate add() (%s)" % invalid_names)

      for lockname in names:
        lock = SharedLock()

        if acquired:
          lock.acquire(shared=shared)
          # now the lock cannot be deleted, we have it!
          try:
            self._add_owned(name=lockname)
          except:
            # We shouldn't have problems adding the lock to the owners list,
            # but if we did we'll try to release this lock and re-raise
            # exception.  Of course something is going to be really wrong,
            # after this.  On the other hand the lock hasn't been added to the
            # __lockdict yet so no other threads should be pending on it. This
            # release is just a safety measure.
            lock.release()
            raise

        self.__lockdict[lockname] = lock

    finally:
      # Only release __lock if we were not holding it previously.
      if release_lock:
        self.__lock.release()

    return True

  def remove(self, names, blocking=1):
    """Remove elements from the lock set.

    You can either not hold anything in the lockset or already hold a superset
    of the elements you want to delete, exclusively.

    Args:
      names: names of the resource to remove.
      blocking: whether to block while trying to acquire or to operate in
                try-lock mode.  this locking mode is not supported yet unless
                you are already holding exclusively the locks.

    Returns:
      A list of lock which we removed. The list is always equal to the names
      list if we were holding all the locks exclusively.

    """
    if not blocking and not self._is_owned():
      # We don't have non-blocking mode for now
      raise NotImplementedError

    # Support passing in a single resource to remove rather than many
    if isinstance(names, basestring):
      names = [names]

    # If we own any subset of this lock it must be a superset of what we want
    # to delete. The ownership must also be exclusive, but that will be checked
    # by the lock itself.
    assert not self._is_owned() or self._list_owned().issuperset(names), (
      "remove() on acquired lockset while not owning all elements")

    removed = []

    for lname in names:
      # Calling delete() acquires the lock exclusively if we don't already own
      # it, and causes all pending and subsequent lock acquires to fail. It's
      # fine to call it out of order because delete() also implies release(),
      # and the assertion above guarantees that if we either already hold
      # everything we want to delete, or we hold none.
      try:
        self.__lockdict[lname].delete()
        removed.append(lname)
      except (KeyError, errors.LockError):
        # This cannot happen if we were already holding it, verify:
        assert not self._is_owned(), "remove failed while holding lockset"
      else:
        # If no LockError was raised we are the ones who deleted the lock.
        # This means we can safely remove it from lockdict, as any further or
        # pending delete() or acquire() will fail (and nobody can have the lock
        # since before our call to delete()).
        #
        # This is done in an else clause because if the exception was thrown
        # it's the job of the one who actually deleted it.
        del self.__lockdict[lname]
        # And let's remove it from our private list if we owned it.
        if self._is_owned():
          self._del_owned(name=lname)

    return removed
672

    
673

    
674
# Locking levels, must be acquired in increasing order.
# Current rules are:
#   - at level LEVEL_CLUSTER resides the Big Ganeti Lock (BGL) which must be
#   acquired before performing any operation, either in shared or in exclusive
#   mode. acquiring the BGL in exclusive mode is discouraged and should be
#   avoided.
#   - at levels LEVEL_NODE and LEVEL_INSTANCE reside node and instance locks.
#   If you need more than one node, or more than one instance, acquire them at
#   the same time.
LEVEL_CLUSTER = 0
LEVEL_INSTANCE = 1
LEVEL_NODE = 2

# All levels, in the order they must be acquired.
# NOTE: code elsewhere relies on LEVELS[i] == i (see _upper_owned).
LEVELS = [LEVEL_CLUSTER,
          LEVEL_INSTANCE,
          LEVEL_NODE]

# Lock levels which are modifiable
LEVELS_MOD = [LEVEL_NODE, LEVEL_INSTANCE]

# Constant for the big ganeti lock
BGL = 'BGL'
696

    
697

    
698
class GanetiLockManager:
  """The Ganeti Locking Library

  The purpose of this small library is to manage locking for ganeti clusters
  in a central place, while at the same time doing dynamic checks against
  possible deadlocks. It will also make it easier to transition to a different
  lock type should we migrate away from python threads.

  """
  # Singleton guard: there may be at most one GanetiLockManager per process.
  _instance = None

  def __init__(self, nodes=None, instances=None):
    """Constructs a new GanetiLockManager object.

    There should be only a GanetiLockManager object at any time, so this
    function raises an error if this is not the case.

    Args:
      nodes: list of node names
      instances: list of instance names

    """
    assert self.__class__._instance is None, "double GanetiLockManager instance"
    self.__class__._instance = self

    # The keyring contains all the locks, at their level and in the correct
    # locking order.
    self.__keyring = {
      LEVEL_CLUSTER: LockSet([BGL]),
      LEVEL_NODE: LockSet(nodes),
      LEVEL_INSTANCE: LockSet(instances),
    }

  def _names(self, level):
    """List the lock names at the given level.

    Used for debugging/testing purposes.

    Args:
      level: the level whose list of locks to get

    """
    assert level in LEVELS, "Invalid locking level %s" % level
    return self.__keyring[level]._names()

  def _is_owned(self, level):
    """Check whether we are owning locks at the given level

    """
    return self.__keyring[level]._is_owned()

  # Public alias of _is_owned, kept for external callers.
  is_owned = _is_owned

  def _list_owned(self, level):
    """Get the set of owned locks at the given level

    """
    return self.__keyring[level]._list_owned()

  def _upper_owned(self, level):
    """Check that we don't own any lock at a level greater than the given one.

    """
    # This way of checking only works if LEVELS[i] = i, which we check for in
    # the test cases.
    return utils.any((self._is_owned(l) for l in LEVELS[level + 1:]))

  def _BGL_owned(self):
    """Check if the current thread owns the BGL.

    Both an exclusive or a shared acquisition work.

    """
    return BGL in self.__keyring[LEVEL_CLUSTER]._list_owned()

  def _contains_BGL(self, level, names):
    """Check if acting on the given level and set of names will change the
    status of the Big Ganeti Lock.

    """
    # names is None means "the whole set", which includes the BGL at
    # LEVEL_CLUSTER.
    return level == LEVEL_CLUSTER and (names is None or BGL in names)

  def acquire(self, level, names, blocking=1, shared=0):
    """Acquire a set of resource locks, at the same level.

    Args:
      level: the level at which the locks shall be acquired.
             It must be a member of LEVELS.
      names: the names of the locks which shall be acquired.
             (special lock names, or instance/node names)
      shared: whether to acquire in shared mode. By default an exclusive lock
              will be acquired.
      blocking: whether to block while trying to acquire or to operate in
                try-lock mode.  this locking mode is not supported yet.

    """
    assert level in LEVELS, "Invalid locking level %s" % level

    # Check that we are either acquiring the Big Ganeti Lock or we already own
    # it. Some "legacy" opcodes need to be sure they are run non-concurrently
    # so even if we've migrated we need to at least share the BGL to be
    # compatible with them. Of course if we own the BGL exclusively there's no
    # point in acquiring any other lock, unless perhaps we are half way through
    # the migration of the current opcode.
    assert (self._contains_BGL(level, names) or self._BGL_owned()), (
            "You must own the Big Ganeti Lock before acquiring any other")

    # Check we don't own locks at the same or upper levels.
    assert not self._upper_owned(level), ("Cannot acquire locks at a level"
           " while owning some at a greater one")

    # Acquire the locks in the set.
    return self.__keyring[level].acquire(names, shared=shared,
                                         blocking=blocking)

  def release(self, level, names=None):
    """Release a set of resource locks, at the same level.

    You must have acquired the locks, either in shared or in exclusive mode,
    before releasing them.

    Args:
      level: the level at which the locks shall be released.
             It must be a member of LEVELS.
      names: the names of the locks which shall be released.
             (defaults to all the locks acquired at that level).

    """
    assert level in LEVELS, "Invalid locking level %s" % level
    assert (not self._contains_BGL(level, names) or
            not self._upper_owned(LEVEL_CLUSTER)), (
            "Cannot release the Big Ganeti Lock while holding something"
            " at upper levels")

    # Release will complain if we don't own the locks already
    return self.__keyring[level].release(names)

  def add(self, level, names, acquired=0, shared=0):
    """Add locks at the specified level.

    Args:
      level: the level at which the locks shall be added.
             It must be a member of LEVELS_MOD.
      names: names of the locks to acquire
      acquired: whether to acquire the newly added locks
      shared: whether the acquisition will be shared
    """
    assert level in LEVELS_MOD, "Invalid or immutable level %s" % level
    assert self._BGL_owned(), ("You must own the BGL before performing other"
           " operations")
    assert not self._upper_owned(level), ("Cannot add locks at a level"
           " while owning some at a greater one")
    return self.__keyring[level].add(names, acquired=acquired, shared=shared)

  def remove(self, level, names, blocking=1):
    """Remove locks from the specified level.

    You must either already own the locks you are trying to remove exclusively
    or not own any lock at an upper level.

    Args:
      level: the level at which the locks shall be removed.
             It must be a member of LEVELS_MOD.
      names: the names of the locks which shall be removed.
             (special lock names, or instance/node names)
      blocking: whether to block while trying to operate in try-lock mode.
                this locking mode is not supported yet.

    """
    assert level in LEVELS_MOD, "Invalid or immutable level %s" % level
    assert self._BGL_owned(), ("You must own the BGL before performing other"
           " operations")
    # Check we either own the level or don't own anything from here up.
    # LockSet.remove() will check the case in which we don't own all the needed
    # resources, or we have a shared ownership.
    assert self._is_owned(level) or not self._upper_owned(level), (
           "Cannot remove locks at a level while not owning it or"
           " owning some at a greater one")
    return self.__keyring[level].remove(names, blocking=blocking)