
root / lib / locking.py @ 9216a9f7

#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Module implementing the Ganeti locking code."""

# pylint: disable-msg=W0613,W0201

import threading
# Wouldn't it be better to define LockingError in the locking module?
# Well, for now that's how the rest of the code does it...
from ganeti import errors
from ganeti import utils


def ssynchronized(lock, shared=0):
  """Shared Synchronization decorator.

  Calls the function holding the given lock, either in exclusive or shared
  mode. It requires the passed lock to be a SharedLock (or support its
  semantics).

  """
  def wrap(fn):
    def sync_function(*args, **kwargs):
      lock.acquire(shared=shared)
      try:
        return fn(*args, **kwargs)
      finally:
        lock.release()
    return sync_function
  return wrap
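
# Illustrative sketch, not part of the original module: how ssynchronized is
# typically used. The lock and function names below are hypothetical; the
# decorator simply wraps every call in lock.acquire(shared=...)/lock.release().
# SharedLock is defined further down in this module, so the example is wrapped
# in a function and only resolves the name when called.
def _ssynchronized_example():
  cfg_lock = SharedLock()

  @ssynchronized(cfg_lock, shared=1)
  def read_config():
    # Runs with cfg_lock held in shared mode.
    return "config data"

  @ssynchronized(cfg_lock)
  def write_config():
    # Runs with cfg_lock held exclusively.
    return True

  read_config()
  write_config()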


class SharedLock:
  """Implements a shared lock.

  Multiple threads can acquire the lock in a shared way, calling
  acquire_shared().  In order to acquire the lock in an exclusive way threads
  can call acquire_exclusive().

  The lock prevents starvation but does not guarantee that threads will acquire
  the shared lock in the order they queued for it, just that they will
  eventually do so.

  """
  def __init__(self):
    """Construct a new SharedLock"""
    # we have two conditions, c_shr and c_exc, sharing the same lock.
    self.__lock = threading.Lock()
    self.__turn_shr = threading.Condition(self.__lock)
    self.__turn_exc = threading.Condition(self.__lock)

    # current lock holders
    self.__shr = set()
    self.__exc = None

    # lock waiters
    self.__nwait_exc = 0
    self.__nwait_shr = 0
    self.__npass_shr = 0

    # is this lock in the deleted state?
    self.__deleted = False

  def __is_sharer(self):
    """Is the current thread sharing the lock at this time?"""
    return threading.currentThread() in self.__shr

  def __is_exclusive(self):
    """Is the current thread holding the lock exclusively at this time?"""
    return threading.currentThread() == self.__exc

  def __is_owned(self, shared=-1):
    """Is the current thread somehow owning the lock at this time?

    This is a private version of the function, which presumes you're holding
    the internal lock.

    """
    if shared < 0:
      return self.__is_sharer() or self.__is_exclusive()
    elif shared:
      return self.__is_sharer()
    else:
      return self.__is_exclusive()

  def _is_owned(self, shared=-1):
    """Is the current thread somehow owning the lock at this time?

    @param shared:
        - < 0: check for any type of ownership (default)
        - 0: check for exclusive ownership
        - > 0: check for shared ownership

    """
    self.__lock.acquire()
    try:
      result = self.__is_owned(shared=shared)
    finally:
      self.__lock.release()

    return result

  def __wait(self, c):
    """Wait on the given condition, and raise an exception if the current lock
    is declared deleted in the meantime.

    @param c: the condition to wait on

    """
    c.wait()
    if self.__deleted:
      raise errors.LockError('deleted lock')

  def __exclusive_acquire(self):
    """Acquire the lock exclusively.

    This is a private function that presumes you are already holding the
    internal lock. It's defined separately to avoid code duplication between
    acquire() and delete()

    """
    self.__nwait_exc += 1
    try:
      # This is to save ourselves from a nasty race condition that could
      # theoretically make the sharers starve.
      if self.__nwait_shr > 0 or self.__nwait_exc > 1:
        self.__wait(self.__turn_exc)

      while len(self.__shr) > 0 or self.__exc is not None:
        self.__wait(self.__turn_exc)

      self.__exc = threading.currentThread()
    finally:
      self.__nwait_exc -= 1

    assert self.__npass_shr == 0, "SharedLock: internal fairness violation"

  def __shared_acquire(self):
    """Acquire the lock in shared mode

    This is a private function that presumes you are already holding the
    internal lock.

    """
    self.__nwait_shr += 1
    try:
      wait = False
      # If there is an exclusive holder waiting we have to wait.
      # We'll only do this once, though, when we start waiting for
      # the lock. Then we'll just wait while there are no
      # exclusive holders.
      if self.__nwait_exc > 0:
        # TODO: if !blocking...
        wait = True
        self.__wait(self.__turn_shr)

      while self.__exc is not None:
        wait = True
        # TODO: if !blocking...
        self.__wait(self.__turn_shr)

      self.__shr.add(threading.currentThread())

      # If we were waiting note that we passed
      if wait:
        self.__npass_shr -= 1

    finally:
      self.__nwait_shr -= 1

    assert self.__npass_shr >= 0, "Internal fairness condition weirdness"

  def acquire(self, blocking=1, shared=0):
    """Acquire a shared lock.

    @param shared: whether to acquire in shared mode; by default an
        exclusive lock will be acquired
    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode (this locking mode is not supported yet)

    """
    if not blocking:
      # We don't have non-blocking mode for now
      raise NotImplementedError

    self.__lock.acquire()
    try:
      if self.__deleted:
        raise errors.LockError('deleted lock')

      # We cannot acquire the lock if we already have it
      assert not self.__is_owned(), "double acquire() on a non-recursive lock"
      assert self.__npass_shr >= 0, "Internal fairness condition weirdness"

      if shared:
        self.__shared_acquire()
      else:
        # TODO: if !blocking...
        # (or modify __exclusive_acquire for non-blocking mode)
        self.__exclusive_acquire()

    finally:
      self.__lock.release()

    return True

  def release(self):
    """Release a Shared Lock.

    You must have acquired the lock, either in shared or in exclusive mode,
    before calling this function.

    """
    self.__lock.acquire()
    try:
      assert self.__npass_shr >= 0, "Internal fairness condition weirdness"
      # Autodetect release type
      if self.__is_exclusive():
        self.__exc = None

        # An exclusive holder has just had the lock, time to put it in shared
        # mode if there are shared holders waiting. Otherwise wake up the next
        # exclusive holder.
        if self.__nwait_shr > 0:
          # Make sure at least the ones which were blocked pass.
          self.__npass_shr = self.__nwait_shr
          self.__turn_shr.notifyAll()
        elif self.__nwait_exc > 0:
          self.__turn_exc.notify()

      elif self.__is_sharer():
        self.__shr.remove(threading.currentThread())

        # If there are shared holders waiting (and not just scheduled to pass)
        # there *must* be an exclusive holder waiting as well; otherwise what
        # were they waiting for?
        assert (self.__nwait_exc > 0 or
                self.__npass_shr == self.__nwait_shr), \
                "Lock sharers waiting while no exclusive is queueing"

        # If there are no more shared holders either in or scheduled to pass,
        # and some exclusive holders are waiting let's wake one up.
        if (len(self.__shr) == 0 and
            self.__nwait_exc > 0 and
            not self.__npass_shr > 0):
          self.__turn_exc.notify()

      else:
        assert False, "Cannot release non-owned lock"

    finally:
      self.__lock.release()

  def delete(self, blocking=1):
    """Delete a Shared Lock.

    This operation will declare the lock for removal. First the lock will be
    acquired in exclusive mode if you don't already own it, then the lock
    will be put in a state where any future and pending acquire() fail.

    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode.  this locking mode is not supported
        yet unless you are already holding exclusively the lock.

    """
    self.__lock.acquire()
    try:
      assert not self.__is_sharer(), "cannot delete() a lock while sharing it"

      if self.__deleted:
        raise errors.LockError('deleted lock')

      if not self.__is_exclusive():
        if not blocking:
          # We don't have non-blocking mode for now
          raise NotImplementedError
        self.__exclusive_acquire()

      self.__deleted = True
      self.__exc = None
      # Wake up everybody, they will fail acquiring the lock and
      # raise an exception instead.
      self.__turn_exc.notifyAll()
      self.__turn_shr.notifyAll()

    finally:
      self.__lock.release()
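
# Illustrative sketch, not part of the original module: basic SharedLock usage.
# Several threads may hold the lock with shared=1 at the same time, the default
# (exclusive) acquire excludes everybody else, and delete() makes any further
# acquire() raise errors.LockError.
def _sharedlock_example():
  lock = SharedLock()

  lock.acquire(shared=1)        # shared ("read") access
  assert lock._is_owned(shared=1)
  lock.release()

  lock.acquire()                # exclusive ("write") access
  lock.release()

  lock.delete()                 # acquires exclusively, then marks it deleted
  try:
    lock.acquire()
  except errors.LockError:
    pass                        # expected: the lock has been deleted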


# Whenever we want to acquire a full LockSet we pass None as the value
# to acquire.  Hide this behind this nicely named constant.
ALL_SET = None


class LockSet:
  """Implements a set of locks.

  This abstraction implements a set of shared locks for the same resource type,
  distinguished by name. The user can lock a subset of the resources and the
  LockSet will take care of acquiring the locks always in the same order, thus
  preventing deadlock.

  All the locks needed in the same set must be acquired together, though.

  """
  def __init__(self, members=None):
    """Constructs a new LockSet.

    @param members: initial members of the set

    """
    # Used internally to guarantee coherency.
    self.__lock = SharedLock()

    # The lockdict indexes the relationship name -> lock
    # The order-of-locking is implied by the alphabetical order of names
    self.__lockdict = {}

    if members is not None:
      for name in members:
        self.__lockdict[name] = SharedLock()

    # The owner dict contains the set of locks each thread owns. For
    # performance each thread can access its own key without a global lock on
    # this structure. It is paramount though that *no* other type of access is
    # done to this structure (eg. no looping over its keys). *_owner helper
    # functions are defined to guarantee access is correct, but in general never
    # do anything different than __owners[threading.currentThread()], or there
    # will be trouble.
    self.__owners = {}

  def _is_owned(self):
    """Is the current thread a current level owner?"""
    return threading.currentThread() in self.__owners

  def _add_owned(self, name=None):
    """Note the current thread owns the given lock"""
    if name is None:
      if not self._is_owned():
        self.__owners[threading.currentThread()] = set()
    else:
      if self._is_owned():
        self.__owners[threading.currentThread()].add(name)
      else:
        self.__owners[threading.currentThread()] = set([name])

  def _del_owned(self, name=None):
    """Note the current thread no longer owns the given lock"""

    if name is not None:
      self.__owners[threading.currentThread()].remove(name)

    # Only remove the key if we don't hold the set-lock as well
    if (not self.__lock._is_owned() and
        not self.__owners[threading.currentThread()]):
      del self.__owners[threading.currentThread()]

  def _list_owned(self):
    """Get the set of resource names owned by the current thread"""
    if self._is_owned():
      return self.__owners[threading.currentThread()].copy()
    else:
      return set()

  def __names(self):
    """Return the current set of names.

    Only call this function while holding __lock and don't iterate on the
    result after releasing the lock.

    """
    return self.__lockdict.keys()

  def _names(self):
    """Return a copy of the current set of elements.

    Used only for debugging purposes.

    """
    # If we don't already own the set-level lock, acquire it now
    # and note we need to release it later.
    release_lock = False
    if not self.__lock._is_owned():
      release_lock = True
      self.__lock.acquire(shared=1)
    try:
      result = self.__names()
    finally:
      if release_lock:
        self.__lock.release()
    return set(result)

  def acquire(self, names, blocking=1, shared=0):
    """Acquire a set of resource locks.

    @param names: the names of the locks which shall be acquired
        (special lock names, or instance/node names)
    @param shared: whether to acquire in shared mode; by default an
        exclusive lock will be acquired
    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode (this locking mode is not supported yet)

    @return: the set of lock names actually acquired

    @raise errors.LockError: when any lock we try to acquire has
        been deleted before we succeed. In this case none of the
        locks requested will be acquired.

    """
    if not blocking:
      # We don't have non-blocking mode for now
      raise NotImplementedError

    # Check we don't already own locks at this level
    assert not self._is_owned(), "Cannot acquire locks in the same set twice"

    if names is None:
      # If no names are given acquire the whole set by not letting new names
      # be added before we release, and getting the current list of names.
      # Some of them may then be deleted later, but we'll cope with this.
      #
      # We'd like to acquire this lock in a shared way, as it's nice if
      # everybody else can use the instances at the same time. If we are
      # acquiring them exclusively they won't be able to do that anyway,
      # so we'll get the list lock exclusively as well in order to be able
      # to do add() on the set while owning it.
      self.__lock.acquire(shared=shared)
      try:
        # note we own the set-lock
        self._add_owned()
        names = self.__names()
      except:
        # We shouldn't have problems adding the lock to the owners list, but
        # if we did we'll try to release this lock and re-raise exception.
        # Of course something is going to be really wrong, after this.
        self.__lock.release()
        raise

    try:
      # Support passing in a single resource to acquire rather than many
      if isinstance(names, basestring):
        names = [names]
      else:
        names = sorted(names)

      acquire_list = []
      # First we look the locks up on __lockdict. We have no way of being sure
      # they will still be there after, but this makes it a lot faster should
      # just one of them be already missing.
      for lname in utils.UniqueSequence(names):
        try:
          lock = self.__lockdict[lname] # raises KeyError if lock is not there
          acquire_list.append((lname, lock))
        except (KeyError):
          if self.__lock._is_owned():
            # We are acquiring all the set, it doesn't matter if this
            # particular element is not there anymore.
            continue
          else:
            raise errors.LockError('non-existing lock in set (%s)' % lname)

      # This will hold the locknames we effectively acquired.
      acquired = set()
      # Now acquire_list contains a sorted list of resources and locks we want.
      # In order to get them we loop on this (private) list and acquire() them.
      # We gave no real guarantee they will still exist till this is done but
      # .acquire() itself is safe and will alert us if the lock gets deleted.
      for (lname, lock) in acquire_list:
        try:
          lock.acquire(shared=shared) # raises LockError if the lock is deleted
          # now the lock cannot be deleted, we have it!
          self._add_owned(name=lname)
          acquired.add(lname)
        except (errors.LockError):
          if self.__lock._is_owned():
            # We are acquiring all the set, it doesn't matter if this
            # particular element is not there anymore.
            continue
          else:
            name_fail = lname
            for lname in self._list_owned():
              self.__lockdict[lname].release()
              self._del_owned(name=lname)
            raise errors.LockError('non-existing lock in set (%s)' % name_fail)
        except:
          # We shouldn't have problems adding the lock to the owners list, but
          # if we did we'll try to release this lock and re-raise exception.
          # Of course something is going to be really wrong, after this.
          if lock._is_owned():
            lock.release()
          raise

    except:
      # If something went wrong and we had the set-lock let's release it...
      if self.__lock._is_owned():
        self.__lock.release()
      raise

    return acquired

  def release(self, names=None):
    """Release a set of resource locks, at the same level.

    You must have acquired the locks, either in shared or in exclusive mode,
    before releasing them.

    @param names: the names of the locks which shall be released
        (defaults to all the locks acquired at that level).

    """
    assert self._is_owned(), "release() on lock set while not owner"

    # Support passing in a single resource to release rather than many
    if isinstance(names, basestring):
      names = [names]

    if names is None:
      names = self._list_owned()
    else:
      names = set(names)
      assert self._list_owned().issuperset(names), (
               "release() on unheld resources %s" %
               names.difference(self._list_owned()))

    # First of all let's release the "all elements" lock, if set.
    # After this 'add' can work again
    if self.__lock._is_owned():
      self.__lock.release()
      self._del_owned()

    for lockname in names:
      # If we are sure the lock doesn't leave __lockdict without being
      # exclusively held we can do this...
      self.__lockdict[lockname].release()
      self._del_owned(name=lockname)

  def add(self, names, acquired=0, shared=0):
    """Add a new set of elements to the set

    @param names: names of the new elements to add
    @param acquired: pre-acquire the new resource?
    @param shared: is the pre-acquisition shared?

    """
    # Check we don't already own locks at this level
    assert not self._is_owned() or self.__lock._is_owned(shared=0), \
      "Cannot add locks if the set is only partially owned, or shared"

    # Support passing in a single resource to add rather than many
    if isinstance(names, basestring):
      names = [names]

    # If we don't already own the set-level lock acquired in an exclusive way
    # we'll get it and note we need to release it later.
    release_lock = False
    if not self.__lock._is_owned():
      release_lock = True
      self.__lock.acquire()

    try:
      invalid_names = set(self.__names()).intersection(names)
      if invalid_names:
        # This must be an explicit raise, not an assert, because assert is
        # turned off when using optimization, and this can happen because of
        # concurrency even if the user doesn't want it.
        raise errors.LockError("duplicate add() (%s)" % invalid_names)

      for lockname in names:
        lock = SharedLock()

        if acquired:
          lock.acquire(shared=shared)
          # now the lock cannot be deleted, we have it!
          try:
            self._add_owned(name=lockname)
          except:
            # We shouldn't have problems adding the lock to the owners list,
            # but if we did we'll try to release this lock and re-raise
            # exception.  Of course something is going to be really wrong,
            # after this.  On the other hand the lock hasn't been added to the
            # __lockdict yet so no other threads should be pending on it. This
            # release is just a safety measure.
            lock.release()
            raise

        self.__lockdict[lockname] = lock

    finally:
      # Only release __lock if we were not holding it previously.
      if release_lock:
        self.__lock.release()

    return True

  def remove(self, names, blocking=1):
    """Remove elements from the lock set.

    You can either not hold anything in the lockset or already hold a superset
    of the elements you want to delete, exclusively.

    @param names: names of the resource to remove.
    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode (this locking mode is not supported
        yet unless you are already holding exclusively the locks)

    @return: a list of locks which we removed; the list is always
        equal to the names list if we were holding all the locks
        exclusively

    """
    if not blocking and not self._is_owned():
      # We don't have non-blocking mode for now
      raise NotImplementedError

    # Support passing in a single resource to remove rather than many
    if isinstance(names, basestring):
      names = [names]

    # If we own any subset of this lock it must be a superset of what we want
    # to delete. The ownership must also be exclusive, but that will be checked
    # by the lock itself.
    assert not self._is_owned() or self._list_owned().issuperset(names), (
      "remove() on acquired lockset while not owning all elements")

    removed = []

    for lname in names:
      # Calling delete() acquires the lock exclusively if we don't already own
      # it, and causes all pending and subsequent lock acquires to fail. It's
      # fine to call it out of order because delete() also implies release(),
      # and the assertion above guarantees that we either already hold
      # everything we want to delete, or we hold none.
      try:
        self.__lockdict[lname].delete()
        removed.append(lname)
      except (KeyError, errors.LockError):
        # This cannot happen if we were already holding it, verify:
        assert not self._is_owned(), "remove failed while holding lockset"
      else:
        # If no LockError was raised we are the ones who deleted the lock.
        # This means we can safely remove it from lockdict, as any further or
        # pending delete() or acquire() will fail (and nobody can have the lock
        # since before our call to delete()).
        #
        # This is done in an else clause because if the exception was thrown
        # it's the job of the one who actually deleted it.
        del self.__lockdict[lname]
        # And let's remove it from our private list if we owned it.
        if self._is_owned():
          self._del_owned(name=lname)

    return removed
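
# Illustrative sketch, not part of the original module: typical LockSet usage.
# The member names are hypothetical. acquire() sorts the requested names, so
# every caller takes the per-name locks in the same order and cannot deadlock.
def _lockset_example():
  ls = LockSet(members=["inst1", "inst2", "inst3"])

  # Lock a subset in shared mode, then release everything we hold.
  acquired = ls.acquire(["inst2", "inst1"], shared=1)
  assert acquired == set(["inst1", "inst2"])
  ls.release()

  # Passing ALL_SET (None) exclusively locks the whole set, which also
  # allows add() while still holding it.
  ls.acquire(ALL_SET)
  ls.add("inst4", acquired=1)       # new member, pre-acquired exclusively
  ls.remove("inst4")                # delete it again while holding it
  ls.release()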


# Locking levels, must be acquired in increasing order.
# Current rules are:
#   - at level LEVEL_CLUSTER resides the Big Ganeti Lock (BGL) which must be
#   acquired before performing any operation, either in shared or in exclusive
#   mode. acquiring the BGL in exclusive mode is discouraged and should be
#   avoided.
#   - at levels LEVEL_NODE and LEVEL_INSTANCE reside node and instance locks.
#   If you need more than one node, or more than one instance, acquire them at
#   the same time.
LEVEL_CLUSTER = 0
LEVEL_INSTANCE = 1
LEVEL_NODE = 2

LEVELS = [LEVEL_CLUSTER,
          LEVEL_INSTANCE,
          LEVEL_NODE]

# Lock levels which are modifiable
LEVELS_MOD = [LEVEL_NODE, LEVEL_INSTANCE]

LEVEL_NAMES = {
  LEVEL_CLUSTER: "cluster",
  LEVEL_INSTANCE: "instance",
  LEVEL_NODE: "node",
  }

# Constant for the big ganeti lock
BGL = 'BGL'


class GanetiLockManager:
  """The Ganeti Locking Library

  The purpose of this small library is to manage locking for ganeti clusters
  in a central place, while at the same time doing dynamic checks against
  possible deadlocks. It will also make it easier to transition to a different
  lock type should we migrate away from python threads.

  """
  _instance = None

  def __init__(self, nodes=None, instances=None):
    """Constructs a new GanetiLockManager object.

    There should be only one GanetiLockManager object at any time, so this
    function raises an error if this is not the case.

    @param nodes: list of node names
    @param instances: list of instance names

    """
    assert self.__class__._instance is None, \
           "double GanetiLockManager instance"

    self.__class__._instance = self

    # The keyring contains all the locks, at their level and in the correct
    # locking order.
    self.__keyring = {
      LEVEL_CLUSTER: LockSet([BGL]),
      LEVEL_NODE: LockSet(nodes),
      LEVEL_INSTANCE: LockSet(instances),
    }

  def _names(self, level):
    """List the lock names at the given level.

    This can be used for debugging/testing purposes.

    @param level: the level whose list of locks to get

    """
    assert level in LEVELS, "Invalid locking level %s" % level
    return self.__keyring[level]._names()

  def _is_owned(self, level):
    """Check whether we are owning locks at the given level

    """
    return self.__keyring[level]._is_owned()

  is_owned = _is_owned

  def _list_owned(self, level):
    """Get the set of owned locks at the given level

    """
    return self.__keyring[level]._list_owned()

  def _upper_owned(self, level):
    """Check that we don't own any lock at a level greater than the given one.

    """
    # This way of checking only works if LEVELS[i] = i, which we check for in
    # the test cases.
    return utils.any((self._is_owned(l) for l in LEVELS[level + 1:]))

  def _BGL_owned(self):
    """Check if the current thread owns the BGL.

    Both an exclusive or a shared acquisition work.

    """
    return BGL in self.__keyring[LEVEL_CLUSTER]._list_owned()

  def _contains_BGL(self, level, names):
    """Check if the level contains the BGL.

    Check if acting on the given level and set of names will change
    the status of the Big Ganeti Lock.

    """
    return level == LEVEL_CLUSTER and (names is None or BGL in names)

  def acquire(self, level, names, blocking=1, shared=0):
    """Acquire a set of resource locks, at the same level.

    @param level: the level at which the locks shall be acquired;
        it must be a member of LEVELS.
    @param names: the names of the locks which shall be acquired
        (special lock names, or instance/node names)
    @param shared: whether to acquire in shared mode; by default
        an exclusive lock will be acquired
    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode (this locking mode is not supported yet)

    """
    assert level in LEVELS, "Invalid locking level %s" % level

    # Check that we are either acquiring the Big Ganeti Lock or we already own
    # it. Some "legacy" opcodes need to be sure they are run non-concurrently
    # so even if we've migrated we need to at least share the BGL to be
    # compatible with them. Of course if we own the BGL exclusively there's no
    # point in acquiring any other lock, unless perhaps we are half way through
    # the migration of the current opcode.
    assert (self._contains_BGL(level, names) or self._BGL_owned()), (
            "You must own the Big Ganeti Lock before acquiring any other")

    # Check we don't own locks at the same or upper levels.
    assert not self._upper_owned(level), ("Cannot acquire locks at a level"
           " while owning some at a greater one")

    # Acquire the locks in the set.
    return self.__keyring[level].acquire(names, shared=shared,
                                         blocking=blocking)

  def release(self, level, names=None):
    """Release a set of resource locks, at the same level.

    You must have acquired the locks, either in shared or in exclusive
    mode, before releasing them.

    @param level: the level at which the locks shall be released;
        it must be a member of LEVELS
    @param names: the names of the locks which shall be released
        (defaults to all the locks acquired at that level)

    """
    assert level in LEVELS, "Invalid locking level %s" % level
    assert (not self._contains_BGL(level, names) or
            not self._upper_owned(LEVEL_CLUSTER)), (
            "Cannot release the Big Ganeti Lock while holding something"
            " at upper levels")

    # Release will complain if we don't own the locks already
    return self.__keyring[level].release(names)

  def add(self, level, names, acquired=0, shared=0):
    """Add locks at the specified level.

    @param level: the level at which the locks shall be added;
        it must be a member of LEVELS_MOD.
    @param names: names of the locks to add
    @param acquired: whether to acquire the newly added locks
    @param shared: whether the acquisition will be shared

    """
    assert level in LEVELS_MOD, "Invalid or immutable level %s" % level
    assert self._BGL_owned(), ("You must own the BGL before performing other"
           " operations")
    assert not self._upper_owned(level), ("Cannot add locks at a level"
           " while owning some at a greater one")
    return self.__keyring[level].add(names, acquired=acquired, shared=shared)

  def remove(self, level, names, blocking=1):
    """Remove locks from the specified level.

    You must either already own the locks you are trying to remove
    exclusively or not own any lock at an upper level.

    @param level: the level at which the locks shall be removed;
        it must be a member of LEVELS_MOD
    @param names: the names of the locks which shall be removed
        (special lock names, or instance/node names)
    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode (this locking mode is not supported yet)

    """
    assert level in LEVELS_MOD, "Invalid or immutable level %s" % level
    assert self._BGL_owned(), ("You must own the BGL before performing other"
           " operations")
    # Check we either own the level or don't own anything from here
    # up. LockSet.remove() will check the case in which we don't own
    # all the needed resources, or we have a shared ownership.
    assert self._is_owned(level) or not self._upper_owned(level), (
           "Cannot remove locks at a level while not owning it or"
           " owning some at a greater one")
    return self.__keyring[level].remove(names, blocking=blocking)
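
# Illustrative sketch, not part of the original module: how an opcode would
# typically drive the lock manager. The node and instance names are
# hypothetical. Levels must be acquired top-down (the BGL first), and only one
# GanetiLockManager may exist per process.
def _ganetilockmanager_example():
  glm = GanetiLockManager(nodes=["node1", "node2"], instances=["inst1"])

  glm.acquire(LEVEL_CLUSTER, [BGL], shared=1)   # always hold the BGL first
  glm.acquire(LEVEL_INSTANCE, ["inst1"])        # then lower levels, in order
  glm.acquire(LEVEL_NODE, ["node1"], shared=1)

  # ... do the actual work while holding the locks ...

  glm.release(LEVEL_NODE)
  glm.release(LEVEL_INSTANCE)
  glm.release(LEVEL_CLUSTER, [BGL])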