#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Module implementing the Ganeti locking code."""

# pylint: disable-msg=W0613,W0201

import threading
# Wouldn't it be better to define LockingError in the locking module?
# Well, for now that's how the rest of the code does it...
from ganeti import errors
from ganeti import utils


def ssynchronized(lock, shared=0):
  """Shared Synchronization decorator.

  Calls the function holding the given lock, either in exclusive or shared
  mode. It requires the passed lock to be a SharedLock (or support its
  semantics).

  """
  def wrap(fn):
    def sync_function(*args, **kwargs):
      lock.acquire(shared=shared)
      try:
        return fn(*args, **kwargs)
      finally:
        lock.release()
    return sync_function
  return wrap
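

# A minimal usage sketch for ssynchronized (the names below are invented for
# this example and are not used elsewhere in the module). The decorator is
# applied inside a helper function so that SharedLock, defined further down
# in this file, is only looked up if the sketch is actually run.
def _ssynchronized_example():
  """Show calls being serialized around a SharedLock by ssynchronized."""
  my_lock = SharedLock()
  counter = {'value': 0}

  @ssynchronized(my_lock)
  def increment():
    # The lock is held exclusively for the duration of each call.
    counter['value'] += 1

  @ssynchronized(my_lock, shared=1)
  def read():
    # Readers take the lock in shared mode, so they can run concurrently.
    return counter['value']

  increment()
  return read()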


class SharedLock:
  """Implements a shared lock.

  Multiple threads can acquire the lock in a shared way by calling
  acquire(shared=1).  In order to acquire the lock in an exclusive way
  threads can call acquire() (or acquire(shared=0)).

  The lock prevents starvation but does not guarantee that threads will
  acquire the shared lock in the order they queued for it, just that they
  will eventually do so.

  """
  def __init__(self):
    """Construct a new SharedLock"""
    # we have two conditions, c_shr and c_exc, sharing the same lock.
    self.__lock = threading.Lock()
    self.__turn_shr = threading.Condition(self.__lock)
    self.__turn_exc = threading.Condition(self.__lock)

    # current lock holders
    self.__shr = set()
    self.__exc = None

    # lock waiters
    self.__nwait_exc = 0
    self.__nwait_shr = 0
    self.__npass_shr = 0

    # is this lock in the deleted state?
    self.__deleted = False

  def __is_sharer(self):
    """Is the current thread sharing the lock at this time?"""
    return threading.currentThread() in self.__shr

  def __is_exclusive(self):
    """Is the current thread holding the lock exclusively at this time?"""
    return threading.currentThread() == self.__exc

  def __is_owned(self, shared=-1):
    """Is the current thread somehow owning the lock at this time?

    This is a private version of the function, which presumes you're holding
    the internal lock.

    """
    if shared < 0:
      return self.__is_sharer() or self.__is_exclusive()
    elif shared:
      return self.__is_sharer()
    else:
      return self.__is_exclusive()

  def _is_owned(self, shared=-1):
    """Is the current thread somehow owning the lock at this time?

    @param shared:
        - < 0: check for any type of ownership (default)
        - 0: check for exclusive ownership
        - > 0: check for shared ownership

    """
    self.__lock.acquire()
    try:
      result = self.__is_owned(shared=shared)
    finally:
      self.__lock.release()

    return result

  def __wait(self, c):
    """Wait on the given condition, and raise an exception if the current lock
    is declared deleted in the meantime.

    @param c: the condition to wait on

    """
    c.wait()
    if self.__deleted:
      raise errors.LockError('deleted lock')

  def __exclusive_acquire(self):
    """Acquire the lock exclusively.

    This is a private function that presumes you are already holding the
    internal lock. It's defined separately to avoid code duplication between
    acquire() and delete()

    """
    self.__nwait_exc += 1
    try:
      # This is to save ourselves from a nasty race condition that could
      # theoretically make the sharers starve.
      if self.__nwait_shr > 0 or self.__nwait_exc > 1:
        self.__wait(self.__turn_exc)

      while len(self.__shr) > 0 or self.__exc is not None:
        self.__wait(self.__turn_exc)

      self.__exc = threading.currentThread()
    finally:
      self.__nwait_exc -= 1

    assert self.__npass_shr == 0, "SharedLock: internal fairness violation"

  def acquire(self, blocking=1, shared=0):
    """Acquire a shared lock.

    @param shared: whether to acquire in shared mode; by default an
        exclusive lock will be acquired
    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode (this locking mode is not supported yet)

    """
    if not blocking:
      # We don't have non-blocking mode for now
      raise NotImplementedError

    self.__lock.acquire()
    try:
      if self.__deleted:
        raise errors.LockError('deleted lock')

      # We cannot acquire the lock if we already have it
      assert not self.__is_owned(), "double acquire() on a non-recursive lock"
      assert self.__npass_shr >= 0, "Internal fairness condition weirdness"

      if shared:
        self.__nwait_shr += 1
        try:
          wait = False
          # If there is an exclusive acquirer waiting we have to wait.  We'll
          # only do this once, though, when we start waiting for the lock. Then
          # we'll just wait until there are no exclusive holders.
          if self.__nwait_exc > 0:
            # TODO: if !blocking...
            wait = True
            self.__wait(self.__turn_shr)

          while self.__exc is not None:
            wait = True
            # TODO: if !blocking...
            self.__wait(self.__turn_shr)

          self.__shr.add(threading.currentThread())

          # If we were waiting, note that we passed
          if wait:
            self.__npass_shr -= 1

        finally:
          self.__nwait_shr -= 1

        assert self.__npass_shr >= 0, "Internal fairness condition weirdness"
      else:
        # TODO: if !blocking...
        # (or modify __exclusive_acquire for non-blocking mode)
        self.__exclusive_acquire()

    finally:
      self.__lock.release()

    return True

  def release(self):
    """Release a Shared Lock.

    You must have acquired the lock, either in shared or in exclusive mode,
    before calling this function.

    """
    self.__lock.acquire()
    try:
      assert self.__npass_shr >= 0, "Internal fairness condition weirdness"
      # Autodetect release type
      if self.__is_exclusive():
        self.__exc = None

        # An exclusive holder has just had the lock, time to put it in shared
        # mode if there are shared holders waiting. Otherwise wake up the next
        # exclusive holder.
        if self.__nwait_shr > 0:
          # Make sure at least the ones which were blocked pass.
          self.__npass_shr = self.__nwait_shr
          self.__turn_shr.notifyAll()
        elif self.__nwait_exc > 0:
          self.__turn_exc.notify()

      elif self.__is_sharer():
        self.__shr.remove(threading.currentThread())

        # If there are shared holders waiting (and not just scheduled to pass)
        # there *must* be an exclusive holder waiting as well; otherwise what
        # were they waiting for?
        assert (self.__nwait_exc > 0 or self.__npass_shr == self.__nwait_shr), \
               "Lock sharers waiting while no exclusive is queueing"

        # If there are no more shared holders either in or scheduled to pass,
        # and some exclusive holders are waiting let's wake one up.
        if (len(self.__shr) == 0 and
            self.__nwait_exc > 0 and
            not self.__npass_shr > 0):
          self.__turn_exc.notify()

      else:
        assert False, "Cannot release non-owned lock"

    finally:
      self.__lock.release()

  def delete(self, blocking=1):
    """Delete a Shared Lock.

    This operation will declare the lock for removal. First the lock will be
    acquired in exclusive mode if you don't already own it, then the lock
    will be put in a state where any future and pending acquire() fail.

    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode. This locking mode is not supported
        yet unless you are already holding the lock exclusively.

    """
    self.__lock.acquire()
    try:
      assert not self.__is_sharer(), "cannot delete() a lock while sharing it"

      if self.__deleted:
        raise errors.LockError('deleted lock')

      if not self.__is_exclusive():
        if not blocking:
          # We don't have non-blocking mode for now
          raise NotImplementedError
        self.__exclusive_acquire()

      self.__deleted = True
      self.__exc = None
      # Wake up everybody, they will fail acquiring the lock and
      # raise an exception instead.
      self.__turn_exc.notifyAll()
      self.__turn_shr.notifyAll()

    finally:
      self.__lock.release()
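

# A minimal usage sketch for SharedLock (illustrative only, not used elsewhere
# in this module). Shared holders may overlap with each other, while an
# exclusive holder excludes everybody else.
def _sharedlock_example():
  """Acquire a SharedLock first in shared and then in exclusive mode."""
  lock = SharedLock()

  lock.acquire(shared=1)    # shared (reader) mode
  try:
    pass                    # read the protected data here
  finally:
    lock.release()

  lock.acquire()            # exclusive (writer) mode, the default
  try:
    pass                    # modify the protected data here
  finally:
    lock.release()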


# Whenever we want to acquire a full LockSet we pass None as the value to
# acquire.  Hide this behind this nicely named constant.
ALL_SET = None


class LockSet:
  """Implements a set of locks.

  This abstraction implements a set of shared locks for the same resource type,
  distinguished by name. The user can lock a subset of the resources and the
  LockSet will take care of acquiring the locks always in the same order, thus
  preventing deadlock.

  All the locks needed in the same set must be acquired together, though.

  """
  def __init__(self, members=None):
    """Constructs a new LockSet.

    @param members: initial members of the set

    """
    # Used internally to guarantee coherency.
    self.__lock = SharedLock()

    # The lockdict indexes the relationship name -> lock
    # The order-of-locking is implied by the alphabetical order of names
    self.__lockdict = {}

    if members is not None:
      for name in members:
        self.__lockdict[name] = SharedLock()

    # The owner dict contains the set of locks each thread owns. For
    # performance each thread can access its own key without a global lock on
    # this structure. It is paramount though that *no* other type of access is
    # done to this structure (e.g. no looping over its keys). The *_owned
    # helper functions are defined to guarantee access is correct, but in
    # general never do anything different than
    # __owners[threading.currentThread()], or there will be trouble.
    self.__owners = {}

  def _is_owned(self):
    """Is the current thread a current level owner?"""
    return threading.currentThread() in self.__owners

  def _add_owned(self, name=None):
    """Note the current thread owns the given lock"""
    if name is None:
      if not self._is_owned():
        self.__owners[threading.currentThread()] = set()
    else:
      if self._is_owned():
        self.__owners[threading.currentThread()].add(name)
      else:
        self.__owners[threading.currentThread()] = set([name])


  def _del_owned(self, name=None):
    """Note the current thread no longer owns the given lock"""

    if name is not None:
      self.__owners[threading.currentThread()].remove(name)

    # Only remove the key if we don't hold the set-lock as well
    if (not self.__lock._is_owned() and
        not self.__owners[threading.currentThread()]):
      del self.__owners[threading.currentThread()]

  def _list_owned(self):
    """Get the set of resource names owned by the current thread"""
    if self._is_owned():
      return self.__owners[threading.currentThread()].copy()
    else:
      return set()

  def __names(self):
    """Return the current set of names.

    Only call this function while holding __lock and don't iterate on the
    result after releasing the lock.

    """
    return self.__lockdict.keys()

  def _names(self):
    """Return a copy of the current set of elements.

    Used only for debugging purposes.

    """
    # If we don't already own the set-level lock we'll acquire it (in shared
    # mode) and note we need to release it later.
    release_lock = False
    if not self.__lock._is_owned():
      release_lock = True
      self.__lock.acquire(shared=1)
    try:
      result = self.__names()
    finally:
      if release_lock:
        self.__lock.release()
    return set(result)

  def acquire(self, names, blocking=1, shared=0):
    """Acquire a set of resource locks.

    @param names: the names of the locks which shall be acquired
        (special lock names, or instance/node names)
    @param shared: whether to acquire in shared mode; by default an
        exclusive lock will be acquired
    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode (this locking mode is not supported yet)

    @return: True when all the locks are successfully acquired

    @raise errors.LockError: when any lock we try to acquire has
        been deleted before we succeed. In this case none of the
        locks requested will be acquired.

    """
    if not blocking:
      # We don't have non-blocking mode for now
      raise NotImplementedError

    # Check we don't already own locks at this level
    assert not self._is_owned(), "Cannot acquire locks in the same set twice"

    if names is None:
      # If no names are given acquire the whole set by not letting new names
      # be added before we release, and getting the current list of names.
      # Some of them may then be deleted later, but we'll cope with this.
      #
      # We'd like to acquire this lock in a shared way, as it's nice if
      # everybody else can use the instances at the same time. If we are
      # acquiring them exclusively they won't be able to do this anyway,
      # though, so we'll get the list lock exclusively as well in order to be
      # able to do add() on the set while owning it.
      self.__lock.acquire(shared=shared)
      try:
        # note we own the set-lock
        self._add_owned()
        names = self.__names()
      except:
        # We shouldn't have problems adding the lock to the owners list, but
        # if we did we'll try to release this lock and re-raise exception.
        # Of course something is going to be really wrong, after this.
        self.__lock.release()
        raise

    try:
      # Support passing in a single resource to acquire rather than many
      if isinstance(names, basestring):
        names = [names]
      else:
        names.sort()

      acquire_list = []
      # First we look the locks up on __lockdict. We have no way of being sure
      # they will still be there after, but this makes it a lot faster should
      # just one of them already be wrong.
      for lname in utils.UniqueSequence(names):
        try:
          lock = self.__lockdict[lname] # raises KeyError if lock is not there
          acquire_list.append((lname, lock))
        except (KeyError):
          if self.__lock._is_owned():
            # We are acquiring all the set, it doesn't matter if this particular
            # element is not there anymore.
            continue
          else:
            raise errors.LockError('non-existing lock in set (%s)' % lname)

      # This will hold the locknames we effectively acquired.
      acquired = set()
      # Now acquire_list contains a sorted list of resources and locks we want.
      # In order to get them we loop on this (private) list and acquire() them.
      # We gave no real guarantee they will still exist till this is done but
      # .acquire() itself is safe and will alert us if the lock gets deleted.
      for (lname, lock) in acquire_list:
        try:
          lock.acquire(shared=shared) # raises LockError if the lock is deleted
          # now the lock cannot be deleted, we have it!
          self._add_owned(name=lname)
          acquired.add(lname)
        except (errors.LockError):
          if self.__lock._is_owned():
            # We are acquiring all the set, it doesn't matter if this particular
            # element is not there anymore.
            continue
          else:
            name_fail = lname
            for lname in self._list_owned():
              self.__lockdict[lname].release()
              self._del_owned(name=lname)
            raise errors.LockError('non-existing lock in set (%s)' % name_fail)
        except:
          # We shouldn't have problems adding the lock to the owners list, but
          # if we did we'll try to release this lock and re-raise exception.
          # Of course something is going to be really wrong, after this.
          if lock._is_owned():
            lock.release()
          raise

    except:
      # If something went wrong and we had the set-lock let's release it...
      if self.__lock._is_owned():
        self.__lock.release()
      raise

    return acquired

  def release(self, names=None):
    """Release a set of resource locks, at the same level.

    You must have acquired the locks, either in shared or in exclusive mode,
    before releasing them.

    @param names: the names of the locks which shall be released
        (defaults to all the locks acquired at that level).

    """
    assert self._is_owned(), "release() on lock set while not owner"

    # Support passing in a single resource to release rather than many
    if isinstance(names, basestring):
      names = [names]

    if names is None:
      names = self._list_owned()
    else:
      names = set(names)
      assert self._list_owned().issuperset(names), (
               "release() on unheld resources %s" %
               names.difference(self._list_owned()))

    # First of all let's release the "all elements" lock, if set.
    # After this 'add' can work again
    if self.__lock._is_owned():
      self.__lock.release()
      self._del_owned()

    for lockname in names:
      # If we are sure the lock doesn't leave __lockdict without being
      # exclusively held we can do this...
      self.__lockdict[lockname].release()
      self._del_owned(name=lockname)

  def add(self, names, acquired=0, shared=0):
    """Add a new set of elements to the set.

    @param names: names of the new elements to add
    @param acquired: pre-acquire the new resource?
    @param shared: is the pre-acquisition shared?

    """
    # Check we don't already own locks at this level
    assert not self._is_owned() or self.__lock._is_owned(shared=0), \
      "Cannot add locks if the set is only partially owned, or shared"

    # Support passing in a single resource to add rather than many
    if isinstance(names, basestring):
      names = [names]

    # If we don't already own the set-level lock acquired in an exclusive way
    # we'll get it and note we need to release it later.
    release_lock = False
    if not self.__lock._is_owned():
      release_lock = True
      self.__lock.acquire()

    try:
      invalid_names = set(self.__names()).intersection(names)
      if invalid_names:
        # This must be an explicit raise, not an assert, because assert is
        # turned off when using optimization, and this can happen because of
        # concurrency even if the user doesn't want it.
        raise errors.LockError("duplicate add() (%s)" % invalid_names)

      for lockname in names:
        lock = SharedLock()

        if acquired:
          lock.acquire(shared=shared)
          # now the lock cannot be deleted, we have it!
          try:
            self._add_owned(name=lockname)
          except:
            # We shouldn't have problems adding the lock to the owners list,
            # but if we did we'll try to release this lock and re-raise
            # exception.  Of course something is going to be really wrong,
            # after this.  On the other hand the lock hasn't been added to the
            # __lockdict yet so no other threads should be pending on it. This
            # release is just a safety measure.
            lock.release()
            raise

        self.__lockdict[lockname] = lock

    finally:
      # Only release __lock if we were not holding it previously.
      if release_lock:
        self.__lock.release()

    return True

  def remove(self, names, blocking=1):
    """Remove elements from the lock set.

    You can either not hold anything in the lockset or already hold a superset
    of the elements you want to delete, exclusively.

    @param names: names of the resources to remove.
    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode (this locking mode is not supported
        yet unless you are already holding the locks exclusively)

    @return: a list of locks which we removed; the list is always
        equal to the names list if we were holding all the locks
        exclusively

    """
    if not blocking and not self._is_owned():
      # We don't have non-blocking mode for now
      raise NotImplementedError

    # Support passing in a single resource to remove rather than many
    if isinstance(names, basestring):
      names = [names]

    # If we own any subset of this lock it must be a superset of what we want
    # to delete. The ownership must also be exclusive, but that will be checked
    # by the lock itself.
    assert not self._is_owned() or self._list_owned().issuperset(names), (
      "remove() on acquired lockset while not owning all elements")

    removed = []

    for lname in names:
      # Calling delete() acquires the lock exclusively if we don't already own
      # it, and causes all pending and subsequent lock acquires to fail. It's
      # fine to call it out of order because delete() also implies release(),
      # and the assertion above guarantees that we either already hold
      # everything we want to delete, or we hold none.
      try:
        self.__lockdict[lname].delete()
        removed.append(lname)
      except (KeyError, errors.LockError):
        # This cannot happen if we were already holding it, verify:
        assert not self._is_owned(), "remove failed while holding lockset"
      else:
        # If no LockError was raised we are the ones who deleted the lock.
        # This means we can safely remove it from lockdict, as any further or
        # pending delete() or acquire() will fail (and nobody can have the lock
        # since before our call to delete()).
        #
        # This is done in an else clause because if the exception was thrown
        # it's the job of the one who actually deleted it.
        del self.__lockdict[lname]
        # And let's remove it from our private list if we owned it.
        if self._is_owned():
          self._del_owned(name=lname)

    return removed
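

# A minimal usage sketch for LockSet (the resource names are invented for this
# example and are not used elsewhere in the module). Requesting several names
# in one acquire() call lets the set take the per-name locks in a fixed,
# deadlock-free order.
def _lockset_example():
  """Acquire, release, grow and shrink a LockSet."""
  nodes = LockSet(members=['node1', 'node2', 'node3'])

  # Take a shared lock on a subset of the resources; the return value is the
  # set of names actually acquired.
  acquired = nodes.acquire(['node2', 'node1'], shared=1)
  try:
    pass  # work with node1 and node2 while holding their locks
  finally:
    nodes.release()

  # Passing ALL_SET (i.e. None) acquires every lock currently in the set.
  nodes.acquire(ALL_SET)
  nodes.release()

  # The set itself can grow and shrink at runtime.
  nodes.add('node4')
  nodes.remove('node4')
  return acquired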


# Locking levels, must be acquired in increasing order.
# Current rules are:
#   - at level LEVEL_CLUSTER resides the Big Ganeti Lock (BGL) which must be
#   acquired before performing any operation, either in shared or in exclusive
#   mode. Acquiring the BGL in exclusive mode is discouraged and should be
#   avoided.
#   - at levels LEVEL_NODE and LEVEL_INSTANCE reside node and instance locks.
#   If you need more than one node, or more than one instance, acquire them at
#   the same time.
LEVEL_CLUSTER = 0
LEVEL_INSTANCE = 1
LEVEL_NODE = 2

LEVELS = [LEVEL_CLUSTER,
          LEVEL_INSTANCE,
          LEVEL_NODE]

# Lock levels which are modifiable
LEVELS_MOD = [LEVEL_NODE, LEVEL_INSTANCE]

# Constant for the big ganeti lock
BGL = 'BGL'
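

# The _upper_owned() check in GanetiLockManager below relies on each level
# constant being equal to its index in LEVELS; a tiny self-check sketch of
# that invariant (illustrative only, the real check lives in the test cases).
def _levels_invariant_example():
  """Verify that LEVELS[i] == i for every locking level."""
  for idx, level in enumerate(LEVELS):
    assert idx == level, "LEVELS must list the level constants in order"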


class GanetiLockManager:
  """The Ganeti Locking Library

  The purpose of this small library is to manage locking for ganeti clusters
  in a central place, while at the same time doing dynamic checks against
  possible deadlocks. It will also make it easier to transition to a different
  lock type should we migrate away from python threads.

  """
  _instance = None

  def __init__(self, nodes=None, instances=None):
    """Constructs a new GanetiLockManager object.

    There should be only one GanetiLockManager object at any time, so this
    function raises an error if this is not the case.

    @param nodes: list of node names
    @param instances: list of instance names

    """
    assert self.__class__._instance is None, \
           "double GanetiLockManager instance"

    self.__class__._instance = self

    # The keyring contains all the locks, at their level and in the correct
    # locking order.
    self.__keyring = {
      LEVEL_CLUSTER: LockSet([BGL]),
      LEVEL_NODE: LockSet(nodes),
      LEVEL_INSTANCE: LockSet(instances),
    }

  def _names(self, level):
    """List the lock names at the given level.

    This can be used for debugging/testing purposes.

    @param level: the level whose list of locks to get

    """
    assert level in LEVELS, "Invalid locking level %s" % level
    return self.__keyring[level]._names()

  def _is_owned(self, level):
    """Check whether we own locks at the given level

    """
    return self.__keyring[level]._is_owned()

  is_owned = _is_owned

  def _list_owned(self, level):
    """Get the set of owned locks at the given level

    """
    return self.__keyring[level]._list_owned()

  def _upper_owned(self, level):
    """Check whether we own any lock at a level greater than the given one.

    """
    # This way of checking only works if LEVELS[i] = i, which we check for in
    # the test cases.
    return utils.any((self._is_owned(l) for l in LEVELS[level + 1:]))

  def _BGL_owned(self):
    """Check if the current thread owns the BGL.

    Either an exclusive or a shared acquisition works.

    """
    return BGL in self.__keyring[LEVEL_CLUSTER]._list_owned()

  def _contains_BGL(self, level, names):
    """Check if the level contains the BGL.

    Check if acting on the given level and set of names will change
    the status of the Big Ganeti Lock.

    """
    return level == LEVEL_CLUSTER and (names is None or BGL in names)

  def acquire(self, level, names, blocking=1, shared=0):
    """Acquire a set of resource locks, at the same level.

    @param level: the level at which the locks shall be acquired;
        it must be a member of LEVELS.
    @param names: the names of the locks which shall be acquired
        (special lock names, or instance/node names)
    @param shared: whether to acquire in shared mode; by default
        an exclusive lock will be acquired
    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode (this locking mode is not supported yet)

    """
    assert level in LEVELS, "Invalid locking level %s" % level

    # Check that we are either acquiring the Big Ganeti Lock or we already own
    # it. Some "legacy" opcodes need to be sure they are run non-concurrently
    # so even if we've migrated we need to at least share the BGL to be
    # compatible with them. Of course if we own the BGL exclusively there's no
    # point in acquiring any other lock, unless perhaps we are halfway through
    # the migration of the current opcode.
    assert (self._contains_BGL(level, names) or self._BGL_owned()), (
            "You must own the Big Ganeti Lock before acquiring any other")

    # Check we don't own locks at the same or upper levels.
    assert not self._upper_owned(level), ("Cannot acquire locks at a level"
           " while owning some at a greater one")

    # Acquire the locks in the set.
    return self.__keyring[level].acquire(names, shared=shared,
                                         blocking=blocking)

  def release(self, level, names=None):
    """Release a set of resource locks, at the same level.

    You must have acquired the locks, either in shared or in exclusive
    mode, before releasing them.

    @param level: the level at which the locks shall be released;
        it must be a member of LEVELS
    @param names: the names of the locks which shall be released
        (defaults to all the locks acquired at that level)

    """
    assert level in LEVELS, "Invalid locking level %s" % level
    assert (not self._contains_BGL(level, names) or
            not self._upper_owned(LEVEL_CLUSTER)), (
            "Cannot release the Big Ganeti Lock while holding something"
            " at upper levels")

    # Release will complain if we don't own the locks already
    return self.__keyring[level].release(names)

  def add(self, level, names, acquired=0, shared=0):
    """Add locks at the specified level.

    @param level: the level at which the locks shall be added;
        it must be a member of LEVELS_MOD.
    @param names: names of the locks to add
    @param acquired: whether to acquire the newly added locks
    @param shared: whether the acquisition will be shared

    """
    assert level in LEVELS_MOD, "Invalid or immutable level %s" % level
    assert self._BGL_owned(), ("You must own the BGL before performing other"
           " operations")
    assert not self._upper_owned(level), ("Cannot add locks at a level"
           " while owning some at a greater one")
    return self.__keyring[level].add(names, acquired=acquired, shared=shared)

  def remove(self, level, names, blocking=1):
    """Remove locks from the specified level.

    You must either already own the locks you are trying to remove
    exclusively or not own any lock at an upper level.

    @param level: the level at which the locks shall be removed;
        it must be a member of LEVELS_MOD
    @param names: the names of the locks which shall be removed
        (special lock names, or instance/node names)
    @param blocking: whether to block while trying to acquire or to
        operate in try-lock mode (this locking mode is not supported yet)

    """
    assert level in LEVELS_MOD, "Invalid or immutable level %s" % level
    assert self._BGL_owned(), ("You must own the BGL before performing other"
           " operations")
    # Check we either own the level or don't own anything from here up.
    # LockSet.remove() will check the case in which we don't own all the needed
    # resources, or we have a shared ownership.
    assert self._is_owned(level) or not self._upper_owned(level), (
           "Cannot remove locks at a level while not owning it or"
           " owning some at a greater one")
    return self.__keyring[level].remove(names, blocking=blocking)
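

# A minimal end-to-end usage sketch for GanetiLockManager (node and instance
# names are invented for this example and not used elsewhere). Locks must be
# taken from the outermost level inwards: first the BGL, then instance locks,
# then node locks, and released in the opposite order.
def _lockmanager_example():
  """Walk through the level ordering enforced by GanetiLockManager."""
  glm = GanetiLockManager(nodes=['node1', 'node2'], instances=['instance1'])

  glm.acquire(LEVEL_CLUSTER, [BGL], shared=1)   # share the Big Ganeti Lock
  glm.acquire(LEVEL_INSTANCE, ['instance1'])    # then per-instance locks
  glm.acquire(LEVEL_NODE, ['node1', 'node2'], shared=1)

  # Release in the opposite order, ending with the BGL.
  glm.release(LEVEL_NODE)
  glm.release(LEVEL_INSTANCE)
  glm.release(LEVEL_CLUSTER, [BGL])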