Revision 3977a4c1
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -83,6 +83,7 @@
     self.sstore = sstore
     self.context = context
     self.needed_locks = None
+    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
     self.__ssh = None

     for attr_name in self._OP_REQP:
@@ -128,6 +129,10 @@
     (this reflects what LockSet does, and will be replaced before
     CheckPrereq with the full list of nodes that have been locked)

+    If you need to share locks (rather than acquire them exclusively) at one
+    level you can modify self.share_locks, setting a true value (usually 1) for
+    that level. By default locks are not shared.
+
     Examples:
     # Acquire all nodes and one instance
     self.needed_locks = {
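To make the docstring's convention concrete, here is a sketch of an
ExpandNames implementation for a hypothetical LU; the opcode field and node
names are illustrative, not part of this revision. It locks one instance
exclusively while taking the node locks in shared mode:

  def ExpandNames(self):
    # Lock the instance exclusively, but only read-lock the nodes.
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [self.op.instance_name],
      locking.LEVEL_NODE: ["node1.example.com", "node2.example.com"],
    }
    # share_locks was initialised to 0 (exclusive) for every level in
    # LogicalUnit.__init__; a true value requests shared acquisition.
    self.share_locks[locking.LEVEL_NODE] = 1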
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -134,10 +134,13 @@
       # This gives a chance to LUs to make last-minute changes after acquiring
       # locks at any preceding level.
       lu.DeclareLocks(level)
+      needed_locks = lu.needed_locks[level]
+      share = lu.share_locks[level]
       # This is always safe to do, as we can't acquire more/less locks than
       # what was requested.
       lu.needed_locks[level] = self.context.glm.acquire(level,
-                                                        lu.needed_locks[level])
+                                                        needed_locks,
+                                                        shared=share)
       try:
         result = self._LockAndExecLU(lu, level + 1)
       finally:
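For intuition about what the new shared=share argument buys, the sketch below
implements the same shared/exclusive semantics as a minimal reader/writer
lock. It is an illustration only, not Ganeti's actual locking.SharedLock,
which builds more machinery on top of this idea: any number of shared holders
may coexist, while an exclusive holder excludes everyone else.

  import threading

  class SimpleSharedLock(object):
    """Toy reader/writer lock: many shared holders or one exclusive holder."""

    def __init__(self):
      self._cond = threading.Condition()
      self._sharers = 0
      self._exclusive = False

    def acquire(self, shared=0):
      self._cond.acquire()
      try:
        if shared:
          # Shared acquirers only wait for an exclusive holder to leave.
          while self._exclusive:
            self._cond.wait()
          self._sharers += 1
        else:
          # Exclusive acquirers wait until nobody holds the lock at all.
          while self._exclusive or self._sharers > 0:
            self._cond.wait()
          self._exclusive = True
      finally:
        self._cond.release()

    def release(self):
      self._cond.acquire()
      try:
        if self._exclusive:
          self._exclusive = False
        else:
          self._sharers -= 1
        self._cond.notifyAll()
      finally:
        self._cond.release()

Under this model, several read-only LUs can hold the same level's locks
concurrently, while an LU that leaves share_locks at its default of 0 still
gets exclusive access at that level.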