@@ -126,9 +126,7 @@  (LogicalUnit docstring: needed_locks conventions)
   - Use an empty dict if you don't need any lock
   - If you don't need any lock at a particular level omit that level
   - Don't put anything for the BGL level
- - If you want all locks at a level use None as a value
-   (this reflects what LockSet does, and will be replaced before
-   CheckPrereq with the full list of nodes that have been locked)
+ - If you want all locks at a level use locking.ALL_SET as a value

   If you need to share locks (rather than acquire them exclusively) at one
   level you can modify self.share_locks, setting a true value (usually 1) for
@@ -137,7 +135,7 @@
   Examples:
     # Acquire all nodes and one instance
     self.needed_locks = {
-     locking.LEVEL_NODE: None,
+     locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCES: ['instance1.example.tld'],
    }
    # Acquire just two nodes
... | ... | |
@@ -1232,7 +1230,7 @@
     # Lock all nodes, in shared mode
     self.needed_locks = {}
     self.share_locks[locking.LEVEL_NODE] = 1
-    self.needed_locks[locking.LEVEL_NODE] = None
+    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

   def CheckPrereq(self):
     """Check prerequisites.
... | ... | |
@@ -1395,7 +1393,7 @@
     # that we need atomic ways to get info for a group of nodes from the
     # config, though.
     if not self.op.names:
-      self.needed_locks[locking.LEVEL_NODE] = None
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
     else:
       self.needed_locks[locking.LEVEL_NODE] = \
         _GetWantedNodes(self, self.op.names)
... | ... | |
@@ -1499,7 +1497,7 @@
     self.needed_locks = {}
     self.share_locks[locking.LEVEL_NODE] = 1
     if not self.op.nodes:
-      self.needed_locks[locking.LEVEL_NODE] = None
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
     else:
       self.needed_locks[locking.LEVEL_NODE] = \
         _GetWantedNodes(self, self.op.nodes)
... | ... | |
@@ -2504,7 +2502,7 @@
     # dynamic fields. For that we need atomic ways to get info for a group of
     # instances from the config, though.
     if not self.op.names:
-      self.needed_locks[locking.LEVEL_INSTANCE] = None # Acquire all
+      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
     else:
       self.needed_locks[locking.LEVEL_INSTANCE] = \
         _GetWantedInstances(self, self.op.names)
... | ... | |
@@ -4479,7 +4477,7 @@
     self.needed_locks = {}
     self.share_locks[locking.LEVEL_NODE] = 1
     if not self.op.nodes:
-      self.needed_locks[locking.LEVEL_NODE] = None
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
     else:
       self.needed_locks[locking.LEVEL_NODE] = \
         _GetWantedNodes(self, self.op.nodes)