Revision 42828156
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -9479,6 +9479,7 @@
     self._ExpandAndLockInstance()
 
     assert locking.LEVEL_NODE not in self.needed_locks
+    assert locking.LEVEL_NODE_RES not in self.needed_locks
     assert locking.LEVEL_NODEGROUP not in self.needed_locks
 
     assert self.op.iallocator is None or self.op.remote_node is None, \
@@ -9501,6 +9502,8 @@
         # iallocator will select a new node in the same group
         self.needed_locks[locking.LEVEL_NODEGROUP] = []
 
+    self.needed_locks[locking.LEVEL_NODE_RES] = []
+
     self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
                                    self.op.iallocator, self.op.remote_node,
                                    self.op.disks, False, self.op.early_release)
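The two hunks above extend LUInstanceReplaceDisks.ExpandNames: the new LEVEL_NODE_RES level is first asserted absent and then declared empty alongside the existing node locks. The following standalone sketch is not Ganeti code; plain strings and a dict stand in for the locking.LEVEL_* constants and the LU's self.needed_locks, only to illustrate how such a per-level lock declaration is built up:

    # Standalone illustration only: plain strings and a dict stand in for
    # Ganeti's locking.LEVEL_* constants and an LU's self.needed_locks.
    LEVEL_INSTANCE = "instance"
    LEVEL_NODEGROUP = "nodegroup"
    LEVEL_NODE = "node"
    LEVEL_NODE_RES = "node-res"

    def expand_names(instance_name, use_iallocator):
        """Sketch of the lock declaration step for a replace-disks operation."""
        needed_locks = {LEVEL_INSTANCE: [instance_name]}
        if use_iallocator:
            # The allocator picks the new node later, so node and node group
            # locks start out empty and are filled in at DeclareLocks time.
            needed_locks[LEVEL_NODE] = []
            needed_locks[LEVEL_NODEGROUP] = []
        # New in this revision: node resource locks are tracked as their own
        # level, declared here and later made to mirror the node locks.
        needed_locks[LEVEL_NODE_RES] = []
        return needed_locks

    print(expand_names("inst1.example.com", use_iallocator=True))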
@@ -9514,6 +9517,8 @@
       assert not self.needed_locks[locking.LEVEL_NODEGROUP]
 
       self.share_locks[locking.LEVEL_NODEGROUP] = 1
+      # Lock all groups used by instance optimistically; this requires going
+      # via the node before it's locked, requiring verification later on
       self.needed_locks[locking.LEVEL_NODEGROUP] = \
         self.cfg.GetInstanceNodeGroups(self.op.instance_name)
 
@@ -9528,6 +9533,10 @@
           for node_name in self.cfg.GetNodeGroup(group_uuid).members]
       else:
         self._LockInstancesNodes()
+    elif level == locking.LEVEL_NODE_RES:
+      # Reuse node locks
+      self.needed_locks[locking.LEVEL_NODE_RES] = \
+        self.needed_locks[locking.LEVEL_NODE]
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -9564,6 +9573,7 @@
     assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
             self.op.iallocator is None)
 
+    # Verify if node group locks are still correct
     owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
     if owned_groups:
       _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
@@ -9822,8 +9832,9 @@
                                                             self.target_node]
                                 if node_name is not None)
 
-      # Release unneeded node locks
+      # Release unneeded node and node resource locks
       _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
+      _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
 
       # Release any owned node group
       if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
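_ReleaseLocks is now called once per level with keep=touched_nodes, dropping every node and node resource lock that none of the involved nodes needs. The helper below is only a simplified stand-in for the keep/names semantics visible at these call sites; the real function operates on the LU's lock manager rather than on plain sets:

    # Simplified stand-in for the release step: given the lock names currently
    # owned at one level, work out which ones survive. Only the keep/names
    # behaviour seen at the call sites above is modelled here.
    def release_locks(owned, names=None, keep=None):
        assert not (names and keep), "only one of names/keep may be given"
        if keep is not None:
            released = set(owned) - set(keep)   # keep these, drop the rest
        elif names is not None:
            released = set(owned) & set(names)  # drop exactly the named locks
        else:
            released = set(owned)               # drop everything at this level
        return set(owned) - released            # locks still held afterwards

    owned_nodes = {"node1", "node2", "node3"}
    touched_nodes = {"node1", "node3"}
    print(release_locks(owned_nodes, keep=touched_nodes))  # {'node1', 'node3'}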
@@ -9852,6 +9863,8 @@
       assert set(owned_nodes) == set(self.node_secondary_ip), \
           ("Incorrect node locks, owning %s, expected %s" %
            (owned_nodes, self.node_secondary_ip.keys()))
+      assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
+              self.lu.owned_locks(locking.LEVEL_NODE_RES))
 
       owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
       assert list(owned_instances) == [self.instance_name], \
@@ -9887,9 +9900,11 @@
     if activate_disks:
       _SafeShutdownInstanceDisks(self.lu, self.instance)
 
+    assert not self.lu.owned_locks(locking.LEVEL_NODE)
+
     if __debug__:
       # Verify owned locks
-      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
+      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
       nodes = frozenset(self.node_secondary_ip)
       assert ((self.early_release and not owned_nodes) or
               (not self.early_release and not (set(owned_nodes) - nodes))), \
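With the node locks released earlier, the debug-time check now inspects the resource locks instead: no plain node locks may remain at this point, and whatever resource locks are still held must be confined to the instance's own nodes (or be gone entirely when early release was requested). A toy version of that invariant, with hypothetical argument names and plain sets in place of the LU's lock queries:

    # Toy version of the debug-time ownership check above; the argument names
    # are hypothetical and plain sets replace the LU's lock queries.
    def verify_owned(early_release, owned_node, owned_node_res, secondary_ips):
        assert not owned_node, "node locks must already have been released"
        nodes = frozenset(secondary_ips)
        assert ((early_release and not owned_node_res) or
                (not early_release and not (set(owned_node_res) - nodes)))

    verify_owned(False, set(), {"node2"}, {"node1", "node2"})
    verify_owned(True, set(), set(), {"node1", "node2"})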
@@ -10129,10 +10144,18 @@
       self.lu.LogStep(cstep, steps_total, "Removing old storage")
       cstep += 1
       self._RemoveOldStorage(self.target_node, iv_names)
-      # WARNING: we release both node locks here, do not do other RPCs
-      # than WaitForSync to the primary node
-      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
-                    names=[self.target_node, self.other_node])
+      # TODO: Check if releasing locks early still makes sense
+      _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
+    else:
+      # Release all resource locks except those used by the instance
+      _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
+                    keep=self.node_secondary_ip.keys())
+
+    # Release all node locks while waiting for sync
+    _ReleaseLocks(self.lu, locking.LEVEL_NODE)
+
+    # TODO: Can the instance lock be downgraded here? Take the optional disk
+    # shutdown in the caller into consideration.
 
     # Wait for sync
     # This can fail as the old devices are degraded and _WaitForSync
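The release logic after removing the old storage is now split: with early release all resource locks are dropped immediately, otherwise only the resource locks that do not cover the instance's own nodes are given up, and the plain node locks go in both cases before the sync wait. A rough sketch of that branch structure, using hypothetical helper and variable names:

    # Rough sketch of the branch above (hypothetical names): with early release
    # every resource lock goes at once; otherwise only those not covering the
    # instance's nodes. Node locks are dropped either way before the sync wait.
    def release_after_old_storage(early_release, owned_res, instance_nodes):
        if early_release:
            kept_res = set()                                 # drop all NODE_RES
        else:
            kept_res = set(owned_res) & set(instance_nodes)  # keep instance's
        kept_node = set()                                    # drop all NODE
        return kept_res, kept_node

    print(release_after_old_storage(True, {"nodeA", "nodeB"}, {"nodeA"}))
    print(release_after_old_storage(False, {"nodeA", "nodeB"}, {"nodeA"}))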
@@ -10266,6 +10289,9 @@
 
     self.cfg.Update(self.instance, feedback_fn)
 
+    # Release all node locks (the configuration has been updated)
+    _ReleaseLocks(self.lu, locking.LEVEL_NODE)
+
     # and now perform the drbd attach
     self.lu.LogInfo("Attaching primary drbds to new secondary"
                     " (standalone => connected)")
@@ -10287,12 +10313,15 @@
       self.lu.LogStep(cstep, steps_total, "Removing old storage")
       cstep += 1
       self._RemoveOldStorage(self.target_node, iv_names)
-      # WARNING: we release all node locks here, do not do other RPCs
-      # than WaitForSync to the primary node
-      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
-                    names=[self.instance.primary_node,
-                           self.target_node,
-                           self.new_node])
+      # TODO: Check if releasing locks early still makes sense
+      _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
+    else:
+      # Release all resource locks except those used by the instance
+      _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
+                    keep=self.node_secondary_ip.keys())
+
+    # TODO: Can the instance lock be downgraded here? Take the optional disk
+    # shutdown in the caller into consideration.
 
     # Wait for sync
     # This can fail as the old devices are degraded and _WaitForSync