Revision 8b0273a5 lib/cmdlib.py
b/lib/cmdlib.py

8509 8509     def ExpandNames(self):
8510 8510       self._ExpandAndLockInstance()
8511 8511
8512      -     if self.op.iallocator is not None:
8513      -       self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
     8512 +     assert locking.LEVEL_NODE not in self.needed_locks
     8513 +     assert locking.LEVEL_NODEGROUP not in self.needed_locks
8514 8514
8515      -     elif self.op.remote_node is not None:
8516      -       remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8517      -       self.op.remote_node = remote_node
     8515 +     assert self.op.iallocator is None or self.op.remote_node is None, \
     8516 +       "Conflicting options"
     8517 +
     8518 +     if self.op.remote_node is not None:
     8519 +       self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8518 8520
8519 8521         # Warning: do not remove the locking of the new secondary here
8520 8522         # unless DRBD8.AddChildren is changed to work in parallel;
8521 8523         # currently it doesn't since parallel invocations of
8522 8524         # FindUnusedMinor will conflict
8523      -       self.needed_locks[locking.LEVEL_NODE] = [remote_node]
     8525 +       self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
8524 8526         self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
8525      -
8526 8527       else:
8527 8528         self.needed_locks[locking.LEVEL_NODE] = []
8528 8529         self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8529 8530
     8531 +       if self.op.iallocator is not None:
     8532 +         # iallocator will select a new node in the same group
     8533 +         self.needed_locks[locking.LEVEL_NODEGROUP] = []
     8534 +
8530 8535       self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
8531 8536                                      self.op.iallocator, self.op.remote_node,
8532 8537                                      self.op.disks, False, self.op.early_release)
 ...   ...
8534 8539       self.tasklets = [self.replacer]
8535 8540
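Note: in the new version, ExpandNames no longer grabs every node lock when an iallocator is used. It asserts that iallocator and remote_node are mutually exclusive and, in the iallocator case, registers an empty placeholder list at the node-group level that DeclareLocks fills in once the instance lock is held. Below is a minimal, self-contained sketch of that two-phase declare pattern; the level constants, the ToyReplaceDisksLU class and its methods are made-up stand-ins, not Ganeti's actual locking API.

    # Toy sketch of the two-phase lock declaration shown above; the level
    # constants and the class below are stand-ins, not Ganeti's real API.
    LEVEL_NODE = "node"
    LEVEL_NODEGROUP = "nodegroup"

    class ToyReplaceDisksLU(object):
      def __init__(self, remote_node=None, iallocator=None):
        assert iallocator is None or remote_node is None, "Conflicting options"
        self.remote_node = remote_node
        self.iallocator = iallocator
        self.needed_locks = {}

      def expand_names(self):
        if self.remote_node is not None:
          # Explicit target node: lock it right away
          self.needed_locks[LEVEL_NODE] = [self.remote_node]
        else:
          self.needed_locks[LEVEL_NODE] = []
          if self.iallocator is not None:
            # Placeholder only; the concrete groups are not known until the
            # instance lock is held, so declare_locks fills this in later
            self.needed_locks[LEVEL_NODEGROUP] = []

      def declare_locks(self, level, instance_groups):
        if level == LEVEL_NODEGROUP and self.iallocator is not None:
          self.needed_locks[LEVEL_NODEGROUP] = list(instance_groups)

    lu = ToyReplaceDisksLU(iallocator="hail")
    lu.expand_names()
    lu.declare_locks(LEVEL_NODEGROUP, ["group-uuid-1"])
    print(lu.needed_locks)  # {'node': [], 'nodegroup': ['group-uuid-1']}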
8536 8541     def DeclareLocks(self, level):
8537      -     # If we're not already locking all nodes in the set we have to declare the
8538      -     # instance's primary/secondary nodes.
8539      -     if (level == locking.LEVEL_NODE and
8540      -         self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
8541      -       self._LockInstancesNodes()
     8542 +     if level == locking.LEVEL_NODEGROUP:
     8543 +       assert self.op.remote_node is None
     8544 +       assert self.op.iallocator is not None
     8545 +       assert not self.needed_locks[locking.LEVEL_NODEGROUP]
     8546 +
     8547 +       self.share_locks[locking.LEVEL_NODEGROUP] = 1
     8548 +       self.needed_locks[locking.LEVEL_NODEGROUP] = \
     8549 +         self.cfg.GetInstanceNodeGroups(self.op.instance_name)
     8550 +
     8551 +     elif level == locking.LEVEL_NODE:
     8552 +       if self.op.iallocator is not None:
     8553 +         assert self.op.remote_node is None
     8554 +         assert not self.needed_locks[locking.LEVEL_NODE]
     8555 +
     8556 +         # Lock member nodes of all locked groups
     8557 +         self.needed_locks[locking.LEVEL_NODE] = [node_name
     8558 +           for group_uuid in self.acquired_locks[locking.LEVEL_NODEGROUP]
     8559 +           for node_name in self.cfg.GetNodeGroup(group_uuid).members]
     8560 +       else:
     8561 +         self._LockInstancesNodes()
8542 8562
8543 8563     def BuildHooksEnv(self):
8544 8564       """Build hooks env.
 ...   ...
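The rewritten DeclareLocks derives the node-level locks from whichever node groups were acquired at the previous level, flattening their member lists with a nested comprehension. A small standalone illustration of that flattening step, using plain dictionaries in place of the cluster configuration and the acquired-locks map:

    # Stand-in data: the acquired node group locks and a config-like mapping
    # from group UUID to member node names (not Ganeti's real objects)
    acquired_group_locks = ["uuid-a", "uuid-b"]
    group_members = {
      "uuid-a": ["node1", "node2"],
      "uuid-b": ["node3"],
      "uuid-c": ["node4"],  # not locked, therefore not included below
    }

    # Same shape as the comprehension in DeclareLocks: one flat list with the
    # member nodes of every locked group
    needed_node_locks = [node_name
                         for group_uuid in acquired_group_locks
                         for node_name in group_members[group_uuid]]

    assert needed_node_locks == ["node1", "node2", "node3"]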
8568 8588       nl.append(self.op.remote_node)
8569 8589       return nl, nl
8570 8590
     8591 +   def CheckPrereq(self):
     8592 +     """Check prerequisites.
     8593 +
     8594 +     """
     8595 +     assert (locking.LEVEL_NODEGROUP in self.acquired_locks or
     8596 +             self.op.iallocator is None)
     8597 +
     8598 +     if locking.LEVEL_NODEGROUP in self.acquired_locks:
     8599 +       groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
     8600 +       prevgroups = self.acquired_locks[locking.LEVEL_NODEGROUP]
     8601 +       if prevgroups != groups:
     8602 +         raise errors.OpExecError("Node groups used by instance '%s' changed"
     8603 +                                  " since lock was acquired, current list is %r,"
     8604 +                                  " used to be '%s'" %
     8605 +                                  (self.op.instance_name,
     8606 +                                   utils.CommaJoin(groups),
     8607 +                                   utils.CommaJoin(prevgroups)))
     8608 +
     8609 +     return LogicalUnit.CheckPrereq(self)
     8610 +
8571 8611
8572 8612   class TLReplaceDisks(Tasklet):
8573 8613     """Replaces disks for an instance.
 ...   ...
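The new LU-level CheckPrereq re-validates the optimistic assumption behind the group locks: between ExpandNames and lock acquisition the instance could have moved to a different node group, in which case the held group locks would not cover the right nodes and the operation aborts. A hedged sketch of that re-check, with a plain RuntimeError in place of errors.OpExecError:

    # Hedged sketch of the re-validation in CheckPrereq; RuntimeError stands
    # in for errors.OpExecError and the groups are compared as sets here
    def check_group_locks_still_valid(instance_name, locked_groups,
                                      current_groups):
      # The group locks were chosen from an earlier view of the configuration;
      # if the instance moved to other groups in the meantime, the held locks
      # no longer cover the right nodes and the safest option is to abort
      if set(locked_groups) != set(current_groups):
        raise RuntimeError("Node groups used by instance '%s' changed since"
                           " locks were acquired, current list is %s,"
                           " used to be %s" %
                           (instance_name,
                            ", ".join(sorted(current_groups)),
                            ", ".join(sorted(locked_groups))))

    check_group_locks_still_valid("inst1", ["uuid-a"], ["uuid-a"])  # passes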
8720 8760         remote_node = self._RunAllocator(self.lu, self.iallocator_name,
8721 8761                                          instance.name, instance.secondary_nodes)
8722 8762
8723      -     if remote_node is not None:
     8763 +     if remote_node is None:
     8764 +       self.remote_node_info = None
     8765 +     else:
     8766 +       assert remote_node in self.lu.acquired_locks[locking.LEVEL_NODE], \
     8767 +         "Remote node '%s' is not locked" % remote_node
     8768 +
8724 8769         self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
8725 8770         assert self.remote_node_info is not None, \
8726 8771           "Cannot retrieve locked node %s" % remote_node
8727      -     else:
8728      -       self.remote_node_info = None
8729 8772
8730 8773       if remote_node == self.instance.primary_node:
8731 8774         raise errors.OpPrereqError("The specified node is the primary node of"
 ...   ...
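With all member nodes of the locked groups already held, a node returned by the iallocator is only accepted if it falls inside that locked set; otherwise the new assertion fires before the node is used. A short sketch of this defensive check, with an ordinary set standing in for the acquired node locks:

    # Sketch of the defensive check: only accept the allocator's choice if the
    # node is already covered by the locks taken via the group level
    def pick_remote_node(candidate, held_node_locks):
      if candidate is None:
        return None
      assert candidate in held_node_locks, \
        "Remote node '%s' is not locked" % candidate
      return candidate

    print(pick_remote_node("node3", {"node1", "node2", "node3"}))  # node3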
8809 8852       for node in check_nodes:
8810 8853         _CheckNodeOnline(self.lu, node)
8811 8854
8812      -     touched_nodes = frozenset([self.new_node, self.other_node,
8813      -                                self.target_node])
     8855 +     touched_nodes = frozenset(node_name for node_name in [self.new_node,
     8856 +                                                           self.other_node,
     8857 +                                                           self.target_node]
     8858 +                               if node_name is not None)
     8859 +
     8860 +     # Release unneeded node locks
     8861 +     _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
8814 8862
8815      -     if self.lu.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
8816      -       # Release unneeded node locks
8817      -       _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
     8863 +     # Release any owned node group
     8864 +     if self.lu.context.glm.is_owned(locking.LEVEL_NODEGROUP):
     8865 +       _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
8818 8866
8819 8867       # Check whether disks are valid
8820 8868       for disk_idx in self.disks:
 ...   ...
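Once the target, other and new nodes are known, every other node lock, and any node-group lock, is released again so the operation does not serialize more of the cluster than necessary. The keep-set is built by filtering out None entries, because other_node or new_node may be unset depending on the replacement mode. A self-contained sketch of computing the keep/release split; the narrow_node_locks helper is hypothetical, the real code hands the keep-set to _ReleaseLocks(..., keep=touched_nodes):

    # Standalone sketch of the keep/release split; narrow_node_locks is a
    # hypothetical helper, not part of Ganeti
    def narrow_node_locks(held_locks, new_node, other_node, target_node):
      # Some of the three roles can be None (e.g. no new secondary when only
      # replacing disks in place), so filter those out first
      touched_nodes = frozenset(node_name
                                for node_name in [new_node, other_node,
                                                  target_node]
                                if node_name is not None)
      # Everything outside the keep-set can be released again
      to_release = set(held_locks) - touched_nodes
      return touched_nodes, to_release

    kept, dropped = narrow_node_locks({"node1", "node2", "node3", "node4"},
                                      new_node="node4", other_node=None,
                                      target_node="node2")
    assert kept == frozenset(["node2", "node4"])
    assert dropped == set(["node1", "node3"])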
8823 8871       # Get secondary node IP addresses
8824 8872       self.node_secondary_ip = \
8825 8873         dict((node_name, self.cfg.GetNodeInfo(node_name).secondary_ip)
8826      -            for node_name in touched_nodes
8827      -            if node_name is not None)
     8874 +            for node_name in touched_nodes)
8828 8875
8829 8876     def Exec(self, feedback_fn):
8830 8877       """Execute disk replacement.
 ...   ...
8835 8882       if self.delay_iallocator:
8836 8883         self._CheckPrereq2()
8837 8884
8838      -     if (self.lu.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET and
8839      -         __debug__):
     8885 +     if __debug__:
8840 8886         # Verify owned locks before starting operation
8841 8887         owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
8842 8888         assert set(owned_locks) == set(self.node_secondary_ip), \
8843      -         "Not owning the correct locks: %s" % (owned_locks, )
     8889 +         ("Incorrect node locks, owning %s, expected %s" %
     8890 +          (owned_locks, self.node_secondary_ip.keys()))
     8891 +
     8892 +       owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_INSTANCE)
     8893 +       assert list(owned_locks) == [self.instance_name], \
     8894 +         "Instance '%s' not locked" % self.instance_name
     8895 +
     8896 +       assert not self.lu.context.glm.is_owned(locking.LEVEL_NODEGROUP), \
     8897 +         "Should not own any node group lock at this point"
8844 8898
8845 8899       if not self.disks:
8846 8900         feedback_fn("No disks need replacement")