Revision 0d5a0b96 lib/cmdlib.py
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -134,7 +134,6 @@
     self.rpc = rpc
     # Dicts used to declare locking needs to mcpu
     self.needed_locks = None
-    self.acquired_locks = {}
     self.share_locks = dict.fromkeys(locking.LEVELS, 0)
     self.add_locks = {}
     self.remove_locks = {}
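The theme of this revision: every logical unit used to mirror its lock state in a per-LU `acquired_locks` dict, which had to be kept in sync with the lock manager by hand. The hunks below drop that cache and query the lock manager (`self.glm`) directly through `list_owned()` and `is_owned()`. As a rough illustration of the query surface the new code relies on, here is a minimal sketch of such a lock manager; the class and its internals are hypothetical and are not Ganeti's actual lock manager implementation:

# Hypothetical sketch, not Ganeti's implementation: a lock manager that can
# answer the two queries the hunks below depend on, list_owned() and
# is_owned(), plus a release() that accepts an optional list of names.
class _SimpleLockManager(object):
  def __init__(self, levels):
    # One set of owned lock names per locking level
    self._owned = dict((level, set()) for level in levels)

  def acquire(self, level, names):
    self._owned[level].update(names)

  def list_owned(self, level):
    """Return the names of all locks currently owned at a level."""
    return set(self._owned[level])

  def is_owned(self, level):
    """Return whether any lock is owned at a level."""
    return bool(self._owned[level])

  def release(self, level, names=None):
    """Release the named locks, or all locks at the level if names is None."""
    if names is None:
      self._owned[level].clear()
    else:
      self._owned[level].difference_update(names)

With a query interface like this, the per-LU cache is redundant state that can drift from reality; the assertions in the following hunks become consistency checks against a single source of truth instead of comparisons between two copies of the same information.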
@@ -386,7 +385,7 @@
     # future we might want to have different behaviors depending on the value
     # of self.recalculate_locks[locking.LEVEL_NODE]
     wanted_nodes = []
-    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
+    for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
       instance = self.context.cfg.GetInstanceInfo(instance_name)
       wanted_nodes.append(instance.primary_node)
       if not primary_only:
@@ -500,7 +499,7 @@

     """
     if self.do_locking:
-      names = lu.acquired_locks[lock_level]
+      names = lu.glm.list_owned(lock_level)
     else:
       names = all_names

@@ -511,7 +510,7 @@

     # caller specified names and we must keep the same order
     assert self.names
-    assert not self.do_locking or lu.acquired_locks[lock_level]
+    assert not self.do_locking or lu.glm.is_owned(lock_level)

     missing = set(self.wanted).difference(names)
     if missing:
@@ -657,25 +656,23 @@
     release = []

     # Determine which locks to release
-    for name in lu.acquired_locks[level]:
+    for name in lu.glm.list_owned(level):
       if should_release(name):
         release.append(name)
       else:
         retain.append(name)

-    assert len(lu.acquired_locks[level]) == (len(retain) + len(release))
+    assert len(lu.glm.list_owned(level)) == (len(retain) + len(release))

     # Release just some locks
     lu.glm.release(level, names=release)
-    lu.acquired_locks[level] = retain

     assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
   else:
     # Release everything
     lu.glm.release(level)
-    del lu.acquired_locks[level]

-    assert not lu.glm.list_owned(level), "No locks should be owned"
+    assert not lu.glm.is_owned(level), "No locks should be owned"


 def _RunPostHook(lu, node_name):
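The `_ReleaseLocks` hunk above shows the partial-release pattern: partition the locks actually owned into a release list and a retain list, release the former, then assert that exactly the latter remain. A standalone sketch of that pattern, using the hypothetical `_SimpleLockManager` from above and a caller-supplied `should_release` predicate in place of the one `_ReleaseLocks` builds from its `names`/`keep` arguments:

def _ReleasePartial(glm, level, should_release):
  release = []
  retain = []

  # Partition the locks we actually own; there is no cached list to go stale
  for name in glm.list_owned(level):
    if should_release(name):
      release.append(name)
    else:
      retain.append(name)

  glm.release(level, names=release)

  # After a partial release only the retained locks may remain
  assert frozenset(glm.list_owned(level)) == frozenset(retain)


# Example: keep only "node2", release everything else at the level
glm = _SimpleLockManager(["node"])
glm.acquire("node", ["node1", "node2", "node3"])
_ReleasePartial(glm, "node", lambda name: name != "node2")
assert glm.list_owned("node") == set(["node2"])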
@@ -2680,7 +2677,7 @@

     """
     if self.wanted_names is None:
-      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)

     self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                              in self.wanted_names]
@@ -2905,7 +2902,7 @@
                                  " drbd-based instances exist",
                                  errors.ECODE_INVAL)

-    node_list = self.acquired_locks[locking.LEVEL_NODE]
+    node_list = self.glm.list_owned(locking.LEVEL_NODE)

     # if vg_name not None, checks given volume group on all nodes
     if self.op.vg_name:
@@ -3673,7 +3670,9 @@

     """
     # Locking is not used
-    assert not (lu.acquired_locks or self.do_locking or self.use_locking)
+    assert not (compat.any(lu.glm.is_owned(level)
+                           for level in locking.LEVELS) or
+                self.do_locking or self.use_locking)

     valid_nodes = [node.name
                    for node in lu.cfg.GetAllNodesInfo().values()
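`compat.any` above is, to the best of my knowledge, Ganeti's fallback for the `any()` builtin, which older Python versions supported by the tree lacked; semantically the new assertion just says "no locking level owns any lock". Restated with the stdlib builtin against the sketch lock manager (the level names are illustrative, not Ganeti's real `locking.LEVELS`):

LEVELS = ["cluster", "nodegroup", "node", "instance"]  # illustrative names

glm = _SimpleLockManager(LEVELS)
# Equivalent of the compat.any(...) assertion in the hunk above
assert not any(glm.is_owned(level) for level in LEVELS)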
@@ -3979,7 +3978,7 @@
     """Computes the list of nodes and their attributes.

     """
-    nodenames = self.acquired_locks[locking.LEVEL_NODE]
+    nodenames = self.glm.list_owned(locking.LEVEL_NODE)
     volumes = self.rpc.call_node_volumes(nodenames)

     ilist = [self.cfg.GetInstanceInfo(iname) for iname
@@ -4057,7 +4056,7 @@
     """Computes the list of nodes and their attributes.

     """
-    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
+    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)

     # Always get name to sort by
     if constants.SF_NAME in self.op.output_fields:
@@ -4632,7 +4631,7 @@
       instances_keep = []

       # Build list of instances to release
-      for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
+      for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
         instance = self.context.cfg.GetInstanceInfo(instance_name)
         if (instance.disk_template in constants.DTS_INT_MIRROR and
             self.op.node_name in instance.all_nodes):
@@ -4641,7 +4640,7 @@

       _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)

-      assert (set(self.acquired_locks.get(locking.LEVEL_INSTANCE, [])) ==
+      assert (set(self.glm.list_owned(locking.LEVEL_INSTANCE)) ==
               set(instances_keep))

   def BuildHooksEnv(self):
@@ -7770,7 +7769,7 @@
     src_path = self.op.src_path

     if src_node is None:
-      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
+      locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
       exp_list = self.rpc.call_export_list(locked_nodes)
       found = False
       for node in exp_list:
@@ -8553,7 +8552,7 @@

       # Lock member nodes of all locked groups
       self.needed_locks[locking.LEVEL_NODE] = [node_name
-        for group_uuid in self.acquired_locks[locking.LEVEL_NODEGROUP]
+        for group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
         for node_name in self.cfg.GetNodeGroup(group_uuid).members]
     else:
       self._LockInstancesNodes()
@@ -8590,19 +8589,19 @@
     """Check prerequisites.

     """
-    assert (locking.LEVEL_NODEGROUP in self.acquired_locks or
+    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
             self.op.iallocator is None)

-    if locking.LEVEL_NODEGROUP in self.acquired_locks:
+    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    if owned_groups:
       groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
-      prevgroups = self.acquired_locks[locking.LEVEL_NODEGROUP]
-      if prevgroups != groups:
+      if owned_groups != groups:
         raise errors.OpExecError("Node groups used by instance '%s' changed"
                                  " since lock was acquired, current list is %r,"
                                  " used to be '%s'" %
                                  (self.op.instance_name,
                                   utils.CommaJoin(groups),
-                                  utils.CommaJoin(prevgroups)))
+                                  utils.CommaJoin(owned_groups)))

     return LogicalUnit.CheckPrereq(self)

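The hunk above also tightens the staleness check: the set of node groups that was locked is now fetched once via `list_owned()` and reused, instead of being read from the old cache under two names (`prevgroups` and the membership test). The underlying pattern is optimistic verification: after acquiring group locks, recompute which groups the instance actually spans and fail if they no longer match. A minimal, self-contained sketch; the function name and the plain `RuntimeError` are illustrative stand-ins for the `errors.OpExecError` raised in the real code:

def _VerifyGroupsUnchanged(owned_groups, current_groups, instance_name):
  """Fail if the instance's node groups changed after the locks were taken."""
  if set(owned_groups) != set(current_groups):
    raise RuntimeError("Node groups used by instance '%s' changed since lock"
                       " was acquired, current list is %r, used to be %r" %
                       (instance_name, sorted(current_groups),
                        sorted(owned_groups)))


# Example: passes while the group set is unchanged
_VerifyGroupsUnchanged(["uuid-a"], ["uuid-a"], "inst1")
# _VerifyGroupsUnchanged(["uuid-a"], ["uuid-b"], "inst1")  # would raise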
@@ -8761,7 +8760,7 @@
     if remote_node is None:
       self.remote_node_info = None
     else:
-      assert remote_node in self.lu.acquired_locks[locking.LEVEL_NODE], \
+      assert remote_node in self.lu.glm.list_owned(locking.LEVEL_NODE), \
              "Remote node '%s' is not locked" % remote_node

       self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
@@ -9592,7 +9591,7 @@
     """
     if self.wanted_names is None:
       assert self.op.use_locking, "Locking was not used"
-      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)

     self.wanted_instances = [self.cfg.GetInstanceInfo(name)
                              for name in self.wanted_names]
@@ -10400,7 +10399,7 @@
     that node.

     """
-    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
+    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
     rpcresult = self.rpc.call_export_list(self.nodes)
     result = {}
     for node in rpcresult:
@@ -10782,7 +10781,7 @@
     fqdn_warn = True
     instance_name = self.op.instance_name

-    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
+    locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
     exportlist = self.rpc.call_export_list(locked_nodes)
     found = False
     for node in exportlist: