Revision af993a2c
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -119,6 +119,8 @@
     self.op = op
     self.cfg = context.cfg
     self.glm = context.glm
+    # readability alias
+    self.owned_locks = context.glm.list_owned
     self.context = context
     self.rpc = rpc
     # Dicts used to declare locking needs to mcpu
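The hunk above is the heart of the change: it stores a bound method on the LU so every later call site can write self.owned_locks(level) instead of self.glm.list_owned(level). A minimal, self-contained sketch of that bound-method alias pattern (class names here are illustrative stand-ins, not Ganeti's real API):

class LockManager(object):
  def __init__(self, owned):
    self._owned = owned

  def list_owned(self, level):
    # Return a copy so callers cannot mutate internal state
    return set(self._owned.get(level, ()))

class LogicalUnit(object):
  def __init__(self, glm):
    self.glm = glm
    # readability alias: self.owned_locks is the bound method
    # glm.list_owned, so the two calls below are equivalent
    self.owned_locks = glm.list_owned

lu = LogicalUnit(LockManager({"node": set(["node1", "node2"])}))
assert lu.owned_locks("node") == lu.glm.list_owned("node")

Because the alias is bound once at construction time, it works from any LU method afterwards, at the cost of one extra attribute per LU instance.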
@@ -374,7 +376,7 @@
     # future we might want to have different behaviors depending on the value
     # of self.recalculate_locks[locking.LEVEL_NODE]
     wanted_nodes = []
-    locked_i = self.glm.list_owned(locking.LEVEL_INSTANCE)
+    locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
     for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
       wanted_nodes.append(instance.primary_node)
       if not primary_only:
@@ -488,7 +490,7 @@
 
     """
     if self.do_locking:
-      names = lu.glm.list_owned(lock_level)
+      names = lu.owned_locks(lock_level)
     else:
       names = all_names
 
@@ -691,18 +693,18 @@
     release = []
 
     # Determine which locks to release
-    for name in lu.glm.list_owned(level):
+    for name in lu.owned_locks(level):
       if should_release(name):
         release.append(name)
       else:
         retain.append(name)
 
-    assert len(lu.glm.list_owned(level)) == (len(retain) + len(release))
+    assert len(lu.owned_locks(level)) == (len(retain) + len(release))
 
     # Release just some locks
     lu.glm.release(level, names=release)
 
-    assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
+    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
   else:
     # Release everything
     lu.glm.release(level)
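The _ReleaseLocks hunk follows a partition-and-verify pattern: split the owned names into release and retain buckets, assert the partition covers everything, release, then assert only the retained names are still owned. A standalone sketch of the same flow (partial_release is a hypothetical helper, not part of cmdlib.py):

def partial_release(owned, should_release):
  # Partition owned lock names by the predicate
  release = [name for name in owned if should_release(name)]
  retain = [name for name in owned if not should_release(name)]
  # Every owned name must land in exactly one bucket
  assert len(owned) == len(retain) + len(release)
  return retain, release

retain, release = partial_release(["node1", "node2", "node3"],
                                  lambda name: name != "node2")
assert retain == ["node2"]
assert release == ["node1", "node3"]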
@@ -1658,7 +1660,7 @@
     # volumes for these instances are healthy, we will need to do an
     # extra call to their secondaries. We ensure here those nodes will
     # be locked.
-    for inst in self.glm.list_owned(locking.LEVEL_INSTANCE):
+    for inst in self.owned_locks(locking.LEVEL_INSTANCE):
       # Important: access only the instances whose lock is owned
       if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
         nodes.update(all_inst_info[inst].secondary_nodes)
@@ -1670,10 +1672,10 @@
     group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
 
     unlocked_nodes = \
-      group_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
+      group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
 
     unlocked_instances = \
-      group_instances.difference(self.glm.list_owned(locking.LEVEL_INSTANCE))
+      group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
 
     if unlocked_nodes:
       raise errors.OpPrereqError("Missing lock for nodes: %s" %
@@ -1707,7 +1709,7 @@
         extra_lv_nodes.add(nname)
 
     unlocked_lv_nodes = \
-      extra_lv_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
+      extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
 
     if unlocked_lv_nodes:
       raise errors.OpPrereqError("these nodes could be locked: %s" %
@@ -2931,7 +2933,7 @@
       }
 
   def Exec(self, feedback_fn):
-    group_names = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
 
     # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
     return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
@@ -2973,10 +2975,8 @@
       # going via the node before it's locked, requiring verification
       # later on
       [group_uuid
-       for instance_name in
-       self.glm.list_owned(locking.LEVEL_INSTANCE)
-       for group_uuid in
-       self.cfg.GetInstanceNodeGroups(instance_name)])
+       for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
+       for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
 
     elif level == locking.LEVEL_NODE:
       # This will only lock the nodes in the group to be verified which contain
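Alongside the rename, this hunk joins each for clause with its iterable on one line, which keeps the nested generator expression readable: it collects the UUID of every node group touched by any instance whose lock is owned. In isolation the shape is (get_instance_node_groups stands in for cfg.GetInstanceNodeGroups):

instance_groups = {
  "inst1": ["group-a", "group-b"],
  "inst2": ["group-b"],
}

def get_instance_node_groups(instance_name):
  # Stand-in for self.cfg.GetInstanceNodeGroups(instance_name)
  return instance_groups[instance_name]

owned_instances = ["inst1", "inst2"]
needed_groups = set(group_uuid
                    for instance_name in owned_instances
                    for group_uuid in
                      get_instance_node_groups(instance_name))
assert needed_groups == set(["group-a", "group-b"])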
@@ -2985,14 +2985,14 @@
       self._LockInstancesNodes()
 
       # Lock all nodes in group to be verified
-      assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
       member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
       self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert self.group_uuid in owned_groups
 
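A recurring idiom in these CheckPrereq hunks: each lock level is snapshotted into a frozenset before the checks run. A frozenset gives fast membership tests and full set algebra while guaranteeing the snapshot cannot be mutated later, so every assertion below it refers to the state at snapshot time. For example:

owned_nodes = frozenset(["node1", "node2"])
owned_groups = frozenset(["group-a"])

# Membership and subset checks work as on a normal set ...
assert "group-a" in owned_groups
assert owned_nodes.issuperset(["node1"])
# ... but accidental mutation is impossible: frozenset has no
# add() or remove() methods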
@@ -3037,7 +3037,7 @@
                                        if inst.admin_up])
 
     if nv_dict:
-      nodes = utils.NiceSort(set(self.glm.list_owned(locking.LEVEL_NODE)) &
+      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                              set(self.cfg.GetVmCapableNodeList()))
 
       node_lvs = self.rpc.call_lv_list(nodes, [])
@@ -3098,7 +3098,7 @@
 
     """
     if self.wanted_names is None:
-      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
+      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
 
     self.wanted_instances = \
       map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
@@ -3323,7 +3323,7 @@
                                  " drbd-based instances exist",
                                  errors.ECODE_INVAL)
 
-    node_list = self.glm.list_owned(locking.LEVEL_NODE)
+    node_list = self.owned_locks(locking.LEVEL_NODE)
 
     # if vg_name not None, checks given volume group on all nodes
     if self.op.vg_name:
@@ -4394,7 +4394,7 @@
     """Computes the list of nodes and their attributes.
 
     """
-    nodenames = self.glm.list_owned(locking.LEVEL_NODE)
+    nodenames = self.owned_locks(locking.LEVEL_NODE)
     volumes = self.rpc.call_node_volumes(nodenames)
 
     ilist = self.cfg.GetAllInstancesInfo()
@@ -4463,7 +4463,7 @@
     """Computes the list of nodes and their attributes.
 
     """
-    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
+    self.nodes = self.owned_locks(locking.LEVEL_NODE)
 
     # Always get name to sort by
     if constants.SF_NAME in self.op.output_fields:
@@ -4552,17 +4552,15 @@
       # via the node before it's locked, requiring verification later on
       lu.needed_locks[locking.LEVEL_NODEGROUP] = \
         set(group_uuid
-            for instance_name in
-            lu.glm.list_owned(locking.LEVEL_INSTANCE)
-            for group_uuid in
-            lu.cfg.GetInstanceNodeGroups(instance_name))
+            for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+            for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
     elif level == locking.LEVEL_NODE:
       lu._LockInstancesNodes() # pylint: disable-msg=W0212
 
   @staticmethod
   def _CheckGroupLocks(lu):
-    owned_instances = frozenset(lu.glm.list_owned(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(lu.glm.list_owned(locking.LEVEL_NODEGROUP))
+    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
 
     # Check if node groups for locked instances are still correct
     for instance_name in owned_instances:
@@ -5075,7 +5073,7 @@
       instances_keep = []
 
       # Build list of instances to release
-      locked_i = self.glm.list_owned(locking.LEVEL_INSTANCE)
+      locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
       for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i):
         if (instance.disk_template in constants.DTS_INT_MIRROR and
             self.op.node_name in instance.all_nodes):
@@ -5084,7 +5082,7 @@
 
       _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
 
-      assert (set(self.glm.list_owned(locking.LEVEL_INSTANCE)) ==
+      assert (set(self.owned_locks(locking.LEVEL_INSTANCE)) ==
               set(instances_keep))
 
   def BuildHooksEnv(self):
@@ -6898,7 +6896,7 @@
     # running the iallocator and the actual migration, a good consistency model
     # will have to be found.
 
-    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
             frozenset([self.op.node_name]))
 
     return ResultWithJobs(jobs)
@@ -8294,7 +8292,7 @@
       src_path = self.op.src_path
 
       if src_node is None:
-        locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+        locked_nodes = self.owned_locks(locking.LEVEL_NODE)
         exp_list = self.rpc.call_export_list(locked_nodes)
         found = False
         for node in exp_list:
@@ -9122,7 +9120,7 @@
 
       # Lock member nodes of all locked groups
       self.needed_locks[locking.LEVEL_NODE] = [node_name
-        for group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+        for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
         for node_name in self.cfg.GetNodeGroup(group_uuid).members]
     else:
       self._LockInstancesNodes()
@@ -9162,7 +9160,7 @@
     assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
             self.op.iallocator is None)
 
-    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
     if owned_groups:
       _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
 
@@ -9323,7 +9321,7 @@
     if remote_node is None:
       self.remote_node_info = None
     else:
-      assert remote_node in self.lu.glm.list_owned(locking.LEVEL_NODE), \
+      assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
             "Remote node '%s' is not locked" % remote_node
 
       self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
@@ -9443,13 +9441,13 @@
 
     if __debug__:
       # Verify owned locks before starting operation
-      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
-      assert set(owned_locks) == set(self.node_secondary_ip), \
+      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
+      assert set(owned_nodes) == set(self.node_secondary_ip), \
           ("Incorrect node locks, owning %s, expected %s" %
-           (owned_locks, self.node_secondary_ip.keys()))
+           (owned_nodes, self.node_secondary_ip.keys()))
 
-      owned_locks = self.lu.glm.list_owned(locking.LEVEL_INSTANCE)
-      assert list(owned_locks) == [self.instance_name], \
+      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
+      assert list(owned_instances) == [self.instance_name], \
           "Instance '%s' not locked" % self.instance_name
 
       assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
@@ -9484,12 +9482,12 @@
 
     if __debug__:
       # Verify owned locks
-      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
+      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
       nodes = frozenset(self.node_secondary_ip)
-      assert ((self.early_release and not owned_locks) or
-              (not self.early_release and not (set(owned_locks) - nodes))), \
+      assert ((self.early_release and not owned_nodes) or
+              (not self.early_release and not (set(owned_nodes) - nodes))), \
         ("Not owning the correct locks, early_release=%s, owned=%r,"
-         " nodes=%r" % (self.early_release, owned_locks, nodes))
+         " nodes=%r" % (self.early_release, owned_nodes, nodes))
 
     return result
 
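Both hunks above also rename the reused owned_locks variable to owned_nodes and owned_instances, so each assertion names the lock level it checks. The surrounding if __debug__: guard means the verification only runs when Python assertions are enabled; under python -O the whole block is skipped. A sketch of the guard (check_node_locks is illustrative, not Ganeti's API):

def check_node_locks(owned_nodes, node_secondary_ip):
  if __debug__:
    # Compiled away under "python -O", so production runs pay
    # no cost for this consistency check
    assert set(owned_nodes) == set(node_secondary_ip), \
        ("Incorrect node locks, owning %s, expected %s" %
         (owned_nodes, list(node_secondary_ip)))

check_node_locks(["node1"], {"node1": "192.0.2.1"})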
@@ -10048,9 +10046,9 @@
 
   def CheckPrereq(self):
     # Verify locks
-    owned_instances = self.glm.list_owned(locking.LEVEL_INSTANCE)
-    owned_nodes = self.glm.list_owned(locking.LEVEL_NODE)
-    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+    owned_nodes = self.owned_locks(locking.LEVEL_NODE)
+    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
 
     assert owned_nodes == self.lock_nodes
 
@@ -10341,7 +10339,7 @@
     """
     if self.wanted_names is None:
       assert self.op.use_locking, "Locking was not used"
-      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
+      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
 
     self.wanted_instances = \
       map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
@@ -11183,7 +11181,7 @@
       self._LockInstancesNodes()
 
       # Lock all nodes in all potential target groups
-      lock_groups = (frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP)) -
+      lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                      self.cfg.GetInstanceNodeGroups(self.op.instance_name))
       member_nodes = [node_name
                       for group in lock_groups
@@ -11194,9 +11192,9 @@
       self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert (self.req_target_uuids is None or
             owned_groups.issuperset(self.req_target_uuids))
@@ -11254,7 +11252,7 @@
     return ([mn], [mn])
 
   def Exec(self, feedback_fn):
-    instances = list(self.glm.list_owned(locking.LEVEL_INSTANCE))
+    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
 
     assert instances == [self.op.instance_name], "Instance not locked"
 
@@ -11302,7 +11300,7 @@
     that node.
 
     """
-    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
+    self.nodes = self.owned_locks(locking.LEVEL_NODE)
     rpcresult = self.rpc.call_export_list(self.nodes)
     result = {}
     for node in rpcresult:
@@ -11685,7 +11683,7 @@
       fqdn_warn = True
       instance_name = self.op.instance_name
 
-    locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+    locked_nodes = self.owned_locks(locking.LEVEL_NODE)
     exportlist = self.rpc.call_export_list(locked_nodes)
     found = False
     for node in exportlist:
@@ -11805,12 +11803,12 @@
 
     """
     assert self.needed_locks[locking.LEVEL_NODEGROUP]
-    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset(self.op.nodes))
 
     expected_locks = (set([self.group_uuid]) |
                       self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
-    actual_locks = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
     if actual_locks != expected_locks:
       raise errors.OpExecError("Nodes changed groups since locks were acquired,"
                                " current groups are '%s', used to be '%s'" %
@@ -12263,7 +12261,7 @@
       # via the node before it's locked, requiring verification later on
       lock_groups.update(group_uuid
                          for instance_name in
-                           self.glm.list_owned(locking.LEVEL_INSTANCE)
+                           self.owned_locks(locking.LEVEL_INSTANCE)
                          for group_uuid in
                            self.cfg.GetInstanceNodeGroups(instance_name))
     else:
@@ -12279,7 +12277,7 @@
       self._LockInstancesNodes()
 
      # Lock all nodes in group to be evacuated and target groups
-      owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
+      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
       assert self.group_uuid in owned_groups
       member_nodes = [node_name
                       for group in owned_groups
@@ -12287,9 +12285,9 @@
       self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert owned_groups.issuperset(self.req_target_uuids)
     assert self.group_uuid in owned_groups
@@ -12347,14 +12345,14 @@
     """
     mn = self.cfg.GetMasterNode()
 
-    assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
 
     run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
 
     return (run_nodes, run_nodes)
 
   def Exec(self, feedback_fn):
-    instances = list(self.glm.list_owned(locking.LEVEL_INSTANCE))
+    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
 
     assert self.group_uuid not in self.target_uuids
 