From adea59b3614e796a9b7fa9fdac9b9fd7d7500c47 Mon Sep 17 00:00:00 2001
From: Dimitris Aragiorgis
Date: Mon, 25 Nov 2013 18:25:08 +0200
Subject: [PATCH] (2.8r) Workaround for Issue 621

Upon LUNetworkDisconnect() and LUNetworkConnect(), try to acquire all
of the cluster's instances. This way the _LS_ACQUIRE_ALL acquire mode
is used instead of _LS_ACQUIRE_EXACT, so the deleted lock does not
cause any problem.

NOTE: This workaround is not merged upstream. Upstream prefers to have
one opcode fail and make the end user retry, rather than a greedy
approach that locks all instances to resolve this. Since synnefo does
not use nodegroups, locking all instances is the same as locking the
instances of the default nodegroup.

Signed-off-by: Dimitris Aragiorgis
---
 lib/cmdlib/network.py | 28 ++++++----------------------
 1 file changed, 6 insertions(+), 22 deletions(-)

diff --git a/lib/cmdlib/network.py b/lib/cmdlib/network.py
index 2f2dcd7..430cdb3 100644
--- a/lib/cmdlib/network.py
+++ b/lib/cmdlib/network.py
@@ -30,7 +30,7 @@ from ganeti import qlang
 from ganeti import query
 from ganeti import utils
 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase
-from ganeti.cmdlib.common import ShareAll, CheckNodeGroupInstances
+from ganeti.cmdlib.common import ShareAll
 
 
 def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
@@ -580,24 +580,17 @@ class LUNetworkConnect(LogicalUnit):
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
 
     self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
-    self.share_locks[locking.LEVEL_INSTANCE] = 1
 
     if self.op.conflicts_check:
+      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
       self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
       self.share_locks[locking.LEVEL_NETWORK] = 1
+      self.share_locks[locking.LEVEL_INSTANCE] = 1
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once group lock has
-      # been acquired
-      if self.op.conflicts_check:
-        self.needed_locks[locking.LEVEL_INSTANCE] = \
-          self.cfg.GetNodeGroupInstances(self.group_uuid)
+    pass
 
   def BuildHooksEnv(self):
     ret = {
@@ -618,8 +611,6 @@ class LUNetworkConnect(LogicalUnit):
 
     # Check if locked instances are still correct
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    if self.op.conflicts_check:
-      CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.netparams = {
       constants.NIC_MODE: self.network_mode,
@@ -666,19 +657,13 @@ class LUNetworkDisconnect(LogicalUnit):
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
 
     self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
     self.share_locks[locking.LEVEL_INSTANCE] = 1
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once group lock has
-      # been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
+    pass
 
   def BuildHooksEnv(self):
     ret = {
@@ -697,7 +682,6 @@ class LUNetworkDisconnect(LogicalUnit):
 
     # Check if locked instances are still correct
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True
-- 
1.7.10.4
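
For quick reference, below is a minimal sketch of the locking setup that
LUNetworkDisconnect ends up with after this patch (LUNetworkConnect gets the
same ALL_SET request, but only when conflicts_check is set). It is condensed
from the hunks above and assumes the ganeti source tree; everything not shown
(hooks, CheckPrereq, Exec, and how self.group_name gets resolved) is unchanged
and omitted here.

    # Minimal sketch, condensed from the hunks above; not the full class.
    from ganeti import locking
    from ganeti.cmdlib.base import LogicalUnit


    class LUNetworkDisconnect(LogicalUnit):
      def ExpandNames(self):
        # self.group_name is resolved earlier in the real LU.
        self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)

        # Requesting ALL_SET switches the acquire mode to _LS_ACQUIRE_ALL
        # instead of _LS_ACQUIRE_EXACT, so an instance lock deleted by a
        # concurrent instance removal can no longer fail the acquisition.
        self.needed_locks = {
          locking.LEVEL_INSTANCE: locking.ALL_SET,
          locking.LEVEL_NODEGROUP: [self.group_uuid],
          }
        self.share_locks[locking.LEVEL_INSTANCE] = 1

      def DeclareLocks(self, level):
        # No per-level refinement is needed any more; the instance locks
        # are already declared as ALL_SET in ExpandNames().
        pass

Locking every instance is heavier than the optimistic per-group list it
replaces, but with a single (default) nodegroup, as in synnefo, both end up
covering the same set of instances.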