(2.8r) Workaround for Issue 621
author Dimitris Aragiorgis <dimara@grnet.gr>
Mon, 25 Nov 2013 16:25:08 +0000 (18:25 +0200)
committer Dimitris Aragiorgis <dimara@grnet.gr>
Tue, 17 Dec 2013 10:46:25 +0000 (12:46 +0200)
In LUNetworkDisconnect() and LUNetworkConnect(), try to acquire
the locks of all of the cluster's instances.

This way the _LS_ACQUIRE_ALL acquire mode is used instead of
_LS_ACQUIRE_EXACT, and thus a concurrently deleted lock does not
cause any problem.
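
The effect of the two acquire modes can be illustrated with a minimal,
self-contained sketch (a toy model only; ToyLockSet and its methods are
illustrative and are not Ganeti's real locking.LockSet API):

class ToyLockSet(object):
  """Toy stand-in for a set of per-instance locks (illustrative only)."""

  def __init__(self, names):
    self._locks = set(names)

  def delete(self, name):
    # Another opcode removes an instance and its lock while we wait.
    self._locks.discard(name)

  def acquire_exact(self, names):
    # Exact mode: every requested lock must still exist, so a deleted
    # lock makes the whole acquisition (and hence the opcode) fail.
    missing = set(names) - self._locks
    if missing:
      raise RuntimeError("locks vanished: %s" % sorted(missing))
    return set(names)

  def acquire_all(self):
    # "All" mode: take whatever locks currently exist; a concurrent
    # deletion simply drops out of the result instead of failing.
    return set(self._locks)


locks = ToyLockSet(["inst1", "inst2", "inst3"])
locks.delete("inst2")
print(locks.acquire_all())                      # works: inst1, inst3
print(locks.acquire_exact(["inst1", "inst2"]))  # raises RuntimeError
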

NOTE: This workaround has not been merged upstream. Upstream prefers
to have one opcode fail and let the end user retry, rather than
resolve this greedily by locking all instances. Since synnefo does
not use nodegroups, locking all instances is the same as locking the
instances of the default nodegroup.

Signed-off-by: Dimitris Aragiorgis <dimara@grnet.gr>

lib/cmdlib/network.py

index 2f2dcd7..430cdb3 100644
@@ -30,7 +30,7 @@ from ganeti import qlang
 from ganeti import query
 from ganeti import utils
 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase
-from ganeti.cmdlib.common import ShareAll, CheckNodeGroupInstances
+from ganeti.cmdlib.common import ShareAll
 
 
 def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
@@ -580,24 +580,17 @@ class LUNetworkConnect(LogicalUnit):
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
 
     self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
-    self.share_locks[locking.LEVEL_INSTANCE] = 1
 
     if self.op.conflicts_check:
+      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
       self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
       self.share_locks[locking.LEVEL_NETWORK] = 1
+      self.share_locks[locking.LEVEL_INSTANCE] = 1
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once group lock has
-      # been acquired
-      if self.op.conflicts_check:
-        self.needed_locks[locking.LEVEL_INSTANCE] = \
-            self.cfg.GetNodeGroupInstances(self.group_uuid)
+    pass
 
   def BuildHooksEnv(self):
     ret = {
@@ -618,8 +611,6 @@ class LUNetworkConnect(LogicalUnit):
 
     # Check if locked instances are still correct
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    if self.op.conflicts_check:
-      CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.netparams = {
       constants.NIC_MODE: self.network_mode,
@@ -666,19 +657,13 @@ class LUNetworkDisconnect(LogicalUnit):
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
 
     self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
     self.share_locks[locking.LEVEL_INSTANCE] = 1
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once group lock has
-      # been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
+    pass
 
   def BuildHooksEnv(self):
     ret = {
@@ -697,7 +682,6 @@ class LUNetworkDisconnect(LogicalUnit):
 
     # Check if locked instances are still correct
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True
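
For readability, this is how the locking declaration of
LUNetworkDisconnect reads with the hunks above applied (reconstructed
from the diff, not a verbatim excerpt of the patched file;
LUNetworkConnect is analogous, with the instance locks requested only
when conflicts_check is set):

  # Reconstructed from the hunks above; surrounding methods omitted.
  def ExpandNames(self):
    self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)

    self.needed_locks = {
      # ALL_SET requests every instance lock (the _LS_ACQUIRE_ALL
      # mode), so a concurrently deleted instance lock no longer
      # aborts the opcode.
      locking.LEVEL_INSTANCE: locking.ALL_SET,
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }
    self.share_locks[locking.LEVEL_INSTANCE] = 1

  def DeclareLocks(self, level):
    # No per-level refinement is needed any more; everything is
    # requested up front in ExpandNames.
    pass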