(2.8r) Workaround for Issue 621
author    Dimitris Aragiorgis <dimara@grnet.gr>
Mon, 25 Nov 2013 16:25:08 +0000 (18:25 +0200)
committer Dimitris Aragiorgis <dimara@grnet.gr>
Thu, 29 May 2014 11:07:47 +0000 (14:07 +0300)
Upon LUNetworkDisconnect() and LUNetworkConnect(), try to acquire
all of the cluster's instances.

This way the _LS_ACQUIRE_ALL acquire mode is used instead of
_LS_ACQUIRE_EXACT, and thus the deleted lock does not cause any
problem.
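
For illustration, the fix amounts to how ExpandNames() fills
needed_locks at locking.LEVEL_INSTANCE. A minimal sketch of the two
request styles follows (the helper names are hypothetical; the
locking/cfg calls are the ones used in the patch):

  from ganeti import locking

  def _request_instance_locks_exact(lu):
    # Pre-patch behaviour: an explicit list of names, taken from the
    # node group's current instances. The LockSet then acquires in
    # _LS_ACQUIRE_EXACT mode, so every named lock must still exist;
    # a lock deleted by a concurrent instance removal fails the opcode.
    lu.needed_locks[locking.LEVEL_INSTANCE] = \
      lu.cfg.GetInstanceNames(
        lu.cfg.GetNodeGroupInstances(lu.group_uuid))

  def _request_instance_locks_all(lu):
    # Workaround: locking.ALL_SET switches the LockSet to
    # _LS_ACQUIRE_ALL mode, which takes whatever instance locks exist
    # at acquisition time, so a concurrently deleted lock is harmless.
    lu.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
    lu.share_locks[locking.LEVEL_INSTANCE] = 1  # shared, as in the patch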

NOTE: This workaround is not merged upstream. Upstream prefers to
have one opcode fail and make the end user retry, rather than a
greedy approach that locks all instances to resolve this. Since
synnefo does not use nodegroups, locking all instances is the same
as locking the instances of the default nodegroup.

Signed-off-by: Dimitris Aragiorgis <dimara@grnet.gr>

lib/cmdlib/network.py

index 5dc8771..10f288c 100644
@@ -30,7 +30,7 @@ from ganeti import qlang
 from ganeti import query
 from ganeti import utils
 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase
-from ganeti.cmdlib.common import ShareAll, CheckNodeGroupInstances
+from ganeti.cmdlib.common import ShareAll
 
 
 def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
@@ -576,25 +576,17 @@ class LUNetworkConnect(LogicalUnit):
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
 
     self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
-    self.share_locks[locking.LEVEL_INSTANCE] = 1
 
     if self.op.conflicts_check:
+      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
       self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
       self.share_locks[locking.LEVEL_NETWORK] = 1
+      self.share_locks[locking.LEVEL_INSTANCE] = 1
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once group lock has
-      # been acquired
-      if self.op.conflicts_check:
-        self.needed_locks[locking.LEVEL_INSTANCE] = \
-          self.cfg.GetInstanceNames(
-            self.cfg.GetNodeGroupInstances(self.group_uuid))
+    pass
 
   def BuildHooksEnv(self):
     ret = {
@@ -615,8 +607,6 @@ class LUNetworkConnect(LogicalUnit):
 
     # Check if locked instances are still correct
     owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    if self.op.conflicts_check:
-      CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
 
     self.netparams = {
       constants.NIC_MODE: self.network_mode,
@@ -665,20 +655,13 @@ class LUNetworkDisconnect(LogicalUnit):
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
 
     self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
     self.share_locks[locking.LEVEL_INSTANCE] = 1
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once group lock has
-      # been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetInstanceNames(
-          self.cfg.GetNodeGroupInstances(self.group_uuid))
+    pass
 
   def BuildHooksEnv(self):
     ret = {
@@ -697,7 +680,6 @@ class LUNetworkDisconnect(LogicalUnit):
 
     # Check if locked instances are still correct
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True