Add error code for temporary lack of resources
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 92b881d..d29631a 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -3685,6 +3685,12 @@ class LUGroupVerifyDisks(NoHooksLU):
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [],
       locking.LEVEL_NODE: [],
+
+      # This opcode acquires all node locks in a group. LUClusterVerifyDisks
+      # starts one instance of this opcode for every group, which means all
+      # nodes will be locked for a short amount of time, so it's better to
+      # acquire the node allocation lock as well.
+      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
       }
 
   def DeclareLocks(self, level):
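
The recurring theme of this patch is the cluster-wide node allocation lock (locking.NAL at level locking.LEVEL_NODE_ALLOC): any LU that ends up holding all (or most) node locks also declares it, usually in shared mode, so that allocating opcodes are kept out while it runs. A condensed sketch of that pattern, for illustration only (not itself a hunk of this diff):

  # Illustrative ExpandNames body following the pattern applied below
  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      # Block allocations while all node locks are held
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }
    # Shared mode is enough when the goal is only to keep allocations out
    self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
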
@@ -3791,6 +3797,8 @@ class LUClusterRepairDiskSizes(NoHooksLU):
   def ExpandNames(self):
     if self.op.instances:
       self.wanted_names = _GetWantedInstances(self, self.op.instances)
+      # Not getting the node allocation lock as only a specific set of
+      # instances (and their nodes) is going to be locked
       self.needed_locks = {
         locking.LEVEL_NODE_RES: [],
         locking.LEVEL_INSTANCE: self.wanted_names,
@@ -3801,10 +3809,15 @@ class LUClusterRepairDiskSizes(NoHooksLU):
       self.needed_locks = {
         locking.LEVEL_NODE_RES: locking.ALL_SET,
         locking.LEVEL_INSTANCE: locking.ALL_SET,
+
+        # This opcode acquires the node locks for all instances
+        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
         }
+
     self.share_locks = {
       locking.LEVEL_NODE_RES: 1,
       locking.LEVEL_INSTANCE: 0,
+      locking.LEVEL_NODE_ALLOC: 1,
       }
 
   def DeclareLocks(self, level):
@@ -4601,8 +4614,9 @@ class LUClusterRedistConf(NoHooksLU):
   def ExpandNames(self):
     self.needed_locks = {
       locking.LEVEL_NODE: locking.ALL_SET,
+      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
     }
-    self.share_locks[locking.LEVEL_NODE] = 1
+    self.share_locks = _ShareAll()
 
   def Exec(self, feedback_fn):
     """Redistribute the configuration.
@@ -4800,6 +4814,11 @@ class LUOobCommand(NoHooksLU):
       locking.LEVEL_NODE: lock_names,
       }
 
+    if not self.op.node_names:
+      # Acquire node allocation lock only if all nodes are affected
+      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+      self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
+
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -5327,13 +5346,16 @@ class LUNodeQueryvols(NoHooksLU):
 
   def ExpandNames(self):
     self.share_locks = _ShareAll()
-    self.needed_locks = {}
 
-    if not self.op.nodes:
-      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+    if self.op.nodes:
+      self.needed_locks = {
+        locking.LEVEL_NODE: _GetWantedNodes(self, self.op.nodes),
+        }
     else:
-      self.needed_locks[locking.LEVEL_NODE] = \
-        _GetWantedNodes(self, self.op.nodes)
+      self.needed_locks = {
+        locking.LEVEL_NODE: locking.ALL_SET,
+        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+        }
 
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
@@ -6031,19 +6053,28 @@ class LUNodeSetParams(LogicalUnit):
 
   def ExpandNames(self):
     if self.lock_all:
-      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
+      self.needed_locks = {
+        locking.LEVEL_NODE: locking.ALL_SET,
+
+        # Block allocations when all nodes are locked
+        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+        }
     else:
-      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
+      self.needed_locks = {
+        locking.LEVEL_NODE: self.op.node_name,
+        }
 
     # Since modifying a node can have severe effects on currently running
     # operations the resource lock is at least acquired in shared mode
     self.needed_locks[locking.LEVEL_NODE_RES] = \
       self.needed_locks[locking.LEVEL_NODE]
 
-    # Get node resource and instance locks in shared mode; they are not used
-    # for anything but read-only access
-    self.share_locks[locking.LEVEL_NODE_RES] = 1
-    self.share_locks[locking.LEVEL_INSTANCE] = 1
+    # Get all locks except the node-related ones in shared mode; the shared
+    # locks are not used for anything but read-only access
+    self.share_locks = _ShareAll()
+    self.share_locks[locking.LEVEL_NODE] = 0
+    self.share_locks[locking.LEVEL_NODE_RES] = 0
+    self.share_locks[locking.LEVEL_NODE_ALLOC] = 0
 
     if self.lock_instances:
       self.needed_locks[locking.LEVEL_INSTANCE] = \
@@ -7363,6 +7394,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+
     if self.op.nodes:
       self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
       self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
@@ -7371,6 +7403,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
       if self.op.iallocator:
         # iallocator will select a new node in the same group
         self.needed_locks[locking.LEVEL_NODEGROUP] = []
+        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+
     self.needed_locks[locking.LEVEL_NODE_RES] = []
 
   def DeclareLocks(self, level):
@@ -7400,6 +7434,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
         for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
           self.needed_locks[locking.LEVEL_NODE].extend(
             self.cfg.GetNodeGroup(group_uuid).members)
+
+        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
       elif not self.op.nodes:
         self._LockInstancesNodes(primary_only=False)
     elif level == locking.LEVEL_NODE_RES:
@@ -7490,6 +7526,9 @@ class LUInstanceRecreateDisks(LogicalUnit):
       # Release unneeded node and node resource locks
       _ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
       _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
+      _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
+
+    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
 
   def Exec(self, feedback_fn):
     """Recreate the disks.
@@ -9501,13 +9540,15 @@ def _CheckOSParams(lu, required, nodenames, osname, osparams):
                  osname, node)
 
 
-def _CreateInstanceAllocRequest(op, disks, nics, beparams):
+def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
   """Wrapper around IAReqInstanceAlloc.
 
   @param op: The instance opcode
   @param disks: The computed disks
   @param nics: The computed nics
   @param beparams: The full filled beparams
+  @param node_whitelist: List of nodes which should appear as online to the
+    allocator (unless the node is already marked offline)
 
   @returns: A filled L{iallocator.IAReqInstanceAlloc}
 
@@ -9522,7 +9563,8 @@ def _CreateInstanceAllocRequest(op, disks, nics, beparams):
                                        spindle_use=spindle_use,
                                        disks=disks,
                                        nics=[n.ToDict() for n in nics],
-                                       hypervisor=op.hypervisor)
+                                       hypervisor=op.hypervisor,
+                                       node_whitelist=node_whitelist)
 
 
 def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
@@ -9856,6 +9898,10 @@ class LUInstanceCreate(LogicalUnit):
       # that group
       self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
       self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+
+      if self.op.opportunistic_locking:
+        self.opportunistic_locks[locking.LEVEL_NODE] = True
+        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
     else:
       self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
       nodelist = [self.op.pnode]
@@ -9895,19 +9941,33 @@ class LUInstanceCreate(LogicalUnit):
     """Run the allocator based on input opcode.
 
     """
+    if self.op.opportunistic_locking:
+      # Only consider nodes for which a lock is held
+      node_whitelist = self.owned_locks(locking.LEVEL_NODE)
+    else:
+      node_whitelist = None
+
     #TODO Export network to iallocator so that it chooses a pnode
     #     in a nodegroup that has the desired network connected to
     req = _CreateInstanceAllocRequest(self.op, self.disks,
-                                      self.nics, self.be_full)
+                                      self.nics, self.be_full,
+                                      node_whitelist)
     ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
     ial.Run(self.op.iallocator)
 
     if not ial.success:
+      # With opportunistic locks, only a temporary failure is reported
+      if self.op.opportunistic_locking:
+        ecode = errors.ECODE_TEMP_NORES
+      else:
+        ecode = errors.ECODE_NORES
+
       raise errors.OpPrereqError("Can't compute nodes using"
                                  " iallocator '%s': %s" %
                                  (self.op.iallocator, ial.info),
-                                 errors.ECODE_NORES)
+                                 ecode)
+
     self.op.pnode = ial.result[0]
     self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                  self.op.instance_name, self.op.iallocator,
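
The new errors.ECODE_TEMP_NORES classification lets callers treat an allocation failure under opportunistic locking as "try again later" rather than a hard lack of resources. A hypothetical caller-side helper illustrating the intent (the name and retry policy are made up for this sketch and are not part of the patch):

  def _RetryOnTempNoRes(fn, retries=3):
    """Hypothetical helper: retry fn() while it fails with ECODE_TEMP_NORES.

    """
    for attempt in range(retries):
      try:
        return fn()
      except errors.OpPrereqError, err:
        # The error classification is the exception's second argument
        if len(err.args) < 2 or err.args[1] != errors.ECODE_TEMP_NORES:
          raise
        if attempt == retries - 1:
          raise
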
@@ -10772,11 +10832,19 @@ class LUInstanceMultiAlloc(NoHooksLU):
 
     """
     self.share_locks = _ShareAll()
-    self.needed_locks = {}
+    self.needed_locks = {
+      # The iallocator will select nodes; even if no iallocator is used,
+      # collisions with LUInstanceCreate should be avoided
+      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+      }
 
     if self.op.iallocator:
       self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
       self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
+
+      if self.op.opportunistic_locking:
+        self.opportunistic_locks[locking.LEVEL_NODE] = True
+        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
     else:
       nodeslist = []
       for inst in self.op.instances:
@@ -10799,10 +10867,17 @@ class LUInstanceMultiAlloc(NoHooksLU):
     default_vg = self.cfg.GetVGName()
     ec_id = self.proc.GetECId()
 
+    if self.op.opportunistic_locking:
+      # Only consider nodes for which a lock is held
+      node_whitelist = self.owned_locks(locking.LEVEL_NODE)
+    else:
+      node_whitelist = None
+
     insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
                                          _ComputeNics(op, cluster, None,
                                                       self.cfg, ec_id),
-                                         _ComputeFullBeParams(op, cluster))
+                                         _ComputeFullBeParams(op, cluster),
+                                         node_whitelist)
              for op in self.op.instances]
 
     req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
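
The node_whitelist argument carries the "appear as online" semantics described in the new _CreateInstanceAllocRequest docstring: under opportunistic locking only nodes whose locks were actually obtained are offered to the allocator. A rough illustration of that filter (the real logic lives in the iallocator input generation and may differ):

  def _NodeUsableForAllocation(node, node_whitelist):
    # Illustration of the whitelist semantics; not the actual allocator code
    # Offline nodes stay offline regardless of the whitelist
    if node.offline:
      return False
    # No whitelist means any online node may be used
    return node_whitelist is None or node.name in node_whitelist
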
@@ -13908,6 +13983,9 @@ class _ExportQuery(_QueryBase):
         locking.LEVEL_NODE: self.wanted,
         }
 
+      if not self.names:
+        lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+
   def DeclareLocks(self, lu, level):
     pass
 
@@ -14025,6 +14103,11 @@ class LUBackupExport(LogicalUnit):
       #  - removing the removal operation altogether
       self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 
+      # Allocations should be stopped while this LU holds node locks, but
+      # the node allocation lock doesn't have to be exclusive
+      self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
+      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+
   def DeclareLocks(self, level):
     """Last minute lock declaration."""
     # All nodes are locked anyway, so nothing to do here.
@@ -14291,11 +14374,19 @@ class LUBackupRemove(NoHooksLU):
   REQ_BGL = False
 
   def ExpandNames(self):
-    self.needed_locks = {}
-    # We need all nodes to be locked in order for RemoveExport to work, but we
-    # don't need to lock the instance itself, as nothing will happen to it (and
-    # we can remove exports also for a removed instance)
-    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+    self.needed_locks = {
+      # We need all nodes to be locked in order for RemoveExport to work, but
+      # we don't need to lock the instance itself, as nothing will happen to it
+      # (and we can remove exports also for a removed instance)
+      locking.LEVEL_NODE: locking.ALL_SET,
+
+      # Removing backups is quick, so blocking allocations is justified
+      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+      }
+
+    # Allocations should be stopped while this LU holds node locks, but the
+    # node allocation lock doesn't have to be exclusive
+    self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
 
   def Exec(self, feedback_fn):
     """Remove any export.
@@ -15648,6 +15739,11 @@ class LUNetworkAdd(LogicalUnit):
     mn = self.cfg.GetMasterNode()
     return ([mn], [mn])
 
+  def CheckArguments(self):
+    if self.op.mac_prefix:
+      self.op.mac_prefix = \
+        utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
+
   def ExpandNames(self):
     self.network_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
 
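
utils.NormalizeAndValidateThreeOctetMacPrefix replaces the old trick of validating prefix + ":00:00:00" as a full MAC address (see the hunks removed further down). Its implementation is not part of this diff; conceptually it has to do roughly the following (sketch only, the real utility may differ):

  def NormalizeAndValidateThreeOctetMacPrefix(prefix):
    # Sketch: the real utility in lib/utils may be implemented differently.
    # Validate by padding to a full MAC address, as the old callers did,
    # then return the normalized (lower-case) three leading octets
    full = NormalizeAndValidateMac(prefix + ":00:00:00")
    return full[:8]
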
@@ -15662,12 +15758,6 @@ class LUNetworkAdd(LogicalUnit):
     self.add_locks[locking.LEVEL_NETWORK] = self.network_uuid
 
   def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the given group name is not an existing node group
-    already.
-
-    """
     if self.op.network is None:
       raise errors.OpPrereqError("Network must be given",
                                  errors.ECODE_INVAL)
@@ -15678,9 +15768,6 @@ class LUNetworkAdd(LogicalUnit):
       raise errors.OpPrereqError("Network '%s' already defined" %
                                  self.op.network, errors.ECODE_EXISTS)
 
-    if self.op.mac_prefix:
-      utils.NormalizeAndValidateMac(self.op.mac_prefix + ":00:00:00")
-
     # Check tag validity
     for tag in self.op.tags:
       objects.TaggableObject.ValidateTag(tag)
@@ -15768,7 +15855,8 @@ class LUNetworkRemove(LogicalUnit):
     self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
 
     if not self.network_uuid:
-      raise errors.OpPrereqError("Network %s not found" % self.op.network_name,
+      raise errors.OpPrereqError(("Network '%s' not found" %
+                                  self.op.network_name),
                                  errors.ECODE_INVAL)
 
     self.share_locks[locking.LEVEL_NODEGROUP] = 1
@@ -15785,19 +15873,17 @@ class LUNetworkRemove(LogicalUnit):
     cluster.
 
     """
-
     # Verify that the network is not connected.
     node_groups = [group.name
                    for group in self.cfg.GetAllNodeGroupsInfo().values()
-                   for net in group.networks.keys()
-                   if net == self.network_uuid]
+                   if self.network_uuid in group.networks]
 
     if node_groups:
-      self.LogWarning("Nework '%s' is connected to the following"
-                      " node groups: %s" % (self.op.network_name,
-                      utils.CommaJoin(utils.NiceSort(node_groups))))
-      raise errors.OpPrereqError("Network still connected",
-                                 errors.ECODE_STATE)
+      self.LogWarning("Network '%s' is connected to the following"
+                      " node groups: %s" %
+                      (self.op.network_name,
+                       utils.CommaJoin(utils.NiceSort(node_groups))))
+      raise errors.OpPrereqError("Network still connected", errors.ECODE_STATE)
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -15841,11 +15927,11 @@ class LUNetworkSetParams(LogicalUnit):
 
   def ExpandNames(self):
     self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
-    self.network = self.cfg.GetNetwork(self.network_uuid)
-    if self.network is None:
-      raise errors.OpPrereqError("Could not retrieve network '%s' (UUID: %s)" %
-                                 (self.op.network_name, self.network_uuid),
+    if self.network_uuid is None:
+      raise errors.OpPrereqError(("Network '%s' not found" %
+                                  self.op.network_name),
                                  errors.ECODE_INVAL)
+
     self.needed_locks = {
       locking.LEVEL_NETWORK: [self.network_uuid],
       }
@@ -15854,6 +15940,7 @@ class LUNetworkSetParams(LogicalUnit):
     """Check prerequisites.
 
     """
+    self.network = self.cfg.GetNetwork(self.network_uuid)
     self.gateway = self.network.gateway
     self.network_type = self.network.network_type
     self.mac_prefix = self.network.mac_prefix
@@ -15882,8 +15969,8 @@ class LUNetworkSetParams(LogicalUnit):
       if self.op.mac_prefix == constants.VALUE_NONE:
         self.mac_prefix = None
       else:
-        utils.NormalizeAndValidateMac(self.op.mac_prefix + ":00:00:00")
-        self.mac_prefix = self.op.mac_prefix
+        self.mac_prefix = \
+          utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
 
     if self.op.gateway6:
       if self.op.gateway6 == constants.VALUE_NONE:
@@ -16105,22 +16192,24 @@ class LUNetworkConnect(LogicalUnit):
     self.network_link = self.op.network_link
 
     self.network_uuid = self.cfg.LookupNetwork(self.network_name)
-    self.network = self.cfg.GetNetwork(self.network_uuid)
-    if self.network is None:
+    if self.network_uuid is None:
       raise errors.OpPrereqError("Network %s does not exist" %
                                  self.network_name, errors.ECODE_INVAL)
 
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
-    self.group = self.cfg.GetNodeGroup(self.group_uuid)
-    if self.group is None:
+    if self.group_uuid is None:
       raise errors.OpPrereqError("Group %s does not exist" %
                                  self.group_name, errors.ECODE_INVAL)
 
-    self.share_locks[locking.LEVEL_INSTANCE] = 1
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
+    self.share_locks[locking.LEVEL_INSTANCE] = 1
+
+    if self.op.conflicts_check:
+      self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
+      self.share_locks[locking.LEVEL_NETWORK] = 1
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_INSTANCE:
@@ -16131,7 +16220,6 @@ class LUNetworkConnect(LogicalUnit):
       if self.op.conflicts_check:
         self.needed_locks[locking.LEVEL_INSTANCE] = \
             self.cfg.GetNodeGroupInstances(self.group_uuid)
-        self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
 
   def BuildHooksEnv(self):
     ret = {
@@ -16139,7 +16227,6 @@ class LUNetworkConnect(LogicalUnit):
       "GROUP_NETWORK_MODE": self.network_mode,
       "GROUP_NETWORK_LINK": self.network_link,
       }
-    ret.update(_BuildNetworkHookEnvByObject(self.network))
     return ret
 
   def BuildHooksNodes(self):
@@ -16147,14 +16234,10 @@ class LUNetworkConnect(LogicalUnit):
     return (nodes, nodes)
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
 
     assert self.group_uuid in owned_groups
 
-    # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
-
     l = lambda value: utils.CommaJoin("%s: %s/%s" % (i[0], i[1], i[2])
                                       for i in value)
 
@@ -16164,6 +16247,7 @@ class LUNetworkConnect(LogicalUnit):
       }
     objects.NIC.CheckParameterSyntax(self.netparams)
 
+    self.group = self.cfg.GetNodeGroup(self.group_uuid)
     #if self.network_mode == constants.NIC_MODE_BRIDGED:
     #  _CheckNodeGroupBridgesExist(self, self.network_link, self.group_uuid)
     self.connected = False
@@ -16174,7 +16258,12 @@ class LUNetworkConnect(LogicalUnit):
       return
 
     if self.op.conflicts_check:
-      pool = network.AddressPool(self.network)
+      # Check if locked instances are still correct
+      owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+
+      nobj = self.cfg.GetNetwork(self.network_uuid)
+      pool = network.AddressPool(nobj)
       conflicting_instances = []
 
       for (_, instance) in self.cfg.GetMultiInstanceInfo(owned_instances):
@@ -16213,18 +16302,17 @@ class LUNetworkDisconnect(LogicalUnit):
     self.group_name = self.op.group_name
 
     self.network_uuid = self.cfg.LookupNetwork(self.network_name)
-    self.network = self.cfg.GetNetwork(self.network_uuid)
-    if self.network is None:
+    if self.network_uuid is None:
       raise errors.OpPrereqError("Network %s does not exist" %
                                  self.network_name, errors.ECODE_INVAL)
 
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
-    self.group = self.cfg.GetNodeGroup(self.group_uuid)
-    if self.group is None:
+    if self.group_uuid is None:
       raise errors.OpPrereqError("Group %s does not exist" %
                                  self.group_name, errors.ECODE_INVAL)
 
     self.needed_locks = {
+      locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
     self.share_locks[locking.LEVEL_INSTANCE] = 1
@@ -16243,7 +16331,6 @@ class LUNetworkDisconnect(LogicalUnit):
     ret = {
       "GROUP_NAME": self.group_name,
       }
-    ret.update(_BuildNetworkHookEnvByObject(self.network))
     return ret
 
   def BuildHooksNodes(self):
@@ -16251,17 +16338,14 @@ class LUNetworkDisconnect(LogicalUnit):
     return (nodes, nodes)
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
 
     assert self.group_uuid in owned_groups
 
-    # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
-
     l = lambda value: utils.CommaJoin("%s: %s/%s" % (i[0], i[1], i[2])
                                       for i in value)
 
+    self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True
     if self.network_uuid not in self.group.networks:
       self.LogWarning("Network '%s' is not mapped to group '%s'",
@@ -16270,6 +16354,10 @@ class LUNetworkDisconnect(LogicalUnit):
       return
 
     if self.op.conflicts_check:
+      # Check if locked instances are still correct
+      owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+
       conflicting_instances = []
 
       for (_, instance) in self.cfg.GetMultiInstanceInfo(owned_instances):