Revision 87ed6b79 lib/cmdlib/instance_storage.py

--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -784,8 +784,6 @@
       ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
       ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
 
-      assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
-
     if self.op.node_uuids:
       node_uuids = self.op.node_uuids
     else:
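The two ReleaseLocks calls in the hunk above drop node locks that are no longer needed once the target nodes are known, keeping only the locks named by keep=. The following is a minimal, self-contained sketch of that release-all-but-keep pattern; the function name and data here are toy stand-ins, not Ganeti's actual ReleaseLocks helper.

# Toy illustration of the "release everything except `keep`" pattern used
# above; names and structure are hypothetical, not Ganeti's implementation.
def release_locks(owned, keep=None):
  """Return the locks still held after releasing all but `keep`."""
  keep = set(keep or [])
  released = set(owned) - keep
  remaining = set(owned) & keep
  print("releasing: %s" % sorted(released))
  return remaining

# After the LU has decided which node UUIDs it will actually touch
# (self.op.node_uuids in the hunk above), every other node lock is dropped.
owned_node_locks = {"node-A", "node-B", "node-C"}
still_needed = {"node-B"}
assert release_locks(owned_node_locks, keep=still_needed) == {"node-B"}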
@@ -1590,7 +1588,8 @@
     ReleaseLocks(self, locking.LEVEL_NODE)
 
     # Downgrade lock while waiting for sync
-    self.glm.downgrade(locking.LEVEL_INSTANCE)
+    self.WConfdClient().DownGradeLocksLevel(
+          locking.LEVEL_NAMES[locking.LEVEL_INSTANCE])
 
     assert wipe_disks ^ (old_disk_size is None)
 
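The replaced line changes how the instance lock is downgraded while the LU waits for disk sync: instead of calling downgrade() on the in-process lock manager (self.glm), the new code asks the WConfd daemon to downgrade the whole level, addressing it by name via locking.LEVEL_NAMES. Below is a rough, self-contained sketch of that call shape; the level constant, name map, and client class are stand-ins, not Ganeti's real locking module or WConfd RPC client.

# Hypothetical stand-ins; the real LEVEL_INSTANCE constant, LEVEL_NAMES map
# and WConfd client live in Ganeti's locking and wconfd modules.
LEVEL_INSTANCE = 1
LEVEL_NAMES = {LEVEL_INSTANCE: "instance"}


class StubWConfdClient(object):
  """Minimal stand-in for the WConfd RPC client."""

  def DownGradeLocksLevel(self, level_name):
    # A real daemon would downgrade every lock this job holds at that
    # level from exclusive to shared mode.
    print("downgrade all %r locks to shared" % level_name)


class StubLU(object):
  def WConfdClient(self):
    return StubWConfdClient()

  def Exec(self):
    # Same call shape as the new code in the hunk above.
    self.WConfdClient().DownGradeLocksLevel(LEVEL_NAMES[LEVEL_INSTANCE])


StubLU().Exec()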
@@ -1707,8 +1706,6 @@
            for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
            for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
       else:
-        assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
-
         self._LockInstancesNodes()
 
     elif level == locking.LEVEL_NODE_RES:
@@ -1748,9 +1745,6 @@
     """Check prerequisites.
 
     """
-    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
-            self.op.iallocator is None)
-
     # Verify if node group locks are still correct
     owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
     if owned_groups:
@@ -2170,15 +2164,11 @@
            (owned_nodes, self.node_secondary_ip.keys()))
       assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
               self.lu.owned_locks(locking.LEVEL_NODE_RES))
-      assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
 
       owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
       assert list(owned_instances) == [self.instance_name], \
           "Instance '%s' not locked" % self.instance_name
 
-      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
-          "Should not own any node group lock at this point"
-
     if not self.disks:
       feedback_fn("No disks need replacement for instance '%s'" %
                   self.instance.name)
