Revision 0db3d0b5 lib/cmdlib.py

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -129,6 +129,7 @@
     self.proc = processor
     self.op = op
     self.cfg = context.cfg
+    self.glm = context.glm
     self.context = context
     self.rpc = rpc
     # Dicts used to declare locking needs to mcpu
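The hunk above caches the context's Ganeti lock manager (glm) directly on the logical unit, so the call sites changed further down can write self.glm instead of self.context.glm. A minimal sketch of that shortcut, using made-up FakeContext/FakeLU stand-ins rather than the real Ganeti classes:

  class FakeLockManager(object):
    def list_owned(self, level):
      return frozenset()

  class FakeContext(object):
    def __init__(self):
      self.cfg = None
      self.glm = FakeLockManager()

  class FakeLU(object):
    def __init__(self, context):
      self.cfg = context.cfg
      # Shortcut attribute, mirroring the added line in the hunk above
      self.glm = context.glm
      self.context = context

  lu = FakeLU(FakeContext())
  assert lu.glm is lu.context.glm  # same object, shorter spelling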
@@ -665,16 +666,16 @@
     assert len(lu.acquired_locks[level]) == (len(retain) + len(release))
 
     # Release just some locks
-    lu.context.glm.release(level, names=release)
+    lu.glm.release(level, names=release)
     lu.acquired_locks[level] = retain
 
-    assert frozenset(lu.context.glm.list_owned(level)) == frozenset(retain)
+    assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
   else:
     # Release everything
-    lu.context.glm.release(level)
+    lu.glm.release(level)
     del lu.acquired_locks[level]
 
-    assert not lu.context.glm.list_owned(level), "No locks should be owned"
+    assert not lu.glm.list_owned(level), "No locks should be owned"
 
 
 def _RunPostHook(lu, node_name):
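_ReleaseLocks either hands back a named subset of a level's locks (keeping the rest) or drops the whole level. A rough, self-contained model of the subset branch; MockGLM is a hypothetical stand-in with only the release/list_owned behaviour the hunk relies on:

  class MockGLM(object):
    def __init__(self, owned):
      self._owned = set(owned)

    def release(self, level, names=None):
      # names=None means: drop everything held at this level
      if names is None:
        self._owned.clear()
      else:
        self._owned.difference_update(names)

    def list_owned(self, level):
      return frozenset(self._owned)

  glm = MockGLM(["node1", "node2", "node3"])
  keep = frozenset(["node1"])
  release = [name for name in glm.list_owned("node") if name not in keep]
  glm.release("node", names=release)
  assert glm.list_owned("node") == keep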
@@ -5843,8 +5844,8 @@
     # Change the instance lock. This is definitely safe while we hold the BGL.
     # Otherwise the new lock would have to be added in acquired mode.
     assert self.REQ_BGL
-    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
-    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
+    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
+    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
 
     # re-read the instance from the configuration after rename
     inst = self.cfg.GetInstanceInfo(self.op.new_name)
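Renaming an instance means its lock has to change name as well: the old lock is removed and a lock under the new name is added, which is only safe because the big Ganeti lock (BGL) is held for the whole operation. A hedged sketch of that swap against a simplified stand-in, not the real lock manager API:

  class MockInstanceLocks(object):
    def __init__(self, names):
      self._names = set(names)

    def remove(self, level, name):
      self._names.discard(name)

    def add(self, level, name):
      self._names.add(name)

    def owned(self):
      return frozenset(self._names)

  locks = MockInstanceLocks(["inst-old.example.com"])
  # Swap the lock name: drop the old instance name, register the new one
  locks.remove("instance", "inst-old.example.com")
  locks.add("instance", "inst-new.example.com")
  assert locks.owned() == frozenset(["inst-new.example.com"])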
@@ -8858,7 +8859,7 @@
     _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
 
     # Release any owned node group
-    if self.lu.context.glm.is_owned(locking.LEVEL_NODEGROUP):
+    if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
       _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
 
     # Check whether disks are valid
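The node group level may or may not be held at this point, so the release is guarded by is_owned. The same guard in isolation, with a level-keyed mock instead of Ganeti's lock manager:

  class MockLevels(object):
    def __init__(self, owned_levels):
      self._owned = set(owned_levels)

    def is_owned(self, level):
      return level in self._owned

    def release(self, level):
      self._owned.discard(level)

  glm = MockLevels(["node", "nodegroup"])
  # Only touch the nodegroup level if something is actually held there
  if glm.is_owned("nodegroup"):
    glm.release("nodegroup")
  assert not glm.is_owned("nodegroup")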
@@ -8881,16 +8882,16 @@
 
     if __debug__:
       # Verify owned locks before starting operation
-      owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
+      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
       assert set(owned_locks) == set(self.node_secondary_ip), \
           ("Incorrect node locks, owning %s, expected %s" %
            (owned_locks, self.node_secondary_ip.keys()))
 
-      owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_INSTANCE)
+      owned_locks = self.lu.glm.list_owned(locking.LEVEL_INSTANCE)
       assert list(owned_locks) == [self.instance_name], \
           "Instance '%s' not locked" % self.instance_name
 
-      assert not self.lu.context.glm.is_owned(locking.LEVEL_NODEGROUP), \
+      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
           "Should not own any node group lock at this point"
 
     if not self.disks:
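These verification blocks are gated on __debug__, so running with python -O strips both the if block and the asserts; in a normal run they cross-check the owned locks against what the operation expects. A tiny standalone illustration of the pattern (node names invented):

  def verify_owned(owned_nodes, expected_nodes):
    # Debug-only consistency check in the same style as the hunk above:
    # cheap set comparison wrapped in assert, free under python -O.
    if __debug__:
      assert set(owned_nodes) == set(expected_nodes), \
        ("Incorrect node locks, owning %s, expected %s" %
         (sorted(owned_nodes), sorted(expected_nodes)))

  verify_owned(["node1", "node2"], ["node2", "node1"])  # passes silently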
@@ -8922,7 +8923,7 @@
 
     if __debug__:
      # Verify owned locks
-      owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
+      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
       nodes = frozenset(self.node_secondary_ip)
       assert ((self.early_release and not owned_locks) or
               (not self.early_release and not (set(owned_locks) - nodes))), \
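The final assert continues past the end of this excerpt; it encodes an invariant: with early_release the node locks must already be gone by now, otherwise only the expected secondary nodes may still be held. Written out as a standalone predicate, as a reading of the condition shown rather than code from cmdlib.py:

  def locks_consistent(early_release, owned_locks, expected_nodes):
    # early_release -> nothing owned; otherwise whatever is owned must be
    # a subset of the expected secondary nodes.
    owned = set(owned_locks)
    nodes = frozenset(expected_nodes)
    return ((early_release and not owned) or
            (not early_release and not (owned - nodes)))

  assert locks_consistent(True, [], ["node2"])
  assert locks_consistent(False, ["node2"], ["node2", "node3"])
  assert not locks_consistent(True, ["node2"], ["node2"])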
