Revision 6683bba2

b/lib/cmdlib.py

@@ -81,6 +81,7 @@
     self.sstore = sstore
     self.context = context
     self.needed_locks = None
+    self.acquired_locks = {}
     self.share_locks = dict(((i, 0) for i in locking.LEVELS))
     # Used to force good behavior when calling helper functions
     self.recalculate_locks = {}
@@ -291,7 +292,7 @@
     # future we might want to have different behaviors depending on the value
     # of self.recalculate_locks[locking.LEVEL_NODE]
     wanted_nodes = []
-    for instance_name in self.needed_locks[locking.LEVEL_INSTANCE]:
+    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
       instance = self.context.cfg.GetInstanceInfo(instance_name)
       wanted_nodes.append(instance.primary_node)
       wanted_nodes.extend(instance.secondary_nodes)
@@ -1398,7 +1399,7 @@
 
     """
     # This of course is valid only if we locked the nodes
-    self.wanted = self.needed_locks[locking.LEVEL_NODE]
+    self.wanted = self.acquired_locks[locking.LEVEL_NODE]
 
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
@@ -2501,7 +2502,7 @@
 
     """
     # This of course is valid only if we locked the instances
-    self.wanted = self.needed_locks[locking.LEVEL_INSTANCE]
+    self.wanted = self.acquired_locks[locking.LEVEL_INSTANCE]
 
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
b/lib/mcpu.py

@@ -139,13 +139,15 @@
       share = lu.share_locks[level]
       # This is always safe to do, as we can't acquire more/less locks than
       # what was requested.
-      lu.needed_locks[level] = self.context.glm.acquire(level,
-                                                        needed_locks,
-                                                        shared=share)
+      lu.acquired_locks[level] = self.context.glm.acquire(level,
+                                                          needed_locks,
+                                                          shared=share)
       try:
         result = self._LockAndExecLU(lu, level + 1)
       finally:
-        if lu.needed_locks[level]:
+        # We need to release the current level if we acquired any lock, or if
+        # we acquired the set-lock (needed_locks is None)
+        if lu.needed_locks[level] is None or lu.acquired_locks[level]:
           self.context.glm.release(level)
     else:
       result = self._LockAndExecLU(lu, level + 1)
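
The mcpu.py hunk is where acquired_locks gets filled in and where the new release rule lives: the result of glm.acquire() is stored per level, and the level is released either when the set-lock was requested (needed_locks is None) or when at least one named lock actually came back. Below is a condensed, self-contained sketch of that flow; FakeGLM, FakeLU and lock_and_exec are invented stand-ins, not the real Processor or lock manager.

class FakeGLM:
  """Toy lock manager tracking what is currently held at one level."""

  def __init__(self, known):
    self._known = list(known)
    self.held = set()

  def acquire(self, level, names, shared=0):
    # names=None means "take the set-lock", i.e. every lock at this level.
    names = self._known if names is None else list(names)
    self.held.update(names)
    return list(names)

  def release(self, level):
    self.held.clear()


class FakeLU:
  def __init__(self, needed):
    self.needed_locks = {0: needed}
    self.share_locks = {0: 0}
    self.acquired_locks = {}


def lock_and_exec(glm, lu, level, exec_fn):
  # Condensed analogue of the hunk above: stash the acquire() result in
  # acquired_locks, then release if we took the set-lock or got any locks.
  needed = lu.needed_locks[level]
  lu.acquired_locks[level] = glm.acquire(level, needed,
                                         shared=lu.share_locks[level])
  try:
    return exec_fn()
  finally:
    if needed is None or lu.acquired_locks[level]:
      glm.release(level)


glm = FakeGLM(["node1.example.com", "node2.example.com"])
lu = FakeLU(None)                # set-lock: lock everything at level 0
lock_and_exec(glm, lu, 0, lambda: None)
assert not glm.held              # the level was released afterwards

The "needed is None" half of the test matters because, reading the added comment, acquiring the whole set can still leave the set-lock held even when few or no individual names come back, so truthiness of acquired_locks alone is not enough to decide whether a release is due.
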
