Revision 52f33103 lib/cmdlib.py

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -630,6 +630,53 @@
   return params_copy


+def _ReleaseLocks(lu, level, names=None, keep=None):
+  """Releases locks owned by an LU.
+
+  @type lu: L{LogicalUnit}
+  @param level: Lock level
+  @type names: list or None
+  @param names: Names of locks to release
+  @type keep: list or None
+  @param keep: Names of locks to retain
+
+  """
+  assert not (keep is not None and names is not None), \
+         "Only one of the 'names' and the 'keep' parameters can be given"
+
+  if names is not None:
+    should_release = names.__contains__
+  elif keep:
+    should_release = lambda name: name not in keep
+  else:
+    should_release = None
+
+  if should_release:
+    retain = []
+    release = []
+
+    # Determine which locks to release
+    for name in lu.acquired_locks[level]:
+      if should_release(name):
+        release.append(name)
+      else:
+        retain.append(name)
+
+    assert len(lu.acquired_locks[level]) == (len(retain) + len(release))
+
+    # Release just some locks
+    lu.context.glm.release(level, names=release)
+    lu.acquired_locks[level] = retain
+
+    assert frozenset(lu.context.glm.list_owned(level)) == frozenset(retain)
+  else:
+    # Release everything
+    lu.context.glm.release(level)
+    del lu.acquired_locks[level]
+
+    assert not lu.context.glm.list_owned(level), "No locks should be owned"
+
+
 def _RunPostHook(lu, node_name):
   """Runs the post-hook for an opcode on a single node.

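The helper above replaces several hand-rolled lock-release loops further down in this revision. For orientation, these are the three call shapes it supports (a minimal sketch, assuming the Ganeti locking module and an LU that already holds node locks; the node names are placeholders):

    # Release exactly the named locks, keep everything else:
    _ReleaseLocks(lu, locking.LEVEL_NODE, names=["node1", "node2"])

    # Release everything except the named locks:
    _ReleaseLocks(lu, locking.LEVEL_NODE, keep=["node3"])

    # Release all locks held at this level (also drops the level from
    # lu.acquired_locks):
    _ReleaseLocks(lu, locking.LEVEL_NODE)

Passing both names and keep trips the assertion at the top of the function.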
@@ -4580,21 +4627,22 @@
     # If we have locked all instances, before waiting to lock nodes, release
     # all the ones living on nodes unrelated to the current operation.
     if level == locking.LEVEL_NODE and self.lock_instances:
-      instances_release = []
-      instances_keep = []
       self.affected_instances = []
       if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
+        instances_keep = []
+
+        # Build list of instances to release
         for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
           instance = self.context.cfg.GetInstanceInfo(instance_name)
-          i_mirrored = instance.disk_template in constants.DTS_INT_MIRROR
-          if i_mirrored and self.op.node_name in instance.all_nodes:
+          if (instance.disk_template in constants.DTS_INT_MIRROR and
+              self.op.node_name in instance.all_nodes):
             instances_keep.append(instance_name)
             self.affected_instances.append(instance)
-          else:
-            instances_release.append(instance_name)
-        if instances_release:
-          self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
-          self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep
+
+        _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
+
+        assert (set(self.acquired_locks.get(locking.LEVEL_INSTANCE, [])) ==
+                set(instances_keep))

   def BuildHooksEnv(self):
     """Build hooks env.

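The keep= form computes the same release set that the removed code built by hand. Expressed as set arithmetic (illustrative only, not code from the patch):

    acquired = set(self.acquired_locks[locking.LEVEL_INSTANCE])
    released = acquired - set(instances_keep)   # given up by the LU
    retained = acquired & set(instances_keep)   # still owned afterwards

Note that an empty instances_keep list releases every instance lock, because the helper only takes the keep branch for a non-empty list; that matches the behaviour of the removed code, which released all instance locks when nothing was kept.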
@@ -6430,12 +6478,9 @@
       target_node = self.target_node

       if len(self.lu.tasklets) == 1:
-        # It is safe to remove locks only when we're the only tasklet in the LU
-        nodes_keep = [instance.primary_node, self.target_node]
-        nodes_rel = [node for node in self.lu.acquired_locks[locking.LEVEL_NODE]
-                     if node not in nodes_keep]
-        self.lu.context.glm.release(locking.LEVEL_NODE, nodes_rel)
-        self.lu.acquired_locks[locking.LEVEL_NODE] = nodes_keep
+        # It is safe to release locks only when we're the only tasklet in the LU
+        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
+                      keep=[instance.primary_node, self.target_node])

     else:
       secondary_nodes = instance.secondary_nodes

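The first argument is always the LU whose lock bookkeeping should be updated; the helper only touches two things on that object (sketch of the attributes involved, taken from the definition above):

    lu.acquired_locks[level]            # the LU's record of locks it owns
    lu.context.glm.release(level, ...)  # the actual lock-manager release call

A tasklet such as the one in this hunk keeps both of these on its parent LU (self.lu), which is why the LU, not the tasklet itself, is what gets passed in.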
@@ -8261,16 +8306,13 @@
     # Declare that we don't want to remove the instance lock anymore, as we've
     # added the instance to the config
     del self.remove_locks[locking.LEVEL_INSTANCE]
-    # Unlock all the nodes
+
     if self.op.mode == constants.INSTANCE_IMPORT:
-      nodes_keep = [self.op.src_node]
-      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
-                       if node != self.op.src_node]
-      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
-      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
+      # Release unused nodes
+      _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
     else:
-      self.context.glm.release(locking.LEVEL_NODE)
-      del self.acquired_locks[locking.LEVEL_NODE]
+      # Release all nodes
+      _ReleaseLocks(self, locking.LEVEL_NODE)

     disk_abort = False
     if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:

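When neither names nor keep is given, as in the non-import branch here, the helper frees the whole level and deletes the level's entry from the LU's bookkeeping. A standalone illustration of that bookkeeping effect (a plain dict stands in for lu.acquired_locks; the lock-manager call itself is elided):

    LEVEL_NODE = "node"                                # placeholder constant
    acquired_locks = {LEVEL_NODE: ["node1", "node2"]}  # stand-in for lu.acquired_locks
    # ... lu.context.glm.release(LEVEL_NODE) would free the locks here ...
    del acquired_locks[LEVEL_NODE]
    assert LEVEL_NODE not in acquired_locks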
@@ -8639,7 +8681,6 @@

     return True

-
   def CheckPrereq(self):
     """Check prerequisites.

@@ -8775,9 +8816,7 @@

     if self.lu.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
       # Release unneeded node locks
-      for name in self.lu.acquired_locks[locking.LEVEL_NODE]:
-        if name not in touched_nodes:
-          self._ReleaseNodeLock(name)
+      _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)

     # Check whether disks are valid
     for disk_idx in self.disks:

@@ -8825,22 +8864,23 @@
       else:
         fn = self._ExecDrbd8DiskOnly

-      return fn(feedback_fn)
-
+      result = fn(feedback_fn)
     finally:
       # Deactivate the instance disks if we're replacing them on a
       # down instance
       if activate_disks:
         _SafeShutdownInstanceDisks(self.lu, self.instance)

-      if __debug__:
-        # Verify owned locks
-        owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
-        assert ((self.early_release and not owned_locks) or
-                (not self.early_release and
-                 set(owned_locks) == set(self.node_secondary_ip))), \
-          ("Not owning the correct locks, early_release=%s, owned=%r" %
-           (self.early_release, owned_locks))
+    if __debug__:
+      # Verify owned locks
+      owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
+      nodes = frozenset(self.node_secondary_ip)
+      assert ((self.early_release and not owned_locks) or
+              (not self.early_release and not (set(owned_locks) - nodes))), \
+        ("Not owning the correct locks, early_release=%s, owned=%r,"
+         " nodes=%r" % (self.early_release, owned_locks, nodes))
+
+    return result

   def _CheckVolumeGroup(self, nodes):
     self.lu.LogInfo("Checking volume groups")

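The restructuring above follows a common shape: capture the callee's return value inside try, let finally do its cleanup, and only afterwards run the debug-time lock check and return. A stripped-down sketch of that shape with placeholder names (not code from the patch):

    def run_with_cleanup(action, cleanup, check):
      try:
        result = action()   # was previously "return fn(feedback_fn)" inside try
      finally:
        cleanup()           # always runs, e.g. shutting down instance disks
      check()               # lock assertions now run after the finally block
      return result         # value is handed back only after the checks

The new assertion is also slightly weaker than the old one: instead of requiring the owned node locks to equal node_secondary_ip exactly, it only requires that no locks outside that set are still owned.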
@@ -8952,10 +8992,6 @@
           self.lu.LogWarning("Can't remove old LV: %s" % msg,
                              hint="remove unused LVs manually")

-  def _ReleaseNodeLock(self, node_name):
-    """Releases the lock for a given node."""
-    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
-
   def _ExecDrbd8DiskOnly(self, feedback_fn):
     """Replace a disk on the primary or secondary for DRBD 8.

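With the one-off _ReleaseNodeLock wrapper gone, the call sites below use the generic helper's names= form, whose predicate is simply list membership (should_release = names.__contains__). A self-contained illustration of that filtering, with example data:

    acquired = ["node1", "node2", "node3"]   # locks currently held (example data)
    names = ["node1", "node3"]               # locks requested for release
    should_release = names.__contains__      # same predicate the helper builds

    release = [n for n in acquired if should_release(n)]
    retain = [n for n in acquired if not should_release(n)]
    assert release == ["node1", "node3"]
    assert retain == ["node2"]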
@@ -9073,7 +9109,8 @@
       self._RemoveOldStorage(self.target_node, iv_names)
       # WARNING: we release both node locks here, do not do other RPCs
       # than WaitForSync to the primary node
-      self._ReleaseNodeLock([self.target_node, self.other_node])
+      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
+                    names=[self.target_node, self.other_node])

     # Wait for sync
     # This can fail as the old devices are degraded and _WaitForSync

@@ -9230,9 +9267,10 @@
       self._RemoveOldStorage(self.target_node, iv_names)
       # WARNING: we release all node locks here, do not do other RPCs
       # than WaitForSync to the primary node
-      self._ReleaseNodeLock([self.instance.primary_node,
-                             self.target_node,
-                             self.new_node])
+      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
+                    names=[self.instance.primary_node,
+                           self.target_node,
+                           self.new_node])

     # Wait for sync
     # This can fail as the old devices are degraded and _WaitForSync
