Revision a2f9ee87 lib/cmdlib/network.py

--- a/lib/cmdlib/network.py
+++ b/lib/cmdlib/network.py
@@ -30,7 +30,7 @@
 from ganeti import query
 from ganeti import utils
 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase
-from ganeti.cmdlib.common import ShareAll, CheckNodeGroupInstances
+from ganeti.cmdlib.common import ShareAll
 
 
 def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
@@ -576,25 +576,17 @@
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
 
     self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
-    self.share_locks[locking.LEVEL_INSTANCE] = 1
 
     if self.op.conflicts_check:
+      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
       self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
       self.share_locks[locking.LEVEL_NETWORK] = 1
+      self.share_locks[locking.LEVEL_INSTANCE] = 1
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once group lock has
-      # been acquired
-      if self.op.conflicts_check:
-        self.needed_locks[locking.LEVEL_INSTANCE] = \
-          self.cfg.GetInstanceNames(
-            self.cfg.GetNodeGroupInstances(self.group_uuid))
+    pass
 
   def BuildHooksEnv(self):
     ret = {
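
For context: in a Ganeti logical unit, ExpandNames declares the locks an opcode needs, and DeclareLocks(level) can refine a level just before it is acquired. The code removed above used that hook to lock only the instances currently recorded for the node group; because that list is read from the configuration before all locks are held, CheckPrereq had to re-verify it afterwards (see the next hunk). The replacement takes shared locks on all instances (locking.ALL_SET) whenever conflicts_check is set, so no late refinement is needed. Below is a minimal sketch of the removed optimistic pattern with added comments; names are taken from the diff, and the surrounding LU plumbing (self.cfg, self.group_uuid, self.share_locks) is assumed rather than shown.

from ganeti import locking

class _OptimisticInstanceLockingSketch:
  """Illustrative only: the locking pattern this revision removes."""

  def ExpandNames(self):
    # No instance locks yet; the concrete set is only chosen later.
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }
    self.share_locks[locking.LEVEL_INSTANCE] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      # Refine just before the instance level is acquired: lock the instances
      # the configuration currently lists for the group.  The membership can
      # still change until every lock is held, hence the verification step
      # in CheckPrereq.
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))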
@@ -615,8 +607,6 @@
 
     # Check if locked instances are still correct
     owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    if self.op.conflicts_check:
-      CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
 
     self.netparams = {
       constants.NIC_MODE: self.network_mode,
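
The CheckNodeGroupInstances call dropped here (and again in LUNetworkDisconnect further down) was the verification step the optimistic locking required: once all locks are held, CheckPrereq compares the instance locks actually owned against the group's current membership and aborts if they diverged. The sketch below restates roughly what such a check does; the real helper lives in ganeti.cmdlib.common, and the exact error message and error code used here are assumptions.

from ganeti import errors

def _CheckGroupInstancesSketch(cfg, group_uuid, owned_instance_names):
  """Illustrative restatement of the removed verification step."""
  # What the group contains now that all locks are held ...
  wanted = frozenset(cfg.GetInstanceNames(
                       cfg.GetNodeGroupInstances(group_uuid)))
  # ... must match the instance locks that were actually acquired.
  if frozenset(owned_instance_names) != wanted:
    raise errors.OpPrereqError("Instances in node group %s changed since"
                               " locks were acquired; retry the operation" %
                               group_uuid, errors.ECODE_STATE)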
@@ -665,20 +655,13 @@
     self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
 
     self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
     self.share_locks[locking.LEVEL_INSTANCE] = 1
 
   def DeclareLocks(self, level):
-    if level == locking.LEVEL_INSTANCE:
-      assert not self.needed_locks[locking.LEVEL_INSTANCE]
-
-      # Lock instances optimistically, needs verification once group lock has
-      # been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetInstanceNames(
-          self.cfg.GetNodeGroupInstances(self.group_uuid))
+    pass
 
   def BuildHooksEnv(self):
     ret = {
@@ -697,7 +680,6 @@
 
     # Check if locked instances are still correct
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True
