Revision d0d7d7cf lib/cmdlib/instance_storage.py

--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
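Every hunk in this revision applies the same mechanical change: short-lived local aliases such as instance = self.instance or disk = self.disk are dropped and the attribute is read directly at each use site, so behaviour is unchanged. A minimal, self-contained sketch of the pattern (illustrative names only, not Ganeti code):

    import collections

    class DiskGrowOp(object):
      def __init__(self, instance):
        self.instance = instance

      def exec_old_style(self):
        instance = self.instance       # cached alias (the style being removed)
        return instance.name

      def exec_new_style(self):
        return self.instance.name      # read the attribute at the use site

    FakeInstance = collections.namedtuple("FakeInstance", ["name"])
    op = DiskGrowOp(FakeInstance(name="inst1"))
    assert op.exec_old_style() == op.exec_new_style() == "inst1"
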
@@ -806,15 +806,13 @@
     """Recreate the disks.

     """
-    instance = self.instance
-
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))

     to_skip = []
     mods = [] # keeps track of needed changes

-    for idx, disk in enumerate(instance.disks):
+    for idx, disk in enumerate(self.instance.disks):
       try:
         changes = self.disks[idx]
       except KeyError:
@@ -830,7 +828,7 @@
                                          # have changed
         (_, _, old_port, _, _, old_secret) = disk.logical_id
         new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
-                                                instance.name)
+                                                self.instance.name)
         new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
                   new_minors[0], new_minors[1], old_secret)
         assert len(disk.logical_id) == len(new_id)
@@ -842,7 +840,7 @@
     # now that we have passed all asserts above, we can apply the mods
     # in a single run (to avoid partial changes)
     for idx, new_id, changes in mods:
-      disk = instance.disks[idx]
+      disk = self.instance.disks[idx]
       if new_id is not None:
         assert disk.dev_type == constants.LD_DRBD8
         disk.logical_id = new_id
@@ -853,24 +851,25 @@

     # change primary node, if needed
     if self.op.node_uuids:
-      instance.primary_node = self.op.node_uuids[0]
+      self.instance.primary_node = self.op.node_uuids[0]
       self.LogWarning("Changing the instance's nodes, you will have to"
                       " remove any disks left on the older nodes manually")

     if self.op.node_uuids:
-      self.cfg.Update(instance, feedback_fn)
+      self.cfg.Update(self.instance, feedback_fn)

     # All touched nodes must be locked
     mylocks = self.owned_locks(locking.LEVEL_NODE)
-    assert mylocks.issuperset(frozenset(instance.all_nodes))
-    new_disks = CreateDisks(self, instance, to_skip=to_skip)
+    assert mylocks.issuperset(frozenset(self.instance.all_nodes))
+    new_disks = CreateDisks(self, self.instance, to_skip=to_skip)

     # TODO: Release node locks before wiping, or explain why it's not possible
     if self.cfg.GetClusterInfo().prealloc_wipe_disks:
       wipedisks = [(idx, disk, 0)
-                   for (idx, disk) in enumerate(instance.disks)
+                   for (idx, disk) in enumerate(self.instance.disks)
                    if idx not in to_skip]
-      WipeOrCleanupDisks(self, instance, disks=wipedisks, cleanup=new_disks)
+      WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
+                         cleanup=new_disks)


 def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
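The recreate-disks hunks above keep the existing structure: all modifications (new DRBD minors and logical IDs, and optionally a new primary node) are collected and checked first, then applied to the in-memory instance object in one pass, and only afterwards persisted with a single self.cfg.Update(self.instance, feedback_fn) call before CreateDisks and WipeOrCleanupDisks run. A rough sketch of that collect-then-apply-then-persist shape (hypothetical objects, not the Ganeti config API):

    def apply_disk_mods(instance, mods, config, feedback_fn):
      # mods: [(disk_index, new_logical_id), ...] computed and validated earlier
      for idx, new_id in mods:
        instance.disks[idx].logical_id = new_id   # in-memory change only
      config.Update(instance, feedback_fn)        # persist everything in one call
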
@@ -1445,73 +1444,72 @@
     """Execute disk grow.

     """
-    instance = self.instance
-    disk = self.disk
-
-    assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
+    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))

     wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks

-    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[disk])
+    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[self.disk])
     if not disks_ok:
       raise errors.OpExecError("Cannot activate block device to grow")

     feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
-                (self.op.disk, instance.name,
+                (self.op.disk, self.instance.name,
                  utils.FormatUnit(self.delta, "h"),
                  utils.FormatUnit(self.target, "h")))

     # First run all grow ops in dry-run mode
-    for node_uuid in instance.all_nodes:
-      self.cfg.SetDiskID(disk, node_uuid)
-      result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
+    for node_uuid in self.instance.all_nodes:
+      self.cfg.SetDiskID(self.disk, node_uuid)
+      result = self.rpc.call_blockdev_grow(node_uuid,
+                                           (self.disk, self.instance),
                                            self.delta, True, True)
       result.Raise("Dry-run grow request failed to node %s" %
                    self.cfg.GetNodeName(node_uuid))

     if wipe_disks:
       # Get disk size from primary node for wiping
-      result = self.rpc.call_blockdev_getdimensions(instance.primary_node,
-                                                    [disk])
+      result = self.rpc.call_blockdev_getdimensions(self.instance.primary_node,
+                                                    [self.disk])
       result.Raise("Failed to retrieve disk size from node '%s'" %
-                   instance.primary_node)
+                   self.instance.primary_node)

       (disk_dimensions, ) = result.payload

       if disk_dimensions is None:
         raise errors.OpExecError("Failed to retrieve disk size from primary"
-                                 " node '%s'" % instance.primary_node)
+                                 " node '%s'" % self.instance.primary_node)
       (disk_size_in_bytes, _) = disk_dimensions

       old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)

-      assert old_disk_size >= disk.size, \
+      assert old_disk_size >= self.disk.size, \
         ("Retrieved disk size too small (got %s, should be at least %s)" %
-         (old_disk_size, disk.size))
+         (old_disk_size, self.disk.size))
     else:
       old_disk_size = None

     # We know that (as far as we can test) operations across different
     # nodes will succeed, time to run it for real on the backing storage
-    for node_uuid in instance.all_nodes:
-      self.cfg.SetDiskID(disk, node_uuid)
-      result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
+    for node_uuid in self.instance.all_nodes:
+      self.cfg.SetDiskID(self.disk, node_uuid)
+      result = self.rpc.call_blockdev_grow(node_uuid,
+                                           (self.disk, self.instance),
                                            self.delta, False, True)
       result.Raise("Grow request failed to node %s" %
                    self.cfg.GetNodeName(node_uuid))

     # And now execute it for logical storage, on the primary node
-    node_uuid = instance.primary_node
-    self.cfg.SetDiskID(disk, node_uuid)
-    result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
+    node_uuid = self.instance.primary_node
+    self.cfg.SetDiskID(self.disk, node_uuid)
+    result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
                                          self.delta, False, False)
     result.Raise("Grow request failed to node %s" %
                  self.cfg.GetNodeName(node_uuid))

-    disk.RecordGrow(self.delta)
-    self.cfg.Update(instance, feedback_fn)
+    self.disk.RecordGrow(self.delta)
+    self.cfg.Update(self.instance, feedback_fn)

     # Changes have been recorded, release node lock
     ReleaseLocks(self, locking.LEVEL_NODE)
@@ -1522,26 +1520,26 @@
     assert wipe_disks ^ (old_disk_size is None)

     if wipe_disks:
-      assert instance.disks[self.op.disk] == disk
+      assert self.instance.disks[self.op.disk] == self.disk

       # Wipe newly added disk space
-      WipeDisks(self, instance,
-                disks=[(self.op.disk, disk, old_disk_size)])
+      WipeDisks(self, self.instance,
+                disks=[(self.op.disk, self.disk, old_disk_size)])

     if self.op.wait_for_sync:
-      disk_abort = not WaitForSync(self, instance, disks=[disk])
+      disk_abort = not WaitForSync(self, self.instance, disks=[self.disk])
       if disk_abort:
         self.LogWarning("Disk syncing has not returned a good status; check"
                         " the instance")
-      if not instance.disks_active:
-        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
-    elif not instance.disks_active:
+      if not self.instance.disks_active:
+        _SafeShutdownInstanceDisks(self, self.instance, disks=[self.disk])
+    elif not self.instance.disks_active:
       self.LogWarning("Not shutting down the disk even if the instance is"
                       " not supposed to be running because no wait for"
                       " sync mode was requested")

     assert self.owned_locks(locking.LEVEL_NODE_RES)
-    assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
+    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)


 class LUInstanceReplaceDisks(LogicalUnit):
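The grow path shown above is a dry-run-then-commit sequence: call_blockdev_grow is issued once per node with what looks like a dry-run flag set, then for real against the backing storage on every node, then against the logical device on the primary node only; only after that is the new size recorded and the configuration updated, followed by the optional wipe of the added space and the sync wait. A condensed sketch of that ordering with hypothetical stand-ins, not Ganeti's rpc layer (the meaning of the two boolean flags is an assumption based on the surrounding comments):

    class FakeRpc(object):
      def grow(self, node, disk, delta, dryrun, backingstore):
        print("grow %s on %s by %sM (dryrun=%s, backingstore=%s)"
              % (disk["name"], node, delta, dryrun, backingstore))

    def grow_disk(rpc, all_nodes, primary_node, disk, delta):
      for node in all_nodes:                             # 1. dry-run everywhere first
        rpc.grow(node, disk, delta, True, True)
      for node in all_nodes:                             # 2. real grow of the backing storage
        rpc.grow(node, disk, delta, False, True)
      rpc.grow(primary_node, disk, delta, False, False)  # 3. logical device, primary only
      disk["size"] += delta                              # 4. record the new size before persisting

    grow_disk(FakeRpc(), ["node-a", "node-b"], "node-a",
              {"name": "disk/0", "size": 1024}, 512)
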
@@ -1556,17 +1554,15 @@
     """Check arguments.

     """
-    remote_node = self.op.remote_node
-    ialloc = self.op.iallocator
     if self.op.mode == constants.REPLACE_DISK_CHG:
-      if remote_node is None and ialloc is None:
+      if self.op.remote_node is None and self.op.iallocator is None:
         raise errors.OpPrereqError("When changing the secondary either an"
                                    " iallocator script must be used or the"
                                    " new node given", errors.ECODE_INVAL)
       else:
         CheckIAllocatorOrNode(self, "iallocator", "remote_node")

-    elif remote_node is not None or ialloc is not None:
+    elif self.op.remote_node is not None or self.op.iallocator is not None:
       # Not replacing the secondary
       raise errors.OpPrereqError("The iallocator and new node options can"
                                  " only be used when changing the"
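The argument check above requires either an iallocator script or an explicit remote node when the mode is REPLACE_DISK_CHG (changing the secondary), and rejects both options in every other mode. The same rule in isolation, as a small sketch with plain exceptions instead of OpPrereqError:

    def check_replace_disk_args(mode, remote_node, iallocator):
      if mode == "replace-new-secondary":
        if remote_node is None and iallocator is None:
          raise ValueError("either an iallocator script or a new node is required")
      elif remote_node is not None or iallocator is not None:
        raise ValueError("iallocator/new node are only valid when changing"
                         " the secondary")
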
@@ -1760,11 +1756,10 @@
     """Deactivate the disks

     """
-    instance = self.instance
     if self.op.force:
-      ShutdownInstanceDisks(self, instance)
+      ShutdownInstanceDisks(self, self.instance)
     else:
-      _SafeShutdownInstanceDisks(self, instance)
+      _SafeShutdownInstanceDisks(self, self.instance)


 def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
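The deactivate path differs only in which helper it calls: ShutdownInstanceDisks when force is set, otherwise _SafeShutdownInstanceDisks. Schematically, with hypothetical helper signatures rather than the real functions:

    def deactivate_disks(lu, instance, force, shutdown, safe_shutdown):
      if force:
        shutdown(lu, instance)        # shut the disks down unconditionally
      else:
        safe_shutdown(lu, instance)   # the safe variant gets to veto the shutdown
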
@@ -1939,29 +1934,28 @@
     This checks that the instance is in the cluster.

     """
-    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
-    assert instance is not None, \
+    self.instance = self.cfg.GetInstanceInfo(self.instance_name)
+    assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.instance_name

-    if instance.disk_template != constants.DT_DRBD8:
+    if self.instance.disk_template != constants.DT_DRBD8:
       raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                  " instances", errors.ECODE_INVAL)

-    if len(instance.secondary_nodes) != 1:
+    if len(self.instance.secondary_nodes) != 1:
       raise errors.OpPrereqError("The instance has a strange layout,"
                                  " expected one secondary but found %d" %
-                                 len(instance.secondary_nodes),
+                                 len(self.instance.secondary_nodes),
                                  errors.ECODE_FAULT)

-    instance = self.instance
-    secondary_node_uuid = instance.secondary_nodes[0]
+    secondary_node_uuid = self.instance.secondary_nodes[0]

     if self.iallocator_name is None:
       remote_node_uuid = self.remote_node_uuid
     else:
       remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
-                                            instance.name,
-                                            instance.secondary_nodes)
+                                            self.instance.name,
+                                            self.instance.secondary_nodes)

     if remote_node_uuid is None:
       self.remote_node_info = None
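CheckPrereq above verifies that the locked instance exists, is DRBD8-based and has exactly one secondary, and then resolves the replacement node: the explicitly given remote node if no iallocator was named, otherwise the result of running the allocator against the instance name and its secondary nodes. A stripped-down sketch of that resolution step (hypothetical allocator callable, not the Ganeti iallocator interface):

    def pick_remote_node(explicit_node_uuid, iallocator_name, run_allocator,
                         instance):
      if iallocator_name is None:
        return explicit_node_uuid     # may legitimately be None outside CHG mode
      return run_allocator(iallocator_name, instance.name,
                           instance.secondary_nodes)
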
@@ -1988,11 +1982,11 @@
                                  errors.ECODE_INVAL)

     if self.mode == constants.REPLACE_DISK_AUTO:
-      if not self._CheckDisksActivated(instance):
+      if not self._CheckDisksActivated(self.instance):
         raise errors.OpPrereqError("Please run activate-disks on instance %s"
                                    " first" % self.instance_name,
                                    errors.ECODE_STATE)
-      faulty_primary = self._FindFaultyDisks(instance.primary_node)
+      faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
       faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)

       if faulty_primary and faulty_secondary:
@@ -2003,13 +1997,13 @@

       if faulty_primary:
         self.disks = faulty_primary
-        self.target_node_uuid = instance.primary_node
+        self.target_node_uuid = self.instance.primary_node
         self.other_node_uuid = secondary_node_uuid
         check_nodes = [self.target_node_uuid, self.other_node_uuid]
       elif faulty_secondary:
         self.disks = faulty_secondary
         self.target_node_uuid = secondary_node_uuid
-        self.other_node_uuid = instance.primary_node
+        self.other_node_uuid = self.instance.primary_node
         check_nodes = [self.target_node_uuid, self.other_node_uuid]
       else:
         self.disks = []
@@ -2018,18 +2012,18 @@
     else:
       # Non-automatic modes
       if self.mode == constants.REPLACE_DISK_PRI:
-        self.target_node_uuid = instance.primary_node
+        self.target_node_uuid = self.instance.primary_node
         self.other_node_uuid = secondary_node_uuid
         check_nodes = [self.target_node_uuid, self.other_node_uuid]

       elif self.mode == constants.REPLACE_DISK_SEC:
         self.target_node_uuid = secondary_node_uuid
-        self.other_node_uuid = instance.primary_node
+        self.other_node_uuid = self.instance.primary_node
         check_nodes = [self.target_node_uuid, self.other_node_uuid]

       elif self.mode == constants.REPLACE_DISK_CHG:
         self.new_node_uuid = remote_node_uuid
-        self.other_node_uuid = instance.primary_node
+        self.other_node_uuid = self.instance.primary_node
         self.target_node_uuid = secondary_node_uuid
         check_nodes = [self.new_node_uuid, self.other_node_uuid]

@@ -2060,8 +2054,9 @@
       cluster = self.cfg.GetClusterInfo()
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               new_group_info)
-      CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
-                             self.cfg, ignore=self.ignore_ipolicy)
+      CheckTargetNodeIPolicy(self, ipolicy, self.instance,
+                             self.remote_node_info, self.cfg,
+                             ignore=self.ignore_ipolicy)

     for node_uuid in check_nodes:
       CheckNodeOnline(self.lu, node_uuid)
@@ -2081,7 +2076,7 @@

     # Check whether disks are valid
     for disk_idx in self.disks:
-      instance.FindDisk(disk_idx)
+      self.instance.FindDisk(disk_idx)

     # Get secondary node IP addresses
     self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
