Revision d0d7d7cf lib/cmdlib/instance.py

--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -501,14 +501,13 @@
     """
     self.needed_locks = {}

-    instance_name = self.op.instance_name
     # this is just a preventive check, but someone might still add this
     # instance in the meantime, and creation will fail at lock-add time
-    if instance_name in self.cfg.GetInstanceList():
+    if self.op.instance_name in self.cfg.GetInstanceList():
       raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
-                                 instance_name, errors.ECODE_EXISTS)
+                                 self.op.instance_name, errors.ECODE_EXISTS)

-    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
+    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

     if self.op.iallocator:
       # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
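
Note: the hunks in this revision all follow one pattern: single-use local aliases of opcode or instance attributes (e.g. "instance_name = self.op.instance_name") are dropped and the attribute is read directly. A minimal, self-contained sketch of that pattern follows; FakeOpcode and FakeLU are illustrative stand-ins, not Ganeti's real OpCode/LogicalUnit classes.

# Sketch of the refactoring pattern in this revision; the classes here are
# illustrative stand-ins, not Ganeti's real classes.

class FakeOpcode(object):
  def __init__(self, instance_name):
    self.instance_name = instance_name


class FakeLU(object):
  def __init__(self, op, existing_instances):
    self.op = op
    self._existing = existing_instances

  def expand_names_before(self):
    # Old style: bind the opcode field to a throw-away local alias first.
    instance_name = self.op.instance_name
    if instance_name in self._existing:
      raise ValueError("Instance '%s' is already in the cluster" %
                       instance_name)

  def expand_names_after(self):
    # New style: read the opcode field directly; one less name to track.
    if self.op.instance_name in self._existing:
      raise ValueError("Instance '%s' is already in the cluster" %
                       self.op.instance_name)


lu = FakeLU(FakeOpcode("web-1"), existing_instances={"db-1"})
lu.expand_names_before()
lu.expand_names_after()
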
@@ -654,31 +653,28 @@
     """
     assert self.op.mode == constants.INSTANCE_IMPORT

-    src_node_uuid = self.op.src_node_uuid
-    src_path = self.op.src_path
-
-    if src_node_uuid is None:
+    if self.op.src_node_uuid is None:
       locked_nodes = self.owned_locks(locking.LEVEL_NODE)
       exp_list = self.rpc.call_export_list(locked_nodes)
       found = False
       for node in exp_list:
         if exp_list[node].fail_msg:
           continue
-        if src_path in exp_list[node].payload:
+        if self.op.src_path in exp_list[node].payload:
           found = True
           self.op.src_node = node
-          self.op.src_node_uuid = src_node_uuid = \
-            self.cfg.GetNodeInfoByName(node).uuid
-          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
-                                                       src_path)
+          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
+          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
+                                            self.op.src_path)
           break
       if not found:
         raise errors.OpPrereqError("No export found for relative path %s" %
-                                   src_path, errors.ECODE_INVAL)
+                                   self.op.src_path, errors.ECODE_INVAL)

-    CheckNodeOnline(self, src_node_uuid)
-    result = self.rpc.call_export_info(src_node_uuid, src_path)
-    result.Raise("No export or invalid export found in dir %s" % src_path)
+    CheckNodeOnline(self, self.op.src_node_uuid)
+    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
+    result.Raise("No export or invalid export found in dir %s" %
+                 self.op.src_path)

     export_info = objects.SerializableConfigParser.Loads(str(result.payload))
     if not export_info.has_section(constants.INISECT_EXP):
@@ -1179,9 +1175,6 @@
     """Create and add the instance to the cluster.

     """
-    instance = self.op.instance_name
-    pnode_name = self.pnode.name
-
     assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                 self.owned_locks(locking.LEVEL_NODE)), \
       "Node locks differ from node resource locks"
@@ -1199,7 +1192,7 @@
     nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
     disks = GenerateDiskTemplate(self,
                                  self.op.disk_template,
-                                 instance, self.pnode.uuid,
+                                 self.op.instance_name, self.pnode.uuid,
                                  self.secondaries,
                                  self.disks,
                                  self.instance_file_storage_dir,
@@ -1208,7 +1201,7 @@
                                  feedback_fn,
                                  self.cfg.GetGroupDiskParams(nodegroup))

-    iobj = objects.Instance(name=instance, os=self.op.os_type,
+    iobj = objects.Instance(name=self.op.instance_name, os=self.op.os_type,
                             primary_node=self.pnode.uuid,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
@@ -1244,10 +1237,10 @@
         CreateDisks(self, iobj)
       except errors.OpExecError:
         self.LogWarning("Device creation failed")
-        self.cfg.ReleaseDRBDMinors(instance)
+        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
         raise

-    feedback_fn("adding instance %s to cluster config" % instance)
+    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

     self.cfg.AddInstance(iobj, self.proc.GetECId())

@@ -1316,7 +1309,7 @@
             for idx, success in enumerate(result.payload):
               if not success:
                 logging.warn("pause-sync of instance %s for disk %d failed",
-                             instance, idx)
+                             self.op.instance_name, idx)

           feedback_fn("* running the instance OS create scripts...")
           # FIXME: pass debug option from opcode to backend
@@ -1331,10 +1324,11 @@
             for idx, success in enumerate(result.payload):
               if not success:
                 logging.warn("resume-sync of instance %s for disk %d failed",
-                             instance, idx)
+                             self.op.instance_name, idx)

           os_add_result.Raise("Could not add os for instance %s"
-                              " on node %s" % (instance, pnode_name))
+                              " on node %s" % (self.op.instance_name,
+                                               self.pnode.name))

       else:
         if self.op.mode == constants.INSTANCE_IMPORT:
@@ -1362,7 +1356,8 @@
                                                   iobj, transfers)
           if not compat.all(import_result):
             self.LogWarning("Some disks for instance %s on node %s were not"
-                            " imported successfully" % (instance, pnode_name))
+                            " imported successfully" % (self.op.instance_name,
+                                                        self.pnode.name))

           rename_from = self._old_instance_name

@@ -1385,7 +1380,8 @@
             # TODO: Should the instance still be started, even if some disks
             # failed to import (valid for local imports, too)?
             self.LogWarning("Some disks for instance %s on node %s were not"
-                            " imported successfully" % (instance, pnode_name))
+                            " imported successfully" % (self.op.instance_name,
+                                                        self.pnode.name))

           rename_from = self.source_instance_name

@@ -1395,20 +1391,21 @@
                                        % self.op.mode)

         # Run rename script on newly imported instance
-        assert iobj.name == instance
-        feedback_fn("Running rename script for %s" % instance)
+        assert iobj.name == self.op.instance_name
+        feedback_fn("Running rename script for %s" % self.op.instance_name)
         result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                    rename_from,
                                                    self.op.debug_level)
         result.Warn("Failed to run rename script for %s on node %s" %
-                    (instance, pnode_name), self.LogWarning)
+                    (self.op.instance_name, self.pnode.name), self.LogWarning)

     assert not self.owned_locks(locking.LEVEL_NODE_RES)

     if self.op.start:
       iobj.admin_state = constants.ADMINST_UP
       self.cfg.Update(iobj, feedback_fn)
-      logging.info("Starting instance %s on node %s", instance, pnode_name)
+      logging.info("Starting instance %s on node %s", self.op.instance_name,
+                   self.pnode.name)
       feedback_fn("* starting instance...")
       result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                             False, self.op.reason)
@@ -1484,16 +1481,16 @@
     """Rename the instance.

     """
-    inst = self.instance
-    old_name = inst.name
+    old_name = self.instance.name

     rename_file_storage = False
-    if (inst.disk_template in constants.DTS_FILEBASED and
-        self.op.new_name != inst.name):
-      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
+    if (self.instance.disk_template in constants.DTS_FILEBASED and
+        self.op.new_name != self.instance.name):
+      old_file_storage_dir = os.path.dirname(
+                               self.instance.disks[0].logical_id[1])
       rename_file_storage = True

-    self.cfg.RenameInstance(inst.name, self.op.new_name)
+    self.cfg.RenameInstance(self.instance.name, self.op.new_name)
     # Change the instance lock. This is definitely safe while we hold the BGL.
     # Otherwise the new lock would have to be added in acquired mode.
     assert self.REQ_BGL
@@ -1502,38 +1499,41 @@
     self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

     # re-read the instance from the configuration after rename
-    inst = self.cfg.GetInstanceInfo(self.op.new_name)
+    renamed_inst = self.cfg.GetInstanceInfo(self.op.new_name)

     if rename_file_storage:
-      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
-      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
+      new_file_storage_dir = os.path.dirname(
+                               renamed_inst.disks[0].logical_id[1])
+      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                      old_file_storage_dir,
                                                      new_file_storage_dir)
       result.Raise("Could not rename on node %s directory '%s' to '%s'"
                    " (but the instance has been renamed in Ganeti)" %
-                   (self.cfg.GetNodeName(inst.primary_node),
+                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                     old_file_storage_dir, new_file_storage_dir))

-    StartInstanceDisks(self, inst, None)
+    StartInstanceDisks(self, renamed_inst, None)
     # update info on disks
-    info = GetInstanceInfoText(inst)
-    for (idx, disk) in enumerate(inst.disks):
-      for node_uuid in inst.all_nodes:
+    info = GetInstanceInfoText(renamed_inst)
+    for (idx, disk) in enumerate(renamed_inst.disks):
+      for node_uuid in renamed_inst.all_nodes:
         self.cfg.SetDiskID(disk, node_uuid)
         result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
         result.Warn("Error setting info on node %s for disk %s" %
                     (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
     try:
-      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
-                                                 old_name, self.op.debug_level)
+      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
+                                                 renamed_inst, old_name,
+                                                 self.op.debug_level)
       result.Warn("Could not run OS rename script for instance %s on node %s"
                   " (but the instance has been renamed in Ganeti)" %
-                  (inst.name, self.cfg.GetNodeName(inst.primary_node)),
+                  (renamed_inst.name,
+                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                   self.LogWarning)
     finally:
-      ShutdownInstanceDisks(self, inst)
+      ShutdownInstanceDisks(self, renamed_inst)

-    return inst.name
+    return renamed_inst.name


 class LUInstanceRemove(LogicalUnit):
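
Note: the rename hunk above replaces the rebound local "inst" with "renamed_inst", the object re-read from the configuration after RenameInstance, so everything that follows clearly works on the post-rename instance. A minimal sketch of that re-read-after-rename flow follows; TinyConfig is an illustrative stand-in, not Ganeti's ConfigWriter.

# Sketch of the rename flow made explicit by the new variable name; TinyConfig
# is a stand-in for illustration only.

class TinyConfig(object):
  def __init__(self):
    self._instances = {"old-name": {"name": "old-name", "disks": ["disk0"]}}

  def RenameInstance(self, old_name, new_name):
    data = self._instances.pop(old_name)
    data["name"] = new_name
    self._instances[new_name] = data

  def GetInstanceInfo(self, name):
    return self._instances[name]


cfg = TinyConfig()
old_name = "old-name"
cfg.RenameInstance(old_name, "new-name")
# Re-read after the rename and keep working with the re-read object only,
# mirroring "renamed_inst = self.cfg.GetInstanceInfo(self.op.new_name)".
renamed_inst = cfg.GetInstanceInfo("new-name")
assert renamed_inst["name"] == "new-name"
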
@@ -1590,26 +1590,27 @@
     """Remove the instance.

     """
-    instance = self.instance
-    logging.info("Shutting down instance %s on node %s",
-                 instance.name, self.cfg.GetNodeName(instance.primary_node))
+    logging.info("Shutting down instance %s on node %s", self.instance.name,
+                 self.cfg.GetNodeName(self.instance.primary_node))

-    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
+    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
+                                             self.instance,
                                              self.op.shutdown_timeout,
                                              self.op.reason)
     if self.op.ignore_failures:
       result.Warn("Warning: can't shutdown instance", feedback_fn)
     else:
       result.Raise("Could not shutdown instance %s on node %s" %
-                   (instance.name, self.cfg.GetNodeName(instance.primary_node)))
+                   (self.instance.name,
+                    self.cfg.GetNodeName(self.instance.primary_node)))

     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))
-    assert not (set(instance.all_nodes) -
+    assert not (set(self.instance.all_nodes) -
                 self.owned_locks(locking.LEVEL_NODE)), \
       "Not owning correct locks"

-    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
+    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


 class LUInstanceMove(LogicalUnit):
@@ -1667,27 +1668,28 @@
     This checks that the instance is in the cluster.

     """
-    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name

-    if instance.disk_template not in constants.DTS_COPYABLE:
+    if self.instance.disk_template not in constants.DTS_COPYABLE:
       raise errors.OpPrereqError("Disk template %s not suitable for copying" %
-                                 instance.disk_template, errors.ECODE_STATE)
+                                 self.instance.disk_template,
+                                 errors.ECODE_STATE)

     target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
     assert target_node is not None, \
       "Cannot retrieve locked node %s" % self.op.target_node

     self.target_node_uuid = target_node.uuid
-    if target_node.uuid == instance.primary_node:
+    if target_node.uuid == self.instance.primary_node:
       raise errors.OpPrereqError("Instance %s is already on the node %s" %
-                                 (instance.name, target_node.name),
+                                 (self.instance.name, target_node.name),
                                  errors.ECODE_STATE)

-    bep = self.cfg.GetClusterInfo().FillBE(instance)
+    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

-    for idx, dsk in enumerate(instance.disks):
+    for idx, dsk in enumerate(self.instance.disks):
       if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
         raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                    " cannot copy" % idx, errors.ECODE_STATE)
@@ -1698,21 +1700,22 @@
     cluster = self.cfg.GetClusterInfo()
     group_info = self.cfg.GetNodeGroup(target_node.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    CheckTargetNodeIPolicy(self, ipolicy, instance, target_node, self.cfg,
+    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                            ignore=self.op.ignore_ipolicy)

-    if instance.admin_state == constants.ADMINST_UP:
+    if self.instance.admin_state == constants.ADMINST_UP:
       # check memory requirements on the secondary node
       CheckNodeFreeMemory(
           self, target_node.uuid, "failing over instance %s" %
-          instance.name, bep[constants.BE_MAXMEM], instance.hypervisor,
-          self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
+          self.instance.name, bep[constants.BE_MAXMEM],
+          self.instance.hypervisor,
+          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
     else:
       self.LogInfo("Not checking memory on the secondary node as"
                    " instance will not be started")

     # check bridge existance
-    CheckInstanceBridgesExist(self, instance, node_uuid=target_node.uuid)
+    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

   def Exec(self, feedback_fn):
     """Move an instance.
1721 1724
    the data over (slow) and starting it on the new node.
1722 1725

  
1723 1726
    """
1724
    instance = self.instance
1725

  
1726
    source_node = self.cfg.GetNodeInfo(instance.primary_node)
1727
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1727 1728
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1728 1729

  
1729 1730
    self.LogInfo("Shutting down instance %s on source node %s",
1730
                 instance.name, source_node.name)
1731
                 self.instance.name, source_node.name)
1731 1732

  
1732 1733
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1733 1734
            self.owned_locks(locking.LEVEL_NODE_RES))
1734 1735

  
1735
    result = self.rpc.call_instance_shutdown(source_node.uuid, instance,
1736
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1736 1737
                                             self.op.shutdown_timeout,
1737 1738
                                             self.op.reason)
1738 1739
    if self.op.ignore_consistency:
1739 1740
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1740 1741
                  " anyway. Please make sure node %s is down. Error details" %
1741
                  (instance.name, source_node.name, source_node.name),
1742
                  (self.instance.name, source_node.name, source_node.name),
1742 1743
                  self.LogWarning)
1743 1744
    else:
1744 1745
      result.Raise("Could not shutdown instance %s on node %s" %
1745
                   (instance.name, source_node.name))
1746
                   (self.instance.name, source_node.name))
1746 1747

  
1747 1748
    # create the target disks
1748 1749
    try:
1749
      CreateDisks(self, instance, target_node_uuid=target_node.uuid)
1750
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1750 1751
    except errors.OpExecError:
1751 1752
      self.LogWarning("Device creation failed")
1752
      self.cfg.ReleaseDRBDMinors(instance.name)
1753
      self.cfg.ReleaseDRBDMinors(self.instance.name)
1753 1754
      raise
1754 1755

  
1755 1756
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1756 1757

  
1757 1758
    errs = []
1758 1759
    # activate, get path, copy the data over
1759
    for idx, disk in enumerate(instance.disks):
1760
    for idx, disk in enumerate(self.instance.disks):
1760 1761
      self.LogInfo("Copying data for disk %d", idx)
1761
      result = self.rpc.call_blockdev_assemble(target_node.uuid,
1762
                                               (disk, instance), instance.name,
1763
                                               True, idx)
1762
      result = self.rpc.call_blockdev_assemble(
1763
                 target_node.uuid, (disk, self.instance), self.instance.name,
1764
                 True, idx)
1764 1765
      if result.fail_msg:
1765 1766
        self.LogWarning("Can't assemble newly created disk %d: %s",
1766 1767
                        idx, result.fail_msg)
1767 1768
        errs.append(result.fail_msg)
1768 1769
        break
1769 1770
      dev_path = result.payload
1770
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk, instance),
1771
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1772
                                                                self.instance),
1771 1773
                                             target_node.name, dev_path,
1772 1774
                                             cluster_name)
1773 1775
      if result.fail_msg:
@@ -1779,37 +1781,37 @@
     if errs:
       self.LogWarning("Some disks failed to copy, aborting")
       try:
-        RemoveDisks(self, instance, target_node_uuid=target_node.uuid)
+        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
       finally:
-        self.cfg.ReleaseDRBDMinors(instance.name)
+        self.cfg.ReleaseDRBDMinors(self.instance.name)
         raise errors.OpExecError("Errors during disk copy: %s" %
                                  (",".join(errs),))

-    instance.primary_node = target_node.uuid
-    self.cfg.Update(instance, feedback_fn)
+    self.instance.primary_node = target_node.uuid
+    self.cfg.Update(self.instance, feedback_fn)

     self.LogInfo("Removing the disks on the original node")
-    RemoveDisks(self, instance, target_node_uuid=source_node.uuid)
+    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

     # Only start the instance if it's marked as up
-    if instance.admin_state == constants.ADMINST_UP:
+    if self.instance.admin_state == constants.ADMINST_UP:
       self.LogInfo("Starting instance %s on node %s",
-                   instance.name, target_node.name)
+                   self.instance.name, target_node.name)

-      disks_ok, _ = AssembleInstanceDisks(self, instance,
+      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                           ignore_secondaries=True)
       if not disks_ok:
-        ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, self.instance)
         raise errors.OpExecError("Can't activate the instance's disks")

       result = self.rpc.call_instance_start(target_node.uuid,
-                                            (instance, None, None), False,
+                                            (self.instance, None, None), False,
                                             self.op.reason)
       msg = result.fail_msg
       if msg:
-        ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, self.instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
-                                 (instance.name, target_node.name, msg))
+                                 (self.instance.name, target_node.name, msg))


 class LUInstanceMultiAlloc(NoHooksLU):
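
Note: the LUInstanceMove hunks above keep the same per-disk flow: assemble each disk on the target, export it from the source, stop at the first failure, and on error remove the half-created target disks and release DRBD minors before failing. A condensed sketch follows; the helper functions are illustrative placeholders, not the real RPC wrappers.

# Condensed sketch of the per-disk copy-and-rollback loop; placeholders only.

def assemble_on_target(disk):
  # Stand-in for the "assemble disk on target node" RPC; returns (ok, payload).
  return True, "/dev/fake/%s" % disk

def export_from_source(disk, dev_path):
  # Stand-in for the "export disk from source node" RPC; returns (ok, error).
  return True, None

def move_disks(disks):
  errs = []
  for idx, disk in enumerate(disks):
    ok, payload = assemble_on_target(disk)
    if not ok:
      errs.append(payload)
      break                      # stop at the first broken disk
    ok, err = export_from_source(disk, payload)
    if not ok:
      errs.append(err)
      break
  if errs:
    # The real code removes the half-created target disks and releases DRBD
    # minors at this point; this sketch just fails the operation.
    raise RuntimeError("Errors during disk copy: %s" % ",".join(errs))

move_disks(["disk0", "disk1"])
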
@@ -2571,20 +2573,19 @@
   def _PreCheckDiskTemplate(self, pnode_info):
     """CheckPrereq checks related to a new disk template."""
     # Arguments are passed to avoid configuration lookups
-    instance = self.instance
-    pnode_uuid = instance.primary_node
-    cluster = self.cluster
-    if instance.disk_template == self.op.disk_template:
+    pnode_uuid = self.instance.primary_node
+    if self.instance.disk_template == self.op.disk_template:
       raise errors.OpPrereqError("Instance already has disk template %s" %
-                                 instance.disk_template, errors.ECODE_INVAL)
+                                 self.instance.disk_template,
+                                 errors.ECODE_INVAL)

-    if (instance.disk_template,
+    if (self.instance.disk_template,
         self.op.disk_template) not in self._DISK_CONVERSIONS:
       raise errors.OpPrereqError("Unsupported disk template conversion from"
-                                 " %s to %s" % (instance.disk_template,
+                                 " %s to %s" % (self.instance.disk_template,
                                                 self.op.disk_template),
                                  errors.ECODE_INVAL)
-    CheckInstanceState(self, instance, INSTANCE_DOWN,
+    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                        msg="cannot change disk template")
     if self.op.disk_template in constants.DTS_INT_MIRROR:
       if self.op.remote_node_uuid == pnode_uuid:
@@ -2594,18 +2595,18 @@
       CheckNodeOnline(self, self.op.remote_node_uuid)
       CheckNodeNotDrained(self, self.op.remote_node_uuid)
       # FIXME: here we assume that the old instance type is DT_PLAIN
-      assert instance.disk_template == constants.DT_PLAIN
+      assert self.instance.disk_template == constants.DT_PLAIN
       disks = [{constants.IDISK_SIZE: d.size,
                 constants.IDISK_VG: d.logical_id[0]}
-               for d in instance.disks]
+               for d in self.instance.disks]
       required = ComputeDiskSizePerVG(self.op.disk_template, disks)
       CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

       snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
       snode_group = self.cfg.GetNodeGroup(snode_info.group)
-      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
+      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                               snode_group)
-      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
+      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                              ignore=self.op.ignore_ipolicy)
       if pnode_info.group != snode_info.group:
         self.LogWarning("The primary and secondary nodes are in two"
@@ -2622,7 +2623,7 @@
       has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
       if compat.any(map(has_es, nodes)):
         errmsg = ("Cannot convert disk template from %s to %s when exclusive"
-                  " storage is enabled" % (instance.disk_template,
+                  " storage is enabled" % (self.instance.disk_template,
                                            self.op.disk_template))
         raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

@@ -2633,17 +2634,17 @@
     @param ispec: instance specs to be updated with the new disks

     """
-    instance = self.instance
-    self.diskparams = self.cfg.GetInstanceDiskParams(instance)
+    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

     excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodes(self.cfg, instance.all_nodes).values()
+      rpc.GetExclusiveStorageForNodes(self.cfg,
+                                      self.instance.all_nodes).values()
       )

     # Check disk modifications. This is done here and not in CheckArguments
     # (as with NICs), because we need to know the instance's disk template
     ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
-    if instance.disk_template == constants.DT_EXT:
+    if self.instance.disk_template == constants.DT_EXT:
       self._CheckMods("disk", self.op.disks, {}, ver_fn)
     else:
       self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
@@ -2652,7 +2653,7 @@
     self.diskmod = _PrepareContainerMods(self.op.disks, None)

     # Check the validity of the `provider' parameter
-    if instance.disk_template in constants.DT_EXT:
+    if self.instance.disk_template in constants.DT_EXT:
       for mod in self.diskmod:
         ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
         if mod[0] == constants.DDM_ADD:
@@ -2678,7 +2679,7 @@
                                       constants.DT_EXT),
                                      errors.ECODE_INVAL)

-    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
+    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Disk operations not supported for"
                                  " diskless instances", errors.ECODE_INVAL)

@@ -2686,7 +2687,7 @@
       disk.name = params.get(constants.IDISK_NAME, None)

     # Verify disk changes (operating on a copy)
-    disks = copy.deepcopy(instance.disks)
+    disks = copy.deepcopy(self.instance.disks)
     _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                         _PrepareDiskMod, None)
     utils.ValidateDeviceNames("disk", disks)
@@ -2694,14 +2695,14 @@
       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                  " more" % constants.MAX_DISKS,
                                  errors.ECODE_STATE)
-    disk_sizes = [disk.size for disk in instance.disks]
+    disk_sizes = [disk.size for disk in self.instance.disks]
     disk_sizes.extend(params["size"] for (op, idx, params, private) in
                       self.diskmod if op == constants.DDM_ADD)
     ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
     ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

     if self.op.offline is not None and self.op.offline:
-      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
+      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                          msg="can't change to offline")

   def CheckPrereq(self):
@@ -2711,13 +2712,13 @@

     """
     assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
-    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.cluster = self.cfg.GetClusterInfo()

-    cluster = self.cluster = self.cfg.GetClusterInfo()
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name

-    pnode_uuid = instance.primary_node
+    pnode_uuid = self.instance.primary_node

     self.warn = []

@@ -2725,7 +2726,8 @@
         not self.op.force):
       # verify that the instance is not up
       instance_info = self.rpc.call_instance_info(
-          pnode_uuid, instance.name, instance.hypervisor, instance.hvparams)
+          pnode_uuid, self.instance.name, self.instance.hypervisor,
+          self.instance.hvparams)
       if instance_info.fail_msg:
         self.warn.append("Can't get instance runtime information: %s" %
                          instance_info.fail_msg)
@@ -2735,7 +2737,7 @@
                                    errors.ECODE_STATE)

     assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
-    node_uuids = list(instance.all_nodes)
+    node_uuids = list(self.instance.all_nodes)
     pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

     #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
@@ -2750,11 +2752,11 @@

     # OS change
     if self.op.os_name and not self.op.force:
-      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
+      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                      self.op.force_variant)
       instance_os = self.op.os_name
     else:
-      instance_os = instance.os
+      instance_os = self.instance.os

     assert not (self.op.disk_template and self.op.disks), \
       "Can't modify disk template and apply disk changes at the same time"
@@ -2766,34 +2768,35 @@

     # hvparams processing
     if self.op.hvparams:
-      hv_type = instance.hypervisor
-      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
+      hv_type = self.instance.hypervisor
+      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
       utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
-      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
+      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

       # local check
       hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
-      CheckHVParams(self, node_uuids, instance.hypervisor, hv_new)
+      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
       self.hv_proposed = self.hv_new = hv_new # the new actual values
       self.hv_inst = i_hvdict # the new dict (without defaults)
     else:
-      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
-                                              instance.hvparams)
+      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
+                                                   self.instance.os,
+                                                   self.instance.hvparams)
       self.hv_new = self.hv_inst = {}

     # beparams processing
     if self.op.beparams:
-      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
+      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                   use_none=True)
       objects.UpgradeBeParams(i_bedict)
       utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
-      be_new = cluster.SimpleFillBE(i_bedict)
+      be_new = self.cluster.SimpleFillBE(i_bedict)
       self.be_proposed = self.be_new = be_new # the new actual values
       self.be_inst = i_bedict # the new dict (without defaults)
     else:
       self.be_new = self.be_inst = {}
-      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
-    be_old = cluster.FillBE(instance)
+      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
+    be_old = self.cluster.FillBE(self.instance)

     # CPU param validation -- checking every time a parameter is
     # changed to cover all cases where either CPU mask or vcpus have
@@ -2819,15 +2822,16 @@
         max_requested_cpu = max(map(max, cpu_list))
         # Check that all of the instance's nodes have enough physical CPUs to
         # satisfy the requested CPU mask
-        hvspecs = [(instance.hypervisor,
-                    self.cfg.GetClusterInfo().hvparams[instance.hypervisor])]
-        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
+        hvspecs = [(self.instance.hypervisor,
+                    self.cfg.GetClusterInfo()
+                      .hvparams[self.instance.hypervisor])]
+        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                 max_requested_cpu + 1,
                                 hvspecs)

     # osparams processing
     if self.op.osparams:
-      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
+      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
       CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
       self.os_inst = i_osdict # the new dict (without defaults)
     else:
@@ -2839,10 +2843,12 @@
       mem_check_list = [pnode_uuid]
       if be_new[constants.BE_AUTO_BALANCE]:
         # either we changed auto_balance to yes or it was from before
-        mem_check_list.extend(instance.secondary_nodes)
+        mem_check_list.extend(self.instance.secondary_nodes)
       instance_info = self.rpc.call_instance_info(
-          pnode_uuid, instance.name, instance.hypervisor, instance.hvparams)
-      hvspecs = [(instance.hypervisor, cluster.hvparams[instance.hypervisor])]
+          pnode_uuid, self.instance.name, self.instance.hypervisor,
+          self.instance.hvparams)
+      hvspecs = [(self.instance.hypervisor,
+                  self.cluster.hvparams[self.instance.hypervisor])]
       nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                          hvspecs, False)
       pninfo = nodeinfo[pnode_uuid]
@@ -2880,7 +2886,7 @@

       if be_new[constants.BE_AUTO_BALANCE]:
         for node_uuid, nres in nodeinfo.items():
-          if node_uuid not in instance.secondary_nodes:
+          if node_uuid not in self.instance.secondary_nodes:
             continue
           nres.Raise("Can't get info from secondary node %s" %
                      self.cfg.GetNodeName(node_uuid), prereq=True,
@@ -2901,13 +2907,13 @@

     if self.op.runtime_mem:
       remote_info = self.rpc.call_instance_info(
-         instance.primary_node, instance.name, instance.hypervisor,
-         instance.hvparams)
+         self.instance.primary_node, self.instance.name,
+         self.instance.hypervisor, self.instance.hvparams)
       remote_info.Raise("Error checking node %s" %
-                        self.cfg.GetNodeName(instance.primary_node))
+                        self.cfg.GetNodeName(self.instance.primary_node))
       if not remote_info.payload: # not running already
         raise errors.OpPrereqError("Instance %s is not running" %
-                                   instance.name, errors.ECODE_STATE)
+                                   self.instance.name, errors.ECODE_STATE)

       current_memory = remote_info.payload["memory"]
       if (not self.op.force and
@@ -2916,7 +2922,7 @@
         raise errors.OpPrereqError("Instance %s must have memory between %d"
                                    " and %d MB of memory unless --force is"
                                    " given" %
-                                   (instance.name,
+                                   (self.instance.name,
                                     self.be_proposed[constants.BE_MINMEM],
                                     self.be_proposed[constants.BE_MAXMEM]),
                                    errors.ECODE_INVAL)
@@ -2924,9 +2930,13 @@
       delta = self.op.runtime_mem - current_memory
       if delta > 0:
         CheckNodeFreeMemory(
-            self, instance.primary_node, "ballooning memory for instance %s" %
-            instance.name, delta, instance.hypervisor,
-            self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
+            self, self.instance.primary_node,
+            "ballooning memory for instance %s" % self.instance.name, delta,
+            self.instance.hypervisor,
+            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
+
+    # make self.cluster visible in the functions below
+    cluster = self.cluster

     def _PrepareNicCreate(_, params, private):
       self._PrepareNicModification(params, private, None, None,
@@ -2945,7 +2955,7 @@
         self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

     # Verify NIC changes (operating on copy)
-    nics = instance.nics[:]
+    nics = self.instance.nics[:]
     _ApplyContainerMods("NIC", nics, None, self.nicmod,
                         _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
     if len(nics) > constants.MAX_NICS:
@@ -2957,7 +2967,7 @@
     self._nic_chgdesc = []
     if self.nicmod:
       # Operate on copies as this is still in prereq
-      nics = [nic.Copy() for nic in instance.nics]
+      nics = [nic.Copy() for nic in self.instance.nics]
       _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                           self._CreateNewNic, self._ApplyNicMods, None)
       # Verify that NIC names are unique and valid
@@ -2966,10 +2976,10 @@
       ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
     else:
       self._new_nics = None
-      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
+      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

     if not self.op.ignore_ipolicy:
-      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
+      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                               group_info)

       # Fill ispec with backend parameters
@@ -2982,7 +2992,7 @@
       if self.op.disk_template:
         new_disk_template = self.op.disk_template
       else:
-        new_disk_template = instance.disk_template
+        new_disk_template = self.instance.disk_template
       ispec_max = ispec.copy()
       ispec_max[constants.ISPEC_MEM_SIZE] = \
         self.be_new.get(constants.BE_MAXMEM, None)
@@ -3007,40 +3017,39 @@

     """
     feedback_fn("Converting template to drbd")
-    instance = self.instance
-    pnode_uuid = instance.primary_node
+    pnode_uuid = self.instance.primary_node
     snode_uuid = self.op.remote_node_uuid

-    assert instance.disk_template == constants.DT_PLAIN
+    assert self.instance.disk_template == constants.DT_PLAIN

     # create a fake disk info for _GenerateDiskTemplate
     disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                   constants.IDISK_VG: d.logical_id[0],
                   constants.IDISK_NAME: d.name}
-                 for d in instance.disks]
+                 for d in self.instance.disks]
     new_disks = GenerateDiskTemplate(self, self.op.disk_template,
-                                     instance.name, pnode_uuid, [snode_uuid],
-                                     disk_info, None, None, 0, feedback_fn,
-                                     self.diskparams)
+                                     self.instance.name, pnode_uuid,
+                                     [snode_uuid], disk_info, None, None, 0,
+                                     feedback_fn, self.diskparams)
     anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                         self.diskparams)
     p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
     s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
-    info = GetInstanceInfoText(instance)
+    info = GetInstanceInfoText(self.instance)
     feedback_fn("Creating additional volumes...")
     # first, create the missing data and meta devices
     for disk in anno_disks:
       # unfortunately this is... not too nice
-      CreateSingleBlockDev(self, pnode_uuid, instance, disk.children[1],
+      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                            info, True, p_excl_stor)
       for child in disk.children:
-        CreateSingleBlockDev(self, snode_uuid, instance, child, info, True,
+        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                              s_excl_stor)
     # at this stage, all new LVs have been created, we can rename the
     # old ones
     feedback_fn("Renaming original volumes...")
     rename_list = [(o, n.children[0].logical_id)
-                   for (o, n) in zip(instance.disks, new_disks)]
+                   for (o, n) in zip(self.instance.disks, new_disks)]
     result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
     result.Raise("Failed to rename original LVs")

@@ -3051,29 +3060,29 @@
         for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                        (snode_uuid, s_excl_stor)]:
           f_create = node_uuid == pnode_uuid
-          CreateSingleBlockDev(self, node_uuid, instance, disk, info, f_create,
-                               excl_stor)
+          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
+                               f_create, excl_stor)
     except errors.GenericError, e:
       feedback_fn("Initializing of DRBD devices failed;"
                   " renaming back original volumes...")
       for disk in new_disks:
         self.cfg.SetDiskID(disk, pnode_uuid)
       rename_back_list = [(n.children[0], o.logical_id)
-                          for (n, o) in zip(new_disks, instance.disks)]
+                          for (n, o) in zip(new_disks, self.instance.disks)]
       result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
       result.Raise("Failed to rename LVs back after error %s" % str(e))
       raise

     # at this point, the instance has been modified
-    instance.disk_template = constants.DT_DRBD8
-    instance.disks = new_disks
-    self.cfg.Update(instance, feedback_fn)
+    self.instance.disk_template = constants.DT_DRBD8
+    self.instance.disks = new_disks
+    self.cfg.Update(self.instance, feedback_fn)

     # Release node locks while waiting for sync
     ReleaseLocks(self, locking.LEVEL_NODE)

     # disks are created, waiting for sync
-    disk_abort = not WaitForSync(self, instance,
+    disk_abort = not WaitForSync(self, self.instance,
                                  oneshot=not self.op.wait_for_sync)
     if disk_abort:
       raise errors.OpExecError("There are some degraded disks for"
@@ -3085,17 +3094,15 @@
     """Converts an instance from drbd to plain.

     """
-    instance = self.instance
-
-    assert len(instance.secondary_nodes) == 1
-    assert instance.disk_template == constants.DT_DRBD8
+    assert len(self.instance.secondary_nodes) == 1
+    assert self.instance.disk_template == constants.DT_DRBD8

-    pnode_uuid = instance.primary_node
-    snode_uuid = instance.secondary_nodes[0]
+    pnode_uuid = self.instance.primary_node
+    snode_uuid = self.instance.secondary_nodes[0]
     feedback_fn("Converting template to plain")

-    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
-    new_disks = [d.children[0] for d in instance.disks]
+    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+    new_disks = [d.children[0] for d in self.instance.disks]

     # copy over size, mode and name
     for parent, child in zip(old_disks, new_disks):
@@ -3110,10 +3117,10 @@
      self.cfg.AddTcpUdpPort(tcp_port)

     # update instance structure
-    instance.disks = new_disks
-    instance.disk_template = constants.DT_PLAIN
-    _UpdateIvNames(0, instance.disks)
-    self.cfg.Update(instance, feedback_fn)
+    self.instance.disks = new_disks
+    self.instance.disk_template = constants.DT_PLAIN
+    _UpdateIvNames(0, self.instance.disks)
+    self.cfg.Update(self.instance, feedback_fn)

     # Release locks in case removing disks takes a while
     ReleaseLocks(self, locking.LEVEL_NODE)
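
Note: the drbd-to-plain hunks above swap in the new disk objects, reset the disk template, renumber the disks' iv names and persist the instance once. A minimal sketch of that swap-then-persist step follows; RenumberIvNames is a hypothetical stand-in for _UpdateIvNames (whose body is not part of this diff), and ToyDisk/ToyInstance are illustrative, not Ganeti objects.

# Sketch of the "swap disks, then persist once" step; illustrative only.

class ToyDisk(object):
  def __init__(self, size):
    self.size = size
    self.iv_name = None

class ToyInstance(object):
  def __init__(self, disks, disk_template):
    self.disks = disks
    self.disk_template = disk_template

def RenumberIvNames(base_index, disks):
  # Assumed behaviour: give each disk a stable "disk/N" identifier.
  for offset, disk in enumerate(disks):
    disk.iv_name = "disk/%d" % (base_index + offset)

def convert_to_plain(instance, new_disks, update_fn):
  instance.disks = new_disks
  instance.disk_template = "plain"
  RenumberIvNames(0, instance.disks)
  update_fn(instance)            # single config write-back, as in the hunk

inst = ToyInstance([ToyDisk(1024)], "drbd")
convert_to_plain(inst, [ToyDisk(1024)], update_fn=lambda i: None)
assert inst.disk_template == "plain" and inst.disks[0].iv_name == "disk/0"
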
@@ -3141,26 +3148,24 @@
     """Creates a new disk.

     """
-    instance = self.instance
-
     # add a new disk
-    if instance.disk_template in constants.DTS_FILEBASED:
-      (file_driver, file_path) = instance.disks[0].logical_id
+    if self.instance.disk_template in constants.DTS_FILEBASED:
+      (file_driver, file_path) = self.instance.disks[0].logical_id
       file_path = os.path.dirname(file_path)
     else:
       file_driver = file_path = None

     disk = \
-      GenerateDiskTemplate(self, instance.disk_template, instance.name,
-                           instance.primary_node, instance.secondary_nodes,
-                           [params], file_path, file_driver, idx,
-                           self.Log, self.diskparams)[0]
+      GenerateDiskTemplate(self, self.instance.disk_template,
+                           self.instance.name, self.instance.primary_node,
+                           self.instance.secondary_nodes, [params], file_path,
+                           file_driver, idx, self.Log, self.diskparams)[0]

-    new_disks = CreateDisks(self, instance, disks=[disk])
+    new_disks = CreateDisks(self, self.instance, disks=[disk])

     if self.cluster.prealloc_wipe_disks:
       # Wipe new disk
-      WipeOrCleanupDisks(self, instance,
+      WipeOrCleanupDisks(self, self.instance,
                          disks=[(idx, disk, 0)],
                          cleanup=new_disks)

@@ -3268,29 +3273,28 @@
       "Not owning any node resource locks"

     result = []
-    instance = self.instance

     # New primary node
     if self.op.pnode_uuid:
-      instance.primary_node = self.op.pnode_uuid
+      self.instance.primary_node = self.op.pnode_uuid

     # runtime memory
     if self.op.runtime_mem:
-      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
-                                                     instance,
+      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
+                                                     self.instance,
                                                      self.op.runtime_mem)
       rpcres.Raise("Cannot modify instance runtime memory")
       result.append(("runtime_memory", self.op.runtime_mem))

     # Apply disk changes
-    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
+    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                         self._CreateNewDisk, self._ModifyDisk,
                         self._RemoveDisk)
-    _UpdateIvNames(0, instance.disks)
+    _UpdateIvNames(0, self.instance.disks)

     if self.op.disk_template:
       if __debug__:
-        check_nodes = set(instance.all_nodes)
+        check_nodes = set(self.instance.all_nodes)
         if self.op.remote_node_uuid:
           check_nodes.add(self.op.remote_node_uuid)
         for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
@@ -3299,21 +3303,21 @@
             ("Not owning the correct locks, owning %r, expected at least %r" %
              (owned, check_nodes))

-      r_shut = ShutdownInstanceDisks(self, instance)
+      r_shut = ShutdownInstanceDisks(self, self.instance)
       if not r_shut:
         raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                  " proceed with disk template conversion")
-      mode = (instance.disk_template, self.op.disk_template)
+      mode = (self.instance.disk_template, self.op.disk_template)
       try:
         self._DISK_CONVERSIONS[mode](self, feedback_fn)
       except:
-        self.cfg.ReleaseDRBDMinors(instance.name)
+        self.cfg.ReleaseDRBDMinors(self.instance.name)
         raise
       result.append(("disk_template", self.op.disk_template))

-      assert instance.disk_template == self.op.disk_template, \
+      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
-         (self.op.disk_template, instance.disk_template))
+         (self.op.disk_template, self.instance.disk_template))

     # Release node and resource locks if there are any (they might already have
     # been released during disk conversion)
@@ -3322,28 +3326,28 @@

     # Apply NIC changes
     if self._new_nics is not None:
-      instance.nics = self._new_nics
+      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

     # hvparams changes
     if self.op.hvparams:
-      instance.hvparams = self.hv_inst
+      self.instance.hvparams = self.hv_inst
       for key, val in self.op.hvparams.iteritems():
         result.append(("hv/%s" % key, val))

     # beparams changes
     if self.op.beparams:
-      instance.beparams = self.be_inst
+      self.instance.beparams = self.be_inst
       for key, val in self.op.beparams.iteritems():
         result.append(("be/%s" % key, val))

     # OS change
     if self.op.os_name:
-      instance.os = self.op.os_name
+      self.instance.os = self.op.os_name

     # osparams changes
     if self.op.osparams:
-      instance.osparams = self.os_inst
+      self.instance.osparams = self.os_inst
       for key, val in self.op.osparams.iteritems():
         result.append(("os/%s" % key, val))

@@ -3352,14 +3356,14 @@
       pass
     elif self.op.offline:
       # Mark instance as offline
-      self.cfg.MarkInstanceOffline(instance.name)
+      self.cfg.MarkInstanceOffline(self.instance.name)
       result.append(("admin_state", constants.ADMINST_OFFLINE))
     else:
       # Mark instance as online, but stopped
-      self.cfg.MarkInstanceDown(instance.name)
+      self.cfg.MarkInstanceDown(self.instance.name)
       result.append(("admin_state", constants.ADMINST_DOWN))

-    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
+    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

     assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                 self.owned_locks(locking.LEVEL_NODE)), \
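
Note: the LUInstanceSetParams.Exec hunks above keep the same apply-then-persist shape: each requested change mutates a field on self.instance, a ("key", value) pair is appended to the change list, and the object is written back once with self.cfg.Update at the end. A minimal sketch of that shape follows; ToyConfig/ToyInstance and apply_params are illustrative stand-ins, not Ganeti code.

# Sketch of the apply-then-persist pattern; illustrative stand-ins only.

class ToyInstance(object):
  def __init__(self, name):
    self.name = name
    self.hvparams = {}
    self.beparams = {}

class ToyConfig(object):
  def __init__(self):
    self.saved = None

  def Update(self, instance, feedback_fn):
    feedback_fn("persisting %s" % instance.name)
    self.saved = (instance.name, dict(instance.hvparams),
                  dict(instance.beparams))

def apply_params(cfg, instance, hvparams, beparams, feedback_fn):
  result = []
  if hvparams:
    instance.hvparams = hvparams
    result.extend(("hv/%s" % k, v) for k, v in hvparams.items())
  if beparams:
    instance.beparams = beparams
    result.extend(("be/%s" % k, v) for k, v in beparams.items())
  cfg.Update(instance, feedback_fn)       # single write-back at the end
  return result

cfg = ToyConfig()
inst = ToyInstance("web-1")
changes = apply_params(cfg, inst, {"kernel_path": "/boot/vmlinuz"},
                       {"maxmem": 1024}, feedback_fn=lambda msg: None)
assert cfg.saved is not None and len(changes) == 2
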
