Revision d0d7d7cf

b/lib/cmdlib/backup.py
136 136
    """Check prerequisites.
137 137

  
138 138
    """
139
    instance_name = self.op.instance_name
140

  
141
    self.instance = self.cfg.GetInstanceInfo(instance_name)
139
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
142 140
    assert self.instance is not None, \
143 141
          "Cannot retrieve locked instance %s" % self.op.instance_name
144 142
    CheckNodeOnline(self, self.instance.primary_node)
......
149 147
    """Prepares an instance for an export.
150 148

  
151 149
    """
152
    instance = self.instance
153

  
154 150
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
155 151
      salt = utils.GenerateSecret(8)
156 152

  
157 153
      feedback_fn("Generating X509 certificate on %s" %
158
                  self.cfg.GetNodeName(instance.primary_node))
159
      result = self.rpc.call_x509_cert_create(instance.primary_node,
154
                  self.cfg.GetNodeName(self.instance.primary_node))
155
      result = self.rpc.call_x509_cert_create(self.instance.primary_node,
160 156
                                              constants.RIE_CERT_VALIDITY)
161 157
      result.Raise("Can't create X509 key and certificate on %s" %
162 158
                   self.cfg.GetNodeName(result.node))
......
263 259
    This checks that the instance and node names are valid.
264 260

  
265 261
    """
266
    instance_name = self.op.instance_name
267

  
268
    self.instance = self.cfg.GetInstanceInfo(instance_name)
262
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
269 263
    assert self.instance is not None, \
270 264
          "Cannot retrieve locked instance %s" % self.op.instance_name
271 265
    CheckNodeOnline(self, self.instance.primary_node)
......
293 287
      if len(self.op.target_node) != len(self.instance.disks):
294 288
        raise errors.OpPrereqError(("Received destination information for %s"
295 289
                                    " disks, but instance %s has %s disks") %
296
                                   (len(self.op.target_node), instance_name,
290
                                   (len(self.op.target_node),
291
                                    self.op.instance_name,
297 292
                                    len(self.instance.disks)),
298 293
                                   errors.ECODE_INVAL)
299 294

  
......
385 380
    """
386 381
    assert self.op.mode in constants.EXPORT_MODES
387 382

  
388
    instance = self.instance
389
    src_node_uuid = instance.primary_node
383
    src_node_uuid = self.instance.primary_node
390 384

  
391 385
    if self.op.shutdown:
392 386
      # shutdown the instance, but not the disks
393
      feedback_fn("Shutting down instance %s" % instance.name)
394
      result = self.rpc.call_instance_shutdown(src_node_uuid, instance,
387
      feedback_fn("Shutting down instance %s" % self.instance.name)
388
      result = self.rpc.call_instance_shutdown(src_node_uuid, self.instance,
395 389
                                               self.op.shutdown_timeout,
396 390
                                               self.op.reason)
397 391
      # TODO: Maybe ignore failures if ignore_remove_failures is set
398 392
      result.Raise("Could not shutdown instance %s on"
399
                   " node %s" % (instance.name,
393
                   " node %s" % (self.instance.name,
400 394
                                 self.cfg.GetNodeName(src_node_uuid)))
401 395

  
402 396
    # set the disks ID correctly since call_instance_start needs the
403 397
    # correct drbd minor to create the symlinks
404
    for disk in instance.disks:
398
    for disk in self.instance.disks:
405 399
      self.cfg.SetDiskID(disk, src_node_uuid)
406 400

  
407
    activate_disks = not instance.disks_active
401
    activate_disks = not self.instance.disks_active
408 402

  
409 403
    if activate_disks:
410 404
      # Activate the instance disks if we're exporting a stopped instance
411
      feedback_fn("Activating disks for %s" % instance.name)
412
      StartInstanceDisks(self, instance, None)
405
      feedback_fn("Activating disks for %s" % self.instance.name)
406
      StartInstanceDisks(self, self.instance, None)
413 407

  
414 408
    try:
415 409
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
416
                                                     instance)
410
                                                     self.instance)
417 411

  
418 412
      helper.CreateSnapshots()
419 413
      try:
420 414
        if (self.op.shutdown and
421
            instance.admin_state == constants.ADMINST_UP and
415
            self.instance.admin_state == constants.ADMINST_UP and
422 416
            not self.op.remove_instance):
423 417
          assert not activate_disks
424
          feedback_fn("Starting instance %s" % instance.name)
418
          feedback_fn("Starting instance %s" % self.instance.name)
425 419
          result = self.rpc.call_instance_start(src_node_uuid,
426
                                                (instance, None, None), False,
427
                                                 self.op.reason)
420
                                                (self.instance, None, None),
421
                                                False, self.op.reason)
428 422
          msg = result.fail_msg
429 423
          if msg:
430 424
            feedback_fn("Failed to start instance: %s" % msg)
431
            ShutdownInstanceDisks(self, instance)
425
            ShutdownInstanceDisks(self, self.instance)
432 426
            raise errors.OpExecError("Could not start instance: %s" % msg)
433 427

  
434 428
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
......
450 444
        helper.Cleanup()
451 445

  
452 446
      # Check for backwards compatibility
453
      assert len(dresults) == len(instance.disks)
447
      assert len(dresults) == len(self.instance.disks)
454 448
      assert compat.all(isinstance(i, bool) for i in dresults), \
455 449
             "Not all results are boolean: %r" % dresults
456 450

  
457 451
    finally:
458 452
      if activate_disks:
459
        feedback_fn("Deactivating disks for %s" % instance.name)
460
        ShutdownInstanceDisks(self, instance)
453
        feedback_fn("Deactivating disks for %s" % self.instance.name)
454
        ShutdownInstanceDisks(self, self.instance)
461 455

  
462 456
    if not (compat.all(dresults) and fin_resu):
463 457
      failures = []
......
475 469

  
476 470
    # Remove instance if requested
477 471
    if self.op.remove_instance:
478
      feedback_fn("Removing instance %s" % instance.name)
479
      RemoveInstance(self, feedback_fn, instance,
472
      feedback_fn("Removing instance %s" % self.instance.name)
473
      RemoveInstance(self, feedback_fn, self.instance,
480 474
                     self.op.ignore_remove_failures)
481 475

  
482 476
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
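
Throughout backup.py this revision drops short-lived local aliases (instance = self.instance, instance_name = self.op.instance_name) and reads the attribute at the point of use, re-wrapping call arguments where the longer name would otherwise overflow the line. A minimal, runnable sketch of the pattern; FakeConfig, FakeOpCode and FakeExportLU are illustrative placeholders, not Ganeti classes:

# Runnable sketch only; placeholder classes stand in for ConfigWriter and the opcode.
class FakeConfig(object):
  def GetInstanceInfo(self, name):
    # stand-in for ConfigWriter.GetInstanceInfo
    return {"name": name, "primary_node": "node1.example.com"}

class FakeOpCode(object):
  instance_name = "instance1.example.com"

class FakeExportLU(object):
  def __init__(self):
    self.cfg = FakeConfig()
    self.op = FakeOpCode()

  def CheckPrereqOld(self):
    # before: throwaway alias of the opcode attribute
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)

  def CheckPrereqNew(self):
    # after: the attribute is read where it is needed
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

lu = FakeExportLU()
lu.CheckPrereqNew()
assert lu.instance["name"] == lu.op.instance_name

The behaviour is unchanged; the rewritten call sites simply avoid a name that is assigned once and read once.
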
b/lib/cmdlib/cluster.py
1567 1567
         reasonable values in the response)
1568 1568

  
1569 1569
    """
1570
    node_name = ninfo.name
1571
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1572

  
1573 1570
    # main result, nresult should be a non-empty dict
1574 1571
    test = not nresult or not isinstance(nresult, dict)
1575
    _ErrorIf(test, constants.CV_ENODERPC, node_name,
1572
    self._ErrorIf(test, constants.CV_ENODERPC, ninfo.name,
1576 1573
                  "unable to verify node: no data returned")
1577 1574
    if test:
1578 1575
      return False
......
1583 1580
    test = not (remote_version and
1584 1581
                isinstance(remote_version, (list, tuple)) and
1585 1582
                len(remote_version) == 2)
1586
    _ErrorIf(test, constants.CV_ENODERPC, node_name,
1587
             "connection to node returned invalid data")
1583
    self._ErrorIf(test, constants.CV_ENODERPC, ninfo.name,
1584
                  "connection to node returned invalid data")
1588 1585
    if test:
1589 1586
      return False
1590 1587

  
1591 1588
    test = local_version != remote_version[0]
1592
    _ErrorIf(test, constants.CV_ENODEVERSION, node_name,
1593
             "incompatible protocol versions: master %s,"
1594
             " node %s", local_version, remote_version[0])
1589
    self._ErrorIf(test, constants.CV_ENODEVERSION, ninfo.name,
1590
                  "incompatible protocol versions: master %s,"
1591
                  " node %s", local_version, remote_version[0])
1595 1592
    if test:
1596 1593
      return False
1597 1594

  
......
1599 1596

  
1600 1597
    # full package version
1601 1598
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1602
                  constants.CV_ENODEVERSION, node_name,
1599
                  constants.CV_ENODEVERSION, ninfo.name,
1603 1600
                  "software version mismatch: master %s, node %s",
1604 1601
                  constants.RELEASE_VERSION, remote_version[1],
1605 1602
                  code=self.ETYPE_WARNING)
......
1608 1605
    if ninfo.vm_capable and isinstance(hyp_result, dict):
1609 1606
      for hv_name, hv_result in hyp_result.iteritems():
1610 1607
        test = hv_result is not None
1611
        _ErrorIf(test, constants.CV_ENODEHV, node_name,
1612
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1608
        self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
1609
                      "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1613 1610

  
1614 1611
    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1615 1612
    if ninfo.vm_capable and isinstance(hvp_result, list):
1616 1613
      for item, hv_name, hv_result in hvp_result:
1617
        _ErrorIf(True, constants.CV_ENODEHV, node_name,
1618
                 "hypervisor %s parameter verify failure (source %s): %s",
1619
                 hv_name, item, hv_result)
1614
        self._ErrorIf(True, constants.CV_ENODEHV, ninfo.name,
1615
                      "hypervisor %s parameter verify failure (source %s): %s",
1616
                      hv_name, item, hv_result)
1620 1617

  
1621 1618
    test = nresult.get(constants.NV_NODESETUP,
1622 1619
                       ["Missing NODESETUP results"])
1623
    _ErrorIf(test, constants.CV_ENODESETUP, node_name, "node setup error: %s",
1624
             "; ".join(test))
1620
    self._ErrorIf(test, constants.CV_ENODESETUP, ninfo.name,
1621
                  "node setup error: %s", "; ".join(test))
1625 1622

  
1626 1623
    return True
1627 1624

  
......
1636 1633
    @param nvinfo_endtime: the end time of the RPC call
1637 1634

  
1638 1635
    """
1639
    node_name = ninfo.name
1640
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1641

  
1642 1636
    ntime = nresult.get(constants.NV_TIME, None)
1643 1637
    try:
1644 1638
      ntime_merged = utils.MergeTime(ntime)
1645 1639
    except (ValueError, TypeError):
1646
      _ErrorIf(True, constants.CV_ENODETIME, node_name,
1647
               "Node returned invalid time")
1640
      self._ErrorIf(True, constants.CV_ENODETIME, ninfo.name,
1641
                    "Node returned invalid time")
1648 1642
      return
1649 1643

  
1650 1644
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
......
1654 1648
    else:
1655 1649
      ntime_diff = None
1656 1650

  
1657
    _ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node_name,
1658
             "Node time diverges by at least %s from master node time",
1659
             ntime_diff)
1651
    self._ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, ninfo.name,
1652
                  "Node time diverges by at least %s from master node time",
1653
                  ntime_diff)
1660 1654

  
1661 1655
  def _UpdateVerifyNodeLVM(self, ninfo, nresult, vg_name, nimg):
1662 1656
    """Check the node LVM results and update info for cross-node checks.
......
1672 1666
    if vg_name is None:
1673 1667
      return
1674 1668

  
1675
    node_name = ninfo.name
1676
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1677

  
1678 1669
    # checks vg existence and size > 20G
1679 1670
    vglist = nresult.get(constants.NV_VGLIST, None)
1680 1671
    test = not vglist
1681
    _ErrorIf(test, constants.CV_ENODELVM, node_name,
1682
             "unable to check volume groups")
1672
    self._ErrorIf(test, constants.CV_ENODELVM, ninfo.name,
1673
                  "unable to check volume groups")
1683 1674
    if not test:
1684 1675
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1685 1676
                                            constants.MIN_VG_SIZE)
1686
      _ErrorIf(vgstatus, constants.CV_ENODELVM, node_name, vgstatus)
1677
      self._ErrorIf(vgstatus, constants.CV_ENODELVM, ninfo.name, vgstatus)
1687 1678

  
1688 1679
    # Check PVs
1689 1680
    (errmsgs, pvminmax) = CheckNodePVs(nresult, self._exclusive_storage)
1690 1681
    for em in errmsgs:
1691
      self._Error(constants.CV_ENODELVM, node_name, em)
1682
      self._Error(constants.CV_ENODELVM, ninfo.name, em)
1692 1683
    if pvminmax is not None:
1693 1684
      (nimg.pv_min, nimg.pv_max) = pvminmax
1694 1685

  
......
1754 1745
    if not bridges:
1755 1746
      return
1756 1747

  
1757
    node_name = ninfo.name
1758
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1759

  
1760 1748
    missing = nresult.get(constants.NV_BRIDGES, None)
1761 1749
    test = not isinstance(missing, list)
1762
    _ErrorIf(test, constants.CV_ENODENET, node_name,
1763
             "did not return valid bridge information")
1750
    self._ErrorIf(test, constants.CV_ENODENET, ninfo.name,
1751
                  "did not return valid bridge information")
1764 1752
    if not test:
1765
      _ErrorIf(bool(missing), constants.CV_ENODENET, node_name,
1766
               "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
1753
      self._ErrorIf(bool(missing), constants.CV_ENODENET, ninfo.name,
1754
                    "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
1767 1755

  
1768 1756
  def _VerifyNodeUserScripts(self, ninfo, nresult):
1769 1757
    """Check the results of user scripts presence and executability on the node
......
1773 1761
    @param nresult: the remote results for the node
1774 1762

  
1775 1763
    """
1776
    node_name = ninfo.name
1777

  
1778 1764
    test = not constants.NV_USERSCRIPTS in nresult
1779
    self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, node_name,
1765
    self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, ninfo.name,
1780 1766
                  "did not return user scripts information")
1781 1767

  
1782 1768
    broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
1783 1769
    if not test:
1784
      self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, node_name,
1770
      self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, ninfo.name,
1785 1771
                    "user scripts not present or not executable: %s" %
1786 1772
                    utils.CommaJoin(sorted(broken_scripts)))
1787 1773

  
......
1793 1779
    @param nresult: the remote results for the node
1794 1780

  
1795 1781
    """
1796
    node_name = ninfo.name
1797
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1798

  
1799 1782
    test = constants.NV_NODELIST not in nresult
1800
    _ErrorIf(test, constants.CV_ENODESSH, node_name,
1801
             "node hasn't returned node ssh connectivity data")
1783
    self._ErrorIf(test, constants.CV_ENODESSH, ninfo.name,
1784
                  "node hasn't returned node ssh connectivity data")
1802 1785
    if not test:
1803 1786
      if nresult[constants.NV_NODELIST]:
1804 1787
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1805
          _ErrorIf(True, constants.CV_ENODESSH, node_name,
1806
                   "ssh communication with node '%s': %s", a_node, a_msg)
1788
          self._ErrorIf(True, constants.CV_ENODESSH, ninfo.name,
1789
                        "ssh communication with node '%s': %s", a_node, a_msg)
1807 1790

  
1808 1791
    test = constants.NV_NODENETTEST not in nresult
1809
    _ErrorIf(test, constants.CV_ENODENET, node_name,
1810
             "node hasn't returned node tcp connectivity data")
1792
    self._ErrorIf(test, constants.CV_ENODENET, ninfo.name,
1793
                  "node hasn't returned node tcp connectivity data")
1811 1794
    if not test:
1812 1795
      if nresult[constants.NV_NODENETTEST]:
1813 1796
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1814 1797
        for anode in nlist:
1815
          _ErrorIf(True, constants.CV_ENODENET, node_name,
1816
                   "tcp communication with node '%s': %s",
1817
                   anode, nresult[constants.NV_NODENETTEST][anode])
1798
          self._ErrorIf(True, constants.CV_ENODENET, ninfo.name,
1799
                        "tcp communication with node '%s': %s",
1800
                        anode, nresult[constants.NV_NODENETTEST][anode])
1818 1801

  
1819 1802
    test = constants.NV_MASTERIP not in nresult
1820
    _ErrorIf(test, constants.CV_ENODENET, node_name,
1821
             "node hasn't returned node master IP reachability data")
1803
    self._ErrorIf(test, constants.CV_ENODENET, ninfo.name,
1804
                  "node hasn't returned node master IP reachability data")
1822 1805
    if not test:
1823 1806
      if not nresult[constants.NV_MASTERIP]:
1824 1807
        if ninfo.uuid == self.master_node:
1825 1808
          msg = "the master node cannot reach the master IP (not configured?)"
1826 1809
        else:
1827 1810
          msg = "cannot reach the master IP"
1828
        _ErrorIf(True, constants.CV_ENODENET, node_name, msg)
1811
        self._ErrorIf(True, constants.CV_ENODENET, ninfo.name, msg)
1829 1812

  
1830 1813
  def _VerifyInstance(self, instance, inst_config, node_image,
1831 1814
                      diskstatus):
......
1836 1819
    state.
1837 1820

  
1838 1821
    """
1839
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1840 1822
    pnode = inst_config.primary_node
1841 1823
    pnode_img = node_image[pnode]
1842 1824
    groupinfo = self.cfg.GetAllNodeGroupsInfo()
......
1848 1830
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
1849 1831
                                                            self.group_info)
1850 1832
    err = ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
1851
    _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
1852
             code=self.ETYPE_WARNING)
1833
    self._ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance,
1834
                  utils.CommaJoin(err), code=self.ETYPE_WARNING)
1853 1835

  
1854 1836
    for node in node_vol_should:
1855 1837
      n_img = node_image[node]
......
1858 1840
        continue
1859 1841
      for volume in node_vol_should[node]:
1860 1842
        test = volume not in n_img.volumes
1861
        _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
1862
                 "volume %s missing on node %s", volume,
1863
                 self.cfg.GetNodeName(node))
1843
        self._ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
1844
                      "volume %s missing on node %s", volume,
1845
                      self.cfg.GetNodeName(node))
1864 1846

  
1865 1847
    if inst_config.admin_state == constants.ADMINST_UP:
1866 1848
      test = instance not in pnode_img.instances and not pnode_img.offline
1867
      _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
1868
               "instance not running on its primary node %s",
1869
               self.cfg.GetNodeName(pnode))
1870
      _ErrorIf(pnode_img.offline, constants.CV_EINSTANCEBADNODE, instance,
1871
               "instance is marked as running and lives on offline node %s",
1872
               self.cfg.GetNodeName(pnode))
1849
      self._ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
1850
                    "instance not running on its primary node %s",
1851
                     self.cfg.GetNodeName(pnode))
1852
      self._ErrorIf(pnode_img.offline, constants.CV_EINSTANCEBADNODE, instance,
1853
                    "instance is marked as running and lives on"
1854
                    " offline node %s", self.cfg.GetNodeName(pnode))
1873 1855

  
1874 1856
    diskdata = [(nname, success, status, idx)
1875 1857
                for (nname, disks) in diskstatus.items()
......
1880 1862
      # node here
1881 1863
      snode = node_image[nname]
1882 1864
      bad_snode = snode.ghost or snode.offline
1883
      _ErrorIf(inst_config.disks_active and
1884
               not success and not bad_snode,
1885
               constants.CV_EINSTANCEFAULTYDISK, instance,
1886
               "couldn't retrieve status for disk/%s on %s: %s",
1887
               idx, self.cfg.GetNodeName(nname), bdev_status)
1888
      _ErrorIf((inst_config.disks_active and
1889
                success and bdev_status.ldisk_status == constants.LDS_FAULTY),
1890
               constants.CV_EINSTANCEFAULTYDISK, instance,
1891
               "disk/%s on %s is faulty", idx, self.cfg.GetNodeName(nname))
1892

  
1893
    _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
1894
             constants.CV_ENODERPC, pnode, "instance %s, connection to"
1895
             " primary node failed", instance)
1896

  
1897
    _ErrorIf(len(inst_config.secondary_nodes) > 1,
1898
             constants.CV_EINSTANCELAYOUT,
1899
             instance, "instance has multiple secondary nodes: %s",
1900
             utils.CommaJoin(inst_config.secondary_nodes),
1901
             code=self.ETYPE_WARNING)
1865
      self._ErrorIf(inst_config.disks_active and
1866
                    not success and not bad_snode,
1867
                    constants.CV_EINSTANCEFAULTYDISK, instance,
1868
                    "couldn't retrieve status for disk/%s on %s: %s",
1869
                    idx, self.cfg.GetNodeName(nname), bdev_status)
1870
      self._ErrorIf((inst_config.disks_active and
1871
                     success and
1872
                     bdev_status.ldisk_status == constants.LDS_FAULTY),
1873
                    constants.CV_EINSTANCEFAULTYDISK, instance,
1874
                    "disk/%s on %s is faulty", idx, self.cfg.GetNodeName(nname))
1875

  
1876
    self._ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
1877
                  constants.CV_ENODERPC, pnode, "instance %s, connection to"
1878
                  " primary node failed", instance)
1879

  
1880
    self._ErrorIf(len(inst_config.secondary_nodes) > 1,
1881
                  constants.CV_EINSTANCELAYOUT,
1882
                  instance, "instance has multiple secondary nodes: %s",
1883
                  utils.CommaJoin(inst_config.secondary_nodes),
1884
                  code=self.ETYPE_WARNING)
1902 1885

  
1903 1886
    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
1904 1887
                                               inst_config.all_nodes)
......
1915 1898
                    inst_config.disk_template,
1916 1899
                    utils.CommaJoin(self.cfg.GetNodeNames(es_nodes)))
1917 1900
      for (idx, disk) in enumerate(inst_config.disks):
1918
        _ErrorIf(disk.spindles is None,
1919
                 constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance,
1920
                 "number of spindles not configured for disk %s while"
1921
                 " exclusive storage is enabled, try running"
1922
                 " gnt-cluster repair-disk-sizes",
1923
                 idx)
1901
        self._ErrorIf(disk.spindles is None,
1902
                      constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance,
1903
                      "number of spindles not configured for disk %s while"
1904
                      " exclusive storage is enabled, try running"
1905
                      " gnt-cluster repair-disk-sizes", idx)
1924 1906

  
1925 1907
    if inst_config.disk_template in constants.DTS_INT_MIRROR:
1926 1908
      instance_nodes = utils.NiceSort(inst_config.all_nodes)
......
1947 1929
    inst_nodes_offline = []
1948 1930
    for snode in inst_config.secondary_nodes:
1949 1931
      s_img = node_image[snode]
1950
      _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
1951
               snode, "instance %s, connection to secondary node failed",
1952
               instance)
1932
      self._ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
1933
                    snode, "instance %s, connection to secondary node failed",
1934
                    instance)
1953 1935

  
1954 1936
      if s_img.offline:
1955 1937
        inst_nodes_offline.append(snode)
1956 1938

  
1957 1939
    # warn that the instance lives on offline nodes
1958
    _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
1959
             "instance has offline secondary node(s) %s",
1960
             utils.CommaJoin(self.cfg.GetNodeNames(inst_nodes_offline)))
1940
    self._ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
1941
                  "instance has offline secondary node(s) %s",
1942
                  utils.CommaJoin(self.cfg.GetNodeNames(inst_nodes_offline)))
1961 1943
    # ... or ghost/non-vm_capable nodes
1962 1944
    for node in inst_config.all_nodes:
1963
      _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
1964
               instance, "instance lives on ghost node %s",
1965
               self.cfg.GetNodeName(node))
1966
      _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
1967
               instance, "instance lives on non-vm_capable node %s",
1968
               self.cfg.GetNodeName(node))
1945
      self._ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
1946
                    instance, "instance lives on ghost node %s",
1947
                    self.cfg.GetNodeName(node))
1948
      self._ErrorIf(not node_image[node].vm_capable,
1949
                    constants.CV_EINSTANCEBADNODE, instance,
1950
                    "instance lives on non-vm_capable node %s",
1951
                    self.cfg.GetNodeName(node))
1969 1952

  
1970 1953
  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1971 1954
    """Verify if there are any unknown volumes in the cluster.
......
2155 2138
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
2156 2139

  
2157 2140
    """
2158
    node_name = ninfo.name
2159
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2160

  
2161 2141
    if drbd_helper:
2162 2142
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2163 2143
      test = (helper_result is None)
2164
      _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node_name,
2165
               "no drbd usermode helper returned")
2144
      self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
2145
                    "no drbd usermode helper returned")
2166 2146
      if helper_result:
2167 2147
        status, payload = helper_result
2168 2148
        test = not status
2169
        _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node_name,
2170
                 "drbd usermode helper check unsuccessful: %s", payload)
2149
        self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
2150
                      "drbd usermode helper check unsuccessful: %s", payload)
2171 2151
        test = status and (payload != drbd_helper)
2172
        _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node_name,
2173
                 "wrong drbd usermode helper: %s", payload)
2152
        self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
2153
                      "wrong drbd usermode helper: %s", payload)
2174 2154

  
2175 2155
    # compute the DRBD minors
2176 2156
    node_drbd = {}
2177 2157
    for minor, instance in drbd_map[ninfo.uuid].items():
2178 2158
      test = instance not in instanceinfo
2179
      _ErrorIf(test, constants.CV_ECLUSTERCFG, None,
2180
               "ghost instance '%s' in temporary DRBD map", instance)
2159
      self._ErrorIf(test, constants.CV_ECLUSTERCFG, None,
2160
                    "ghost instance '%s' in temporary DRBD map", instance)
2181 2161
        # ghost instance should not be running, but otherwise we
2182 2162
        # don't give double warnings (both ghost instance and
2183 2163
        # unallocated minor in use)
......
2190 2170
    # and now check them
2191 2171
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
2192 2172
    test = not isinstance(used_minors, (tuple, list))
2193
    _ErrorIf(test, constants.CV_ENODEDRBD, node_name,
2194
             "cannot parse drbd status file: %s", str(used_minors))
2173
    self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
2174
                  "cannot parse drbd status file: %s", str(used_minors))
2195 2175
    if test:
2196 2176
      # we cannot check drbd status
2197 2177
      return
2198 2178

  
2199 2179
    for minor, (iname, must_exist) in node_drbd.items():
2200 2180
      test = minor not in used_minors and must_exist
2201
      _ErrorIf(test, constants.CV_ENODEDRBD, node_name,
2202
               "drbd minor %d of instance %s is not active", minor, iname)
2181
      self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
2182
                    "drbd minor %d of instance %s is not active", minor, iname)
2203 2183
    for minor in used_minors:
2204 2184
      test = minor not in node_drbd
2205
      _ErrorIf(test, constants.CV_ENODEDRBD, node_name,
2206
               "unallocated drbd minor %d is in use", minor)
2185
      self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
2186
                    "unallocated drbd minor %d is in use", minor)
2207 2187

  
2208 2188
  def _UpdateNodeOS(self, ninfo, nresult, nimg):
2209 2189
    """Builds the node OS structures.
......
2214 2194
    @param nimg: the node image object
2215 2195

  
2216 2196
    """
2217
    node_name = ninfo.name
2218
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2219

  
2220 2197
    remote_os = nresult.get(constants.NV_OSLIST, None)
2221 2198
    test = (not isinstance(remote_os, list) or
2222 2199
            not compat.all(isinstance(v, list) and len(v) == 7
2223 2200
                           for v in remote_os))
2224 2201

  
2225
    _ErrorIf(test, constants.CV_ENODEOS, node_name,
2226
             "node hasn't returned valid OS data")
2202
    self._ErrorIf(test, constants.CV_ENODEOS, ninfo.name,
2203
                  "node hasn't returned valid OS data")
2227 2204

  
2228 2205
    nimg.os_fail = test
2229 2206

  
......
2255 2232
    @param base: the 'template' node we match against (e.g. from the master)
2256 2233

  
2257 2234
    """
2258
    node_name = ninfo.name
2259
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2260

  
2261 2235
    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
2262 2236

  
2263 2237
    beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
2264 2238
    for os_name, os_data in nimg.oslist.items():
2265 2239
      assert os_data, "Empty OS status for OS %s?!" % os_name
2266 2240
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
2267
      _ErrorIf(not f_status, constants.CV_ENODEOS, node_name,
2268
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
2269
      _ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node_name,
2270
               "OS '%s' has multiple entries (first one shadows the rest): %s",
2271
               os_name, utils.CommaJoin([v[0] for v in os_data]))
2241
      self._ErrorIf(not f_status, constants.CV_ENODEOS, ninfo.name,
2242
                    "Invalid OS %s (located at %s): %s",
2243
                    os_name, f_path, f_diag)
2244
      self._ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, ninfo.name,
2245
                    "OS '%s' has multiple entries"
2246
                    " (first one shadows the rest): %s",
2247
                    os_name, utils.CommaJoin([v[0] for v in os_data]))
2272 2248
      # comparisons with the 'base' image
2273 2249
      test = os_name not in base.oslist
2274
      _ErrorIf(test, constants.CV_ENODEOS, node_name,
2275
               "Extra OS %s not present on reference node (%s)",
2276
               os_name, self.cfg.GetNodeName(base.uuid))
2250
      self._ErrorIf(test, constants.CV_ENODEOS, ninfo.name,
2251
                    "Extra OS %s not present on reference node (%s)",
2252
                    os_name, self.cfg.GetNodeName(base.uuid))
2277 2253
      if test:
2278 2254
        continue
2279 2255
      assert base.oslist[os_name], "Base node has empty OS status?"
......
2285 2261
                         ("variants list", f_var, b_var),
2286 2262
                         ("parameters", beautify_params(f_param),
2287 2263
                          beautify_params(b_param))]:
2288
        _ErrorIf(a != b, constants.CV_ENODEOS, node_name,
2289
                 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2290
                 kind, os_name, self.cfg.GetNodeName(base.uuid),
2291
                 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2264
        self._ErrorIf(a != b, constants.CV_ENODEOS, ninfo.name,
2265
                      "OS %s for %s differs from reference node %s:"
2266
                      " [%s] vs. [%s]", kind, os_name,
2267
                      self.cfg.GetNodeName(base.uuid),
2268
                      utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2292 2269

  
2293 2270
    # check any missing OSes
2294 2271
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2295
    _ErrorIf(missing, constants.CV_ENODEOS, node_name,
2296
             "OSes present on reference node %s but missing on this node: %s",
2297
             self.cfg.GetNodeName(base.uuid), utils.CommaJoin(missing))
2272
    self._ErrorIf(missing, constants.CV_ENODEOS, ninfo.name,
2273
                  "OSes present on reference node %s"
2274
                  " but missing on this node: %s",
2275
                  self.cfg.GetNodeName(base.uuid), utils.CommaJoin(missing))
2298 2276

  
2299 2277
  def _VerifyFileStoragePaths(self, ninfo, nresult, is_master):
2300 2278
    """Verifies paths in L{pathutils.FILE_STORAGE_PATHS_FILE}.
......
2306 2284
    @param is_master: Whether node is the master node
2307 2285

  
2308 2286
    """
2309
    node_name = ninfo.name
2310

  
2311 2287
    if (is_master and
2312 2288
        (constants.ENABLE_FILE_STORAGE or
2313 2289
         constants.ENABLE_SHARED_FILE_STORAGE)):
......
2315 2291
        fspaths = nresult[constants.NV_FILE_STORAGE_PATHS]
2316 2292
      except KeyError:
2317 2293
        # This should never happen
2318
        self._ErrorIf(True, constants.CV_ENODEFILESTORAGEPATHS, node_name,
2294
        self._ErrorIf(True, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
2319 2295
                      "Node did not return forbidden file storage paths")
2320 2296
      else:
2321
        self._ErrorIf(fspaths, constants.CV_ENODEFILESTORAGEPATHS, node_name,
2297
        self._ErrorIf(fspaths, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
2322 2298
                      "Found forbidden file storage paths: %s",
2323 2299
                      utils.CommaJoin(fspaths))
2324 2300
    else:
2325 2301
      self._ErrorIf(constants.NV_FILE_STORAGE_PATHS in nresult,
2326
                    constants.CV_ENODEFILESTORAGEPATHS, node_name,
2302
                    constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
2327 2303
                    "Node should not have returned forbidden file storage"
2328 2304
                    " paths")
2329 2305

  
......
2335 2311
    @param nresult: the remote results for the node
2336 2312

  
2337 2313
    """
2338
    node_name = ninfo.name
2339 2314
    # We just have to verify the paths on master and/or master candidates
2340 2315
    # as the oob helper is invoked on the master
2341 2316
    if ((ninfo.master_candidate or ninfo.master_capable) and
2342 2317
        constants.NV_OOB_PATHS in nresult):
2343 2318
      for path_result in nresult[constants.NV_OOB_PATHS]:
2344 2319
        self._ErrorIf(path_result, constants.CV_ENODEOOBPATH,
2345
                      node_name, path_result)
2320
                      ninfo.name, path_result)
2346 2321

  
2347 2322
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2348 2323
    """Verifies and updates the node volume data.
......
2357 2332
    @param vg_name: the configured VG name
2358 2333

  
2359 2334
    """
2360
    node_name = ninfo.name
2361
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2362

  
2363 2335
    nimg.lvm_fail = True
2364 2336
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2365 2337
    if vg_name is None:
2366 2338
      pass
2367 2339
    elif isinstance(lvdata, basestring):
2368
      _ErrorIf(True, constants.CV_ENODELVM, node_name,
2369
               "LVM problem on node: %s", utils.SafeEncode(lvdata))
2340
      self._ErrorIf(True, constants.CV_ENODELVM, ninfo.name,
2341
                    "LVM problem on node: %s", utils.SafeEncode(lvdata))
2370 2342
    elif not isinstance(lvdata, dict):
2371
      _ErrorIf(True, constants.CV_ENODELVM, node_name,
2372
               "rpc call to node failed (lvlist)")
2343
      self._ErrorIf(True, constants.CV_ENODELVM, ninfo.name,
2344
                    "rpc call to node failed (lvlist)")
2373 2345
    else:
2374 2346
      nimg.volumes = lvdata
2375 2347
      nimg.lvm_fail = False
......
2407 2379
    @param vg_name: the configured VG name
2408 2380

  
2409 2381
    """
2410
    node_name = ninfo.name
2411
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2412

  
2413 2382
    # try to read free memory (from the hypervisor)
2414 2383
    hv_info = nresult.get(constants.NV_HVINFO, None)
2415 2384
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
2416
    _ErrorIf(test, constants.CV_ENODEHV, node_name,
2417
             "rpc call to node failed (hvinfo)")
2385
    self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
2386
                  "rpc call to node failed (hvinfo)")
2418 2387
    if not test:
2419 2388
      try:
2420 2389
        nimg.mfree = int(hv_info["memory_free"])
2421 2390
      except (ValueError, TypeError):
2422
        _ErrorIf(True, constants.CV_ENODERPC, node_name,
2423
                 "node returned invalid nodeinfo, check hypervisor")
2391
        self._ErrorIf(True, constants.CV_ENODERPC, ninfo.name,
2392
                      "node returned invalid nodeinfo, check hypervisor")
2424 2393

  
2425 2394
    # FIXME: devise a free space model for file based instances as well
2426 2395
    if vg_name is not None:
2427 2396
      test = (constants.NV_VGLIST not in nresult or
2428 2397
              vg_name not in nresult[constants.NV_VGLIST])
2429
      _ErrorIf(test, constants.CV_ENODELVM, node_name,
2430
               "node didn't return data for the volume group '%s'"
2431
               " - it is either missing or broken", vg_name)
2398
      self._ErrorIf(test, constants.CV_ENODELVM, ninfo.name,
2399
                    "node didn't return data for the volume group '%s'"
2400
                    " - it is either missing or broken", vg_name)
2432 2401
      if not test:
2433 2402
        try:
2434 2403
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2435 2404
        except (ValueError, TypeError):
2436
          _ErrorIf(True, constants.CV_ENODERPC, node_name,
2437
                   "node returned invalid LVM info, check LVM status")
2405
          self._ErrorIf(True, constants.CV_ENODERPC, ninfo.name,
2406
                        "node returned invalid LVM info, check LVM status")
2438 2407

  
2439 2408
  def _CollectDiskInfo(self, node_uuids, node_image, instanceinfo):
2440 2409
    """Gets per-disk status information for all instances.
......
2606 2575
      return True
2607 2576

  
2608 2577
    self.bad = False
2609
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2610 2578
    verbose = self.op.verbose
2611 2579
    self._feedback_fn = feedback_fn
2612 2580

  
......
2828 2796
        feedback_fn("* Verifying node %s (%s)" % (node_i.name, ntype))
2829 2797

  
2830 2798
      msg = all_nvinfo[node_i.uuid].fail_msg
2831
      _ErrorIf(msg, constants.CV_ENODERPC, node_i.name,
2832
               "while contacting node: %s", msg)
2799
      self._ErrorIf(msg, constants.CV_ENODERPC, node_i.name,
2800
                    "while contacting node: %s", msg)
2833 2801
      if msg:
2834 2802
        nimg.rpc_fail = True
2835 2803
        continue
......
2867 2835

  
2868 2836
        for inst in non_primary_inst:
2869 2837
          test = inst in self.all_inst_info
2870
          _ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
2871
                   "instance should not run on node %s", node_i.name)
2872
          _ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
2873
                   "node is running unknown instance %s", inst)
2838
          self._ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
2839
                        "instance should not run on node %s", node_i.name)
2840
          self._ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
2841
                        "node is running unknown instance %s", inst)
2874 2842

  
2875 2843
    self._VerifyGroupDRBDVersion(all_nvinfo)
2876 2844
    self._VerifyGroupLVM(node_image, vg_name)
b/lib/cmdlib/common.py
170 170
  """
171 171
  hm = lu.proc.BuildHooksManager(lu)
172 172
  try:
173
    node_names = [node_name]
174
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=node_names)
173
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name])
175 174
  except Exception, err: # pylint: disable=W0703
176 175
    lu.LogWarning("Errors occurred running hooks on %s: %s",
177 176
                  node_name, err)
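
In common.py the single-use temporary holding the node-name list is inlined into the RunPhase call. A tiny sketch with a stub hooks manager; StubHooksManager is hypothetical and only mirrors the keyword argument used here:

class StubHooksManager(object):
  def RunPhase(self, phase, node_names=None):
    return (phase, node_names)

def RunPostHook(hm, node_name):
  # the single-use "node_names" temporary is inlined into the call
  return hm.RunPhase("post", node_names=[node_name])

assert RunPostHook(StubHooksManager(), "node1.example.com") == \
  ("post", ["node1.example.com"])
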
b/lib/cmdlib/instance.py
501 501
    """
502 502
    self.needed_locks = {}
503 503

  
504
    instance_name = self.op.instance_name
505 504
    # this is just a preventive check, but someone might still add this
506 505
    # instance in the meantime, and creation will fail at lock-add time
507
    if instance_name in self.cfg.GetInstanceList():
506
    if self.op.instance_name in self.cfg.GetInstanceList():
508 507
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
509
                                 instance_name, errors.ECODE_EXISTS)
508
                                 self.op.instance_name, errors.ECODE_EXISTS)
510 509

  
511
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
510
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
512 511

  
513 512
    if self.op.iallocator:
514 513
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
......
654 653
    """
655 654
    assert self.op.mode == constants.INSTANCE_IMPORT
656 655

  
657
    src_node_uuid = self.op.src_node_uuid
658
    src_path = self.op.src_path
659

  
660
    if src_node_uuid is None:
656
    if self.op.src_node_uuid is None:
661 657
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
662 658
      exp_list = self.rpc.call_export_list(locked_nodes)
663 659
      found = False
664 660
      for node in exp_list:
665 661
        if exp_list[node].fail_msg:
666 662
          continue
667
        if src_path in exp_list[node].payload:
663
        if self.op.src_path in exp_list[node].payload:
668 664
          found = True
669 665
          self.op.src_node = node
670
          self.op.src_node_uuid = src_node_uuid = \
671
            self.cfg.GetNodeInfoByName(node).uuid
672
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
673
                                                       src_path)
666
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
667
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
668
                                            self.op.src_path)
674 669
          break
675 670
      if not found:
676 671
        raise errors.OpPrereqError("No export found for relative path %s" %
677
                                   src_path, errors.ECODE_INVAL)
672
                                   self.op.src_path, errors.ECODE_INVAL)
678 673

  
679
    CheckNodeOnline(self, src_node_uuid)
680
    result = self.rpc.call_export_info(src_node_uuid, src_path)
681
    result.Raise("No export or invalid export found in dir %s" % src_path)
674
    CheckNodeOnline(self, self.op.src_node_uuid)
675
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
676
    result.Raise("No export or invalid export found in dir %s" %
677
                 self.op.src_path)
682 678

  
683 679
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
684 680
    if not export_info.has_section(constants.INISECT_EXP):
......
1179 1175
    """Create and add the instance to the cluster.
1180 1176

  
1181 1177
    """
1182
    instance = self.op.instance_name
1183
    pnode_name = self.pnode.name
1184

  
1185 1178
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1186 1179
                self.owned_locks(locking.LEVEL_NODE)), \
1187 1180
      "Node locks differ from node resource locks"
......
1199 1192
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1200 1193
    disks = GenerateDiskTemplate(self,
1201 1194
                                 self.op.disk_template,
1202
                                 instance, self.pnode.uuid,
1195
                                 self.op.instance_name, self.pnode.uuid,
1203 1196
                                 self.secondaries,
1204 1197
                                 self.disks,
1205 1198
                                 self.instance_file_storage_dir,
......
1208 1201
                                 feedback_fn,
1209 1202
                                 self.cfg.GetGroupDiskParams(nodegroup))
1210 1203

  
1211
    iobj = objects.Instance(name=instance, os=self.op.os_type,
1204
    iobj = objects.Instance(name=self.op.instance_name, os=self.op.os_type,
1212 1205
                            primary_node=self.pnode.uuid,
1213 1206
                            nics=self.nics, disks=disks,
1214 1207
                            disk_template=self.op.disk_template,
......
1244 1237
        CreateDisks(self, iobj)
1245 1238
      except errors.OpExecError:
1246 1239
        self.LogWarning("Device creation failed")
1247
        self.cfg.ReleaseDRBDMinors(instance)
1240
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1248 1241
        raise
1249 1242

  
1250
    feedback_fn("adding instance %s to cluster config" % instance)
1243
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1251 1244

  
1252 1245
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1253 1246

  
......
1316 1309
            for idx, success in enumerate(result.payload):
1317 1310
              if not success:
1318 1311
                logging.warn("pause-sync of instance %s for disk %d failed",
1319
                             instance, idx)
1312
                             self.op.instance_name, idx)
1320 1313

  
1321 1314
          feedback_fn("* running the instance OS create scripts...")
1322 1315
          # FIXME: pass debug option from opcode to backend
......
1331 1324
            for idx, success in enumerate(result.payload):
1332 1325
              if not success:
1333 1326
                logging.warn("resume-sync of instance %s for disk %d failed",
1334
                             instance, idx)
1327
                             self.op.instance_name, idx)
1335 1328

  
1336 1329
          os_add_result.Raise("Could not add os for instance %s"
1337
                              " on node %s" % (instance, pnode_name))
1330
                              " on node %s" % (self.op.instance_name,
1331
                                               self.pnode.name))
1338 1332

  
1339 1333
      else:
1340 1334
        if self.op.mode == constants.INSTANCE_IMPORT:
......
1362 1356
                                                  iobj, transfers)
1363 1357
          if not compat.all(import_result):
1364 1358
            self.LogWarning("Some disks for instance %s on node %s were not"
1365
                            " imported successfully" % (instance, pnode_name))
1359
                            " imported successfully" % (self.op.instance_name,
1360
                                                        self.pnode.name))
1366 1361

  
1367 1362
          rename_from = self._old_instance_name
1368 1363

  
......
1385 1380
            # TODO: Should the instance still be started, even if some disks
1386 1381
            # failed to import (valid for local imports, too)?
1387 1382
            self.LogWarning("Some disks for instance %s on node %s were not"
1388
                            " imported successfully" % (instance, pnode_name))
1383
                            " imported successfully" % (self.op.instance_name,
1384
                                                        self.pnode.name))
1389 1385

  
1390 1386
          rename_from = self.source_instance_name
1391 1387

  
......
1395 1391
                                       % self.op.mode)
1396 1392

  
1397 1393
        # Run rename script on newly imported instance
1398
        assert iobj.name == instance
1399
        feedback_fn("Running rename script for %s" % instance)
1394
        assert iobj.name == self.op.instance_name
1395
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1400 1396
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1401 1397
                                                   rename_from,
1402 1398
                                                   self.op.debug_level)
1403 1399
        result.Warn("Failed to run rename script for %s on node %s" %
1404
                    (instance, pnode_name), self.LogWarning)
1400
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1405 1401

  
1406 1402
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1407 1403

  
1408 1404
    if self.op.start:
1409 1405
      iobj.admin_state = constants.ADMINST_UP
1410 1406
      self.cfg.Update(iobj, feedback_fn)
1411
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1407
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1408
                   self.pnode.name)
1412 1409
      feedback_fn("* starting instance...")
1413 1410
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1414 1411
                                            False, self.op.reason)
......
1484 1481
    """Rename the instance.
1485 1482

  
1486 1483
    """
1487
    inst = self.instance
1488
    old_name = inst.name
1484
    old_name = self.instance.name
1489 1485

  
1490 1486
    rename_file_storage = False
1491
    if (inst.disk_template in constants.DTS_FILEBASED and
1492
        self.op.new_name != inst.name):
1493
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1487
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1488
        self.op.new_name != self.instance.name):
1489
      old_file_storage_dir = os.path.dirname(
1490
                               self.instance.disks[0].logical_id[1])
1494 1491
      rename_file_storage = True
1495 1492

  
1496
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1493
    self.cfg.RenameInstance(self.instance.name, self.op.new_name)
1497 1494
    # Change the instance lock. This is definitely safe while we hold the BGL.
1498 1495
    # Otherwise the new lock would have to be added in acquired mode.
1499 1496
    assert self.REQ_BGL
......
1502 1499
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1503 1500

  
1504 1501
    # re-read the instance from the configuration after rename
1505
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1502
    renamed_inst = self.cfg.GetInstanceInfo(self.op.new_name)
1506 1503

  
1507 1504
    if rename_file_storage:
1508
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1509
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1505
      new_file_storage_dir = os.path.dirname(
1506
                               renamed_inst.disks[0].logical_id[1])
1507
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1510 1508
                                                     old_file_storage_dir,
1511 1509
                                                     new_file_storage_dir)
1512 1510
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1513 1511
                   " (but the instance has been renamed in Ganeti)" %
1514
                   (self.cfg.GetNodeName(inst.primary_node),
1512
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1515 1513
                    old_file_storage_dir, new_file_storage_dir))
1516 1514

  
1517
    StartInstanceDisks(self, inst, None)
1515
    StartInstanceDisks(self, renamed_inst, None)
1518 1516
    # update info on disks
1519
    info = GetInstanceInfoText(inst)
1520
    for (idx, disk) in enumerate(inst.disks):
1521
      for node_uuid in inst.all_nodes:
1517
    info = GetInstanceInfoText(renamed_inst)
1518
    for (idx, disk) in enumerate(renamed_inst.disks):
1519
      for node_uuid in renamed_inst.all_nodes:
1522 1520
        self.cfg.SetDiskID(disk, node_uuid)
1523 1521
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
1524 1522
        result.Warn("Error setting info on node %s for disk %s" %
1525 1523
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1526 1524
    try:
1527
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1528
                                                 old_name, self.op.debug_level)
1525
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1526
                                                 renamed_inst, old_name,
1527
                                                 self.op.debug_level)
1529 1528
      result.Warn("Could not run OS rename script for instance %s on node %s"
1530 1529
                  " (but the instance has been renamed in Ganeti)" %
1531
                  (inst.name, self.cfg.GetNodeName(inst.primary_node)),
1530
                  (renamed_inst.name,
1531
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1532 1532
                  self.LogWarning)
1533 1533
    finally:
1534
      ShutdownInstanceDisks(self, inst)
1534
      ShutdownInstanceDisks(self, renamed_inst)
1535 1535

  
1536
    return inst.name
1536
    return renamed_inst.name
1537 1537

  
1538 1538

  
1539 1539
class LUInstanceRemove(LogicalUnit):
......
1590 1590
    """Remove the instance.
1591 1591

  
1592 1592
    """
1593
    instance = self.instance
1594
    logging.info("Shutting down instance %s on node %s",
1595
                 instance.name, self.cfg.GetNodeName(instance.primary_node))
1593
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1594
                 self.cfg.GetNodeName(self.instance.primary_node))
1596 1595

  
1597
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1596
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1597
                                             self.instance,
1598 1598
                                             self.op.shutdown_timeout,
1599 1599
                                             self.op.reason)
1600 1600
    if self.op.ignore_failures:
1601 1601
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1602 1602
    else:
1603 1603
      result.Raise("Could not shutdown instance %s on node %s" %
1604
                   (instance.name, self.cfg.GetNodeName(instance.primary_node)))
1604
                   (self.instance.name,
1605
                    self.cfg.GetNodeName(self.instance.primary_node)))
1605 1606

  
1606 1607
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1607 1608
            self.owned_locks(locking.LEVEL_NODE_RES))
1608
    assert not (set(instance.all_nodes) -
1609
    assert not (set(self.instance.all_nodes) -
1609 1610
                self.owned_locks(locking.LEVEL_NODE)), \
1610 1611
      "Not owning correct locks"
1611 1612

  
1612
    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1613
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1613 1614

  
1614 1615

  
1615 1616
class LUInstanceMove(LogicalUnit):
......
1667 1668
    This checks that the instance is in the cluster.
1668 1669

  
1669 1670
    """
1670
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1671
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1671 1672
    assert self.instance is not None, \
1672 1673
      "Cannot retrieve locked instance %s" % self.op.instance_name
1673 1674

  
1674
    if instance.disk_template not in constants.DTS_COPYABLE:
1675
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1675 1676
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1676
                                 instance.disk_template, errors.ECODE_STATE)
1677
                                 self.instance.disk_template,
1678
                                 errors.ECODE_STATE)
1677 1679

  
1678 1680
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1679 1681
    assert target_node is not None, \
1680 1682
      "Cannot retrieve locked node %s" % self.op.target_node
1681 1683

  
1682 1684
    self.target_node_uuid = target_node.uuid
1683
    if target_node.uuid == instance.primary_node:
1685
    if target_node.uuid == self.instance.primary_node:
1684 1686
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1685
                                 (instance.name, target_node.name),
1687
                                 (self.instance.name, target_node.name),
1686 1688
                                 errors.ECODE_STATE)
1687 1689

  
1688
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1690
    bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1689 1691

  
1690
    for idx, dsk in enumerate(instance.disks):
1692
    for idx, dsk in enumerate(self.instance.disks):
1691 1693
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1692 1694
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1693 1695
                                   " cannot copy" % idx, errors.ECODE_STATE)
......
1698 1700
    cluster = self.cfg.GetClusterInfo()
1699 1701
    group_info = self.cfg.GetNodeGroup(target_node.group)
1700 1702
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1701
    CheckTargetNodeIPolicy(self, ipolicy, instance, target_node, self.cfg,
1703
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1702 1704
                           ignore=self.op.ignore_ipolicy)
1703 1705

  
1704
    if instance.admin_state == constants.ADMINST_UP:
1706
    if self.instance.admin_state == constants.ADMINST_UP:
1705 1707
      # check memory requirements on the secondary node
1706 1708
      CheckNodeFreeMemory(
1707 1709
          self, target_node.uuid, "failing over instance %s" %
1708
          instance.name, bep[constants.BE_MAXMEM], instance.hypervisor,
1709
          self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
1710
          self.instance.name, bep[constants.BE_MAXMEM],
1711
          self.instance.hypervisor,
1712
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1710 1713
    else:
1711 1714
      self.LogInfo("Not checking memory on the secondary node as"
1712 1715
                   " instance will not be started")
1713 1716

  
1714 1717
    # check bridge existance
1715
    CheckInstanceBridgesExist(self, instance, node_uuid=target_node.uuid)
1718
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
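The CheckPrereq above gates the move: the instance needs a copyable disk layout (plain LVs or file-based disks), the target must differ from the current primary node, the group's instance policy must accept the instance, and free memory on the target is only checked when the instance is marked as running. A compact sketch of those gates under those assumptions, with illustrative names:

COPYABLE_DISK_TYPES = frozenset(["lv", "file"])    # cf. LD_LV / LD_FILE

def check_move_prereqs(instance, target_node, free_memory_on_target):
    if any(d not in COPYABLE_DISK_TYPES for d in instance["disk_types"]):
        raise ValueError("Instance disk has a complex layout, cannot copy")
    if target_node == instance["primary_node"]:
        raise ValueError("Instance %s is already on the node %s"
                         % (instance["name"], target_node))
    if instance["admin_up"]:
        # memory only matters if the instance will be started on the target
        if free_memory_on_target < instance["maxmem"]:
            raise ValueError("Not enough memory on node %s" % target_node)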
1716 1719

  
1717 1720
  def Exec(self, feedback_fn):
1718 1721
    """Move an instance.
......
1721 1724
    the data over (slow) and starting it on the new node.
1722 1725

  
1723 1726
    """
1724
    instance = self.instance
1725

  
1726
    source_node = self.cfg.GetNodeInfo(instance.primary_node)
1727
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1727 1728
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1728 1729

  
1729 1730
    self.LogInfo("Shutting down instance %s on source node %s",
1730
                 instance.name, source_node.name)
1731
                 self.instance.name, source_node.name)
1731 1732

  
1732 1733
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1733 1734
            self.owned_locks(locking.LEVEL_NODE_RES))
1734 1735

  
1735
    result = self.rpc.call_instance_shutdown(source_node.uuid, instance,
1736
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1736 1737
                                             self.op.shutdown_timeout,
1737 1738
                                             self.op.reason)
1738 1739
    if self.op.ignore_consistency:
1739 1740
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1740 1741
                  " anyway. Please make sure node %s is down. Error details" %
1741
                  (instance.name, source_node.name, source_node.name),
1742
                  (self.instance.name, source_node.name, source_node.name),
1742 1743
                  self.LogWarning)
1743 1744
    else:
1744 1745
      result.Raise("Could not shutdown instance %s on node %s" %
1745
                   (instance.name, source_node.name))
1746
                   (self.instance.name, source_node.name))
1746 1747

  
1747 1748
    # create the target disks
1748 1749
    try:
1749
      CreateDisks(self, instance, target_node_uuid=target_node.uuid)
1750
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1750 1751
    except errors.OpExecError:
1751 1752
      self.LogWarning("Device creation failed")
1752
      self.cfg.ReleaseDRBDMinors(instance.name)
1753
      self.cfg.ReleaseDRBDMinors(self.instance.name)
1753 1754
      raise
1754 1755

  
1755 1756
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1756 1757

  
1757 1758
    errs = []
1758 1759
    # activate, get path, copy the data over
1759
    for idx, disk in enumerate(instance.disks):
1760
    for idx, disk in enumerate(self.instance.disks):
1760 1761
      self.LogInfo("Copying data for disk %d", idx)
1761
      result = self.rpc.call_blockdev_assemble(target_node.uuid,
1762
                                               (disk, instance), instance.name,
1763
                                               True, idx)
1762
      result = self.rpc.call_blockdev_assemble(
1763
                 target_node.uuid, (disk, self.instance), self.instance.name,
1764
                 True, idx)
1764 1765
      if result.fail_msg:
1765 1766
        self.LogWarning("Can't assemble newly created disk %d: %s",
1766 1767
                        idx, result.fail_msg)
1767 1768
        errs.append(result.fail_msg)
1768 1769
        break
1769 1770
      dev_path = result.payload
1770
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk, instance),
1771
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1772
                                                                self.instance),
1771 1773
                                             target_node.name, dev_path,
1772 1774
                                             cluster_name)
1773 1775
      if result.fail_msg:
......
1779 1781
    if errs:
1780 1782
      self.LogWarning("Some disks failed to copy, aborting")
1781 1783
      try:
1782
        RemoveDisks(self, instance, target_node_uuid=target_node.uuid)
1784
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1783 1785
      finally:
1784
        self.cfg.ReleaseDRBDMinors(instance.name)
1786
        self.cfg.ReleaseDRBDMinors(self.instance.name)
1785 1787
        raise errors.OpExecError("Errors during disk copy: %s" %
1786 1788
                                 (",".join(errs),))
1787 1789

  
1788
    instance.primary_node = target_node.uuid
1789
    self.cfg.Update(instance, feedback_fn)
1790
    self.instance.primary_node = target_node.uuid
1791
    self.cfg.Update(self.instance, feedback_fn)
1790 1792

  
1791 1793
    self.LogInfo("Removing the disks on the original node")
1792
    RemoveDisks(self, instance, target_node_uuid=source_node.uuid)
1794
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1793 1795

  
1794 1796
    # Only start the instance if it's marked as up
1795
    if instance.admin_state == constants.ADMINST_UP:
1797
    if self.instance.admin_state == constants.ADMINST_UP:
1796 1798
      self.LogInfo("Starting instance %s on node %s",
1797
                   instance.name, target_node.name)
1799
                   self.instance.name, target_node.name)
1798 1800

  
1799
      disks_ok, _ = AssembleInstanceDisks(self, instance,
1801
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1800 1802
                                          ignore_secondaries=True)
1801 1803
      if not disks_ok:
1802
        ShutdownInstanceDisks(self, instance)
1804
        ShutdownInstanceDisks(self, self.instance)
1803 1805
        raise errors.OpExecError("Can't activate the instance's disks")
1804 1806

  
1805 1807
      result = self.rpc.call_instance_start(target_node.uuid,
1806
                                            (instance, None, None), False,
1808
                                            (self.instance, None, None), False,
1807 1809
                                            self.op.reason)
1808 1810
      msg = result.fail_msg
1809 1811
      if msg:
1810
        ShutdownInstanceDisks(self, instance)
1812
        ShutdownInstanceDisks(self, self.instance)
1811 1813
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1812
                                 (instance.name, target_node.name, msg))
1814
                                 (self.instance.name, target_node.name, msg))
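Exec, above, performs the actual move: the disks are first created on the target node, each disk is assembled there and its data exported from the source, a failure aborts the loop and removes the freshly created disks, and only after a complete copy is the instance's primary node switched, the old disks dropped, and the instance restarted if it was marked up. A minimal sketch of that sequence; the callables are illustrative placeholders for the corresponding RPCs:

def move_disks(disks, create_disks, copy_disk, remove_disks, switch_primary):
    create_disks("target")
    errs = []
    for idx, disk in enumerate(disks):
        # assemble the new disk on the target, then export from the source
        err = copy_disk(disk, "source", "target")
        if err:
            errs.append("disk %d: %s" % (idx, err))
            break                              # stop at the first failure
    if errs:
        remove_disks("target")                 # roll back the partial copy
        raise RuntimeError("Errors during disk copy: %s" % ", ".join(errs))
    switch_primary("target")                   # persist the new primary node
    remove_disks("source")                     # the old copies are now stale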
1813 1815

  
1814 1816

  
1815 1817
class LUInstanceMultiAlloc(NoHooksLU):
......
2571 2573
  def _PreCheckDiskTemplate(self, pnode_info):
2572 2574
    """CheckPrereq checks related to a new disk template."""
2573 2575
    # Arguments are passed to avoid configuration lookups
2574
    instance = self.instance
2575
    pnode_uuid = instance.primary_node
2576
    cluster = self.cluster
2577
    if instance.disk_template == self.op.disk_template:
2576
    pnode_uuid = self.instance.primary_node
2577
    if self.instance.disk_template == self.op.disk_template:
2578 2578
      raise errors.OpPrereqError("Instance already has disk template %s" %
2579
                                 instance.disk_template, errors.ECODE_INVAL)
2579
                                 self.instance.disk_template,
2580
                                 errors.ECODE_INVAL)
2580 2581

  
2581
    if (instance.disk_template,
2582
    if (self.instance.disk_template,
2582 2583
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2583 2584
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2584
                                 " %s to %s" % (instance.disk_template,
2585
                                 " %s to %s" % (self.instance.disk_template,
2585 2586
                                                self.op.disk_template),
2586 2587
                                 errors.ECODE_INVAL)
2587
    CheckInstanceState(self, instance, INSTANCE_DOWN,
2588
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2588 2589
                       msg="cannot change disk template")
2589 2590
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2590 2591
      if self.op.remote_node_uuid == pnode_uuid:
......
2594 2595
      CheckNodeOnline(self, self.op.remote_node_uuid)
2595 2596
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
2596 2597
      # FIXME: here we assume that the old instance type is DT_PLAIN
2597
      assert instance.disk_template == constants.DT_PLAIN
2598
      assert self.instance.disk_template == constants.DT_PLAIN
2598 2599
      disks = [{constants.IDISK_SIZE: d.size,
2599 2600
                constants.IDISK_VG: d.logical_id[0]}
2600
               for d in instance.disks]
2601
               for d in self.instance.disks]
2601 2602
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2602 2603
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2603 2604

  
2604 2605
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2605 2606
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2606
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2607
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2607 2608
                                                              snode_group)
2608
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2609
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2609 2610
                             ignore=self.op.ignore_ipolicy)
2610 2611
      if pnode_info.group != snode_info.group:
2611 2612
        self.LogWarning("The primary and secondary nodes are in two"
......
2622 2623
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2623 2624
      if compat.any(map(has_es, nodes)):
2624 2625
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2625
                  " storage is enabled" % (instance.disk_template,
2626
                  " storage is enabled" % (self.instance.disk_template,
2626 2627
                                           self.op.disk_template))
2627 2628
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
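_PreCheckDiskTemplate above refuses a no-op conversion, only accepts template pairs listed in the _DISK_CONVERSIONS table, and requires the instance to be shut down before the template can change. A tiny sketch of that validation, with illustrative template names:

SUPPORTED_CONVERSIONS = frozenset([("plain", "drbd"), ("drbd", "plain")])

def check_template_change(current, requested, instance_is_down):
    if current == requested:
        raise ValueError("Instance already has disk template %s" % current)
    if (current, requested) not in SUPPORTED_CONVERSIONS:
        raise ValueError("Unsupported disk template conversion from %s to %s"
                         % (current, requested))
    if not instance_is_down:
        raise ValueError("Cannot change disk template of a running instance")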
2628 2629

  
......
2633 2634
    @param ispec: instance specs to be updated with the new disks
2634 2635

  
2635 2636
    """
2636
    instance = self.instance
2637
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2637
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2638 2638

  
2639 2639
    excl_stor = compat.any(
2640
      rpc.GetExclusiveStorageForNodes(self.cfg, instance.all_nodes).values()
2640
      rpc.GetExclusiveStorageForNodes(self.cfg,
2641
                                      self.instance.all_nodes).values()
2641 2642
      )
2642 2643

  
2643 2644
    # Check disk modifications. This is done here and not in CheckArguments
2644 2645
    # (as with NICs), because we need to know the instance's disk template
2645 2646
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2646
    if instance.disk_template == constants.DT_EXT:
2647
    if self.instance.disk_template == constants.DT_EXT:
2647 2648
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2648 2649
    else:
2649 2650
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
......
2652 2653
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2653 2654

  
2654 2655
    # Check the validity of the `provider' parameter
2655
    if instance.disk_template in constants.DT_EXT:
2656
    if self.instance.disk_template in constants.DT_EXT:
2656 2657
      for mod in self.diskmod:
2657 2658
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2658 2659
        if mod[0] == constants.DDM_ADD:
......
2678 2679
                                      constants.DT_EXT),
2679 2680
                                     errors.ECODE_INVAL)
2680 2681

  
2681
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2682
    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2682 2683
      raise errors.OpPrereqError("Disk operations not supported for"
2683 2684
                                 " diskless instances", errors.ECODE_INVAL)
2684 2685

  
......
2686 2687
      disk.name = params.get(constants.IDISK_NAME, None)
2687 2688

  
2688 2689
    # Verify disk changes (operating on a copy)
2689
    disks = copy.deepcopy(instance.disks)
2690
    disks = copy.deepcopy(self.instance.disks)
2690 2691
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2691 2692
                        _PrepareDiskMod, None)
2692 2693
    utils.ValidateDeviceNames("disk", disks)
......
2694 2695
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2695 2696
                                 " more" % constants.MAX_DISKS,
2696 2697
                                 errors.ECODE_STATE)
2697
    disk_sizes = [disk.size for disk in instance.disks]
2698
    disk_sizes = [disk.size for disk in self.instance.disks]
2698 2699
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2699 2700
                      self.diskmod if op == constants.DDM_ADD)
2700 2701
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2701 2702
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2702 2703

  
2703 2704
    if self.op.offline is not None and self.op.offline:
2704
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2705
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2705 2706
                         msg="can't change to offline")
2706 2707

  
2707 2708
  def CheckPrereq(self):
......
2711 2712

  
2712 2713
    """
2713 2714
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2714
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2715
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2716
    self.cluster = self.cfg.GetClusterInfo()
2715 2717

  
2716
    cluster = self.cluster = self.cfg.GetClusterInfo()
2717 2718
    assert self.instance is not None, \
2718 2719
      "Cannot retrieve locked instance %s" % self.op.instance_name
2719 2720

  
2720
    pnode_uuid = instance.primary_node
2721
    pnode_uuid = self.instance.primary_node
2721 2722

  
2722 2723
    self.warn = []
2723 2724

  
......
2725 2726
        not self.op.force):
2726 2727
      # verify that the instance is not up
2727 2728
      instance_info = self.rpc.call_instance_info(
2728
          pnode_uuid, instance.name, instance.hypervisor, instance.hvparams)
2729
          pnode_uuid, self.instance.name, self.instance.hypervisor,
2730
          self.instance.hvparams)
2729 2731
      if instance_info.fail_msg:
2730 2732
        self.warn.append("Can't get instance runtime information: %s" %
2731 2733
                         instance_info.fail_msg)
......
2735 2737
                                   errors.ECODE_STATE)
2736 2738

  
2737 2739
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2738
    node_uuids = list(instance.all_nodes)
2740
    node_uuids = list(self.instance.all_nodes)
2739 2741
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2740 2742

  
2741 2743
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
......
2750 2752

  
2751 2753
    # OS change
2752 2754
    if self.op.os_name and not self.op.force:
2753
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2755
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2754 2756
                     self.op.force_variant)
2755 2757
      instance_os = self.op.os_name
2756 2758
    else:
2757
      instance_os = instance.os
2759
      instance_os = self.instance.os
2758 2760

  
2759 2761
    assert not (self.op.disk_template and self.op.disks), \
2760 2762
      "Can't modify disk template and apply disk changes at the same time"
......
2766 2768

  
2767 2769
    # hvparams processing
2768 2770
    if self.op.hvparams:
2769
      hv_type = instance.hypervisor
2770
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2771
      hv_type = self.instance.hypervisor
2772
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2771 2773
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2772
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2774
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2773 2775

  
2774 2776
      # local check
... This diff was truncated because it exceeds the maximum size that can be displayed.
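The hvparams handling shown near the end of the (truncated) diff is a two-step merge: GetUpdatedParams folds the user-supplied overrides into the instance's current hvparams, dropping keys explicitly reset to the default sentinel, and SimpleFillHV then layers the cluster-level defaults underneath before validation. A rough sketch of that merge, using an illustrative sentinel instead of constants.VALUE_DEFAULT:

DEFAULT = "default"

def updated_params(current, overrides):
    # Apply per-key overrides; a DEFAULT value removes the key entirely.
    merged = dict(current)
    for key, value in overrides.items():
        if value == DEFAULT:
            merged.pop(key, None)              # fall back to the cluster default
        else:
            merged[key] = value
    return merged

def fill_hv(cluster_defaults, instance_params):
    # Layer cluster defaults under the instance-level parameters.
    filled = dict(cluster_defaults)
    filled.update(instance_params)             # instance values take precedence
    return filled

# e.g. fill_hv({"kernel_path": "/boot/vmlinuz"},
#              updated_params({"root_path": "/dev/xvda1"},
#                             {"root_path": DEFAULT, "serial_console": True}))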
