Revision da4a52a3

b/lib/cmdlib/backup.py
     """Check prerequisites.

     """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfoByName(self.op.instance_name)
     assert self.instance is not None, \
           "Cannot retrieve locked instance %s" % self.op.instance_name
     CheckNodeOnline(self, self.instance.primary_node)
......
     This checks that the instance and node names are valid.

     """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfoByName(self.op.instance_name)
     assert self.instance is not None, \
           "Cannot retrieve locked instance %s" % self.op.instance_name
     CheckNodeOnline(self, self.instance.primary_node)
......
     """Remove any export.

     """
-    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
+    (_, inst_name) = self.cfg.ExpandInstanceName(self.op.instance_name)
     # If the instance was not found we'll try with the name that was passed in.
     # This will only work if it was an FQDN, though.
     fqdn_warn = False
-    if not instance_name:
+    if not inst_name:
       fqdn_warn = True
-      instance_name = self.op.instance_name
+      inst_name = self.op.instance_name

     locked_nodes = self.owned_locks(locking.LEVEL_NODE)
     exportlist = self.rpc.call_export_list(locked_nodes)
......
         self.LogWarning("Failed to query node %s (continuing): %s",
                         self.cfg.GetNodeName(node_uuid), msg)
         continue
-      if instance_name in exportlist[node_uuid].payload:
+      if inst_name in exportlist[node_uuid].payload:
         found = True
-        result = self.rpc.call_export_remove(node_uuid, instance_name)
+        result = self.rpc.call_export_remove(node_uuid, inst_name)
         msg = result.fail_msg
         if msg:
           logging.error("Could not remove export for instance %s"
-                        " on node %s: %s", instance_name,
+                        " on node %s: %s", inst_name,
                         self.cfg.GetNodeName(node_uuid), msg)

     if fqdn_warn and not found:
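The hunks above use the lookup convention this revision establishes: config lookups keyed by name go through GetInstanceInfoByName, and cfg.ExpandInstanceName now returns a (uuid, name) pair instead of a bare name. A minimal sketch of that convention, assuming only the config methods shown in the hunks (the helper function itself is illustrative, not part of the patch):

# Illustrative sketch: resolving an instance given only its (possibly
# shortened) name, under the UUID-keyed configuration.
def _ResolveByName(cfg, instance_name):
  # Name-keyed lookup; the assert mirrors the CheckPrereq hunks above.
  instance = cfg.GetInstanceInfoByName(instance_name)
  assert instance is not None, \
    "Cannot retrieve locked instance %s" % instance_name
  # cfg.ExpandInstanceName yields (uuid, full_name); callers that only need
  # the canonical name discard the UUID, as the export-removal hunk does.
  (_, full_name) = cfg.ExpandInstanceName(instance_name)
  return (instance, full_name)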
b/lib/cmdlib/base.py
 from ganeti import locking
 from ganeti import query
 from ganeti import utils
-from ganeti.cmdlib.common import ExpandInstanceName
+from ganeti.cmdlib.common import ExpandInstanceUuidAndName


 class ResultWithJobs:
......
     else:
       assert locking.LEVEL_INSTANCE not in self.needed_locks, \
         "_ExpandAndLockInstance called with instance-level locks set"
-    self.op.instance_name = ExpandInstanceName(self.cfg,
-                                               self.op.instance_name)
+    (self.op.instance_uuid, self.op.instance_name) = \
+      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
+                                self.op.instance_name)
     self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

   def _LockInstancesNodes(self, primary_only=False,
......
     # of self.recalculate_locks[locking.LEVEL_NODE]
     wanted_node_uuids = []
     locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
-    for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
+    for _, instance in self.cfg.GetMultiInstanceInfoByName(locked_i):
       wanted_node_uuids.append(instance.primary_node)
       if not primary_only:
         wanted_node_uuids.extend(instance.secondary_nodes)
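After this change, _ExpandAndLockInstance records both identifiers on the opcode: the UUID for configuration lookups and the name for the instance lock. A sketch of what a derived LogicalUnit can rely on; the class below is illustrative, only the two opcode fields and the config calls come from this revision:

from ganeti.cmdlib.base import LogicalUnit

class _ExampleInstanceLU(LogicalUnit):   # illustrative stand-in
  def ExpandNames(self):
    # Fills self.op.instance_uuid and self.op.instance_name, and queues
    # the instance lock under the expanded name.
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    # Configuration lookups can now be UUID-keyed, while
    # _LockInstancesNodes() keeps resolving the owned name locks through
    # GetMultiInstanceInfoByName(), as shown above.
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)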
b/lib/cmdlib/cluster.py

   def ExpandNames(self):
     if self.op.instances:
-      self.wanted_names = GetWantedInstances(self, self.op.instances)
+      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
       # Not getting the node allocation lock as only a specific set of
       # instances (and their nodes) is going to be acquired
       self.needed_locks = {
......
       self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)

     self.wanted_instances = \
-        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
+        map(compat.snd, self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

   def _EnsureChildSizes(self, disk):
     """Ensure children of the disk have the needed disk size.
......
                                              for nuuid in inst.all_nodes)])
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
-        new = ComputeNewInstanceViolations(ipol,
-                                           new_ipolicy, instances, self.cfg)
+        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
+                                           self.cfg)
         if new:
           violations.update(new)

......

     for inst in self.all_inst_info.values():
       if inst.primary_node in [node.uuid for node in dangling_nodes]:
-        dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
+        dangling_instances.setdefault(inst.primary_node, []).append(inst)
       elif inst.primary_node not in self.all_node_info:
-        no_node_instances.append(inst.name)
+        no_node_instances.append(inst)

     pretty_dangling = [
         "%s (%s)" %
         (node.name,
-         utils.CommaJoin(dangling_instances.get(node.uuid,
-                                                ["no instances"])))
+         utils.CommaJoin(
+           self.cfg.GetInstanceNames(
+             dangling_instances.get(node.uuid, ["no instances"]))))
         for node in dangling_nodes]

     self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
......
     self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
                   None,
                   "the following instances have a non-existing primary-node:"
-                  " %s", utils.CommaJoin(no_node_instances))
+                  " %s", utils.CommaJoin(
+                           self.cfg.GetInstanceNames(no_node_instances)))

     return not self.bad

......
     self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

     # Get instances in node group; this is unsafe and needs verification later
-    inst_names = \
+    inst_uuids = \
       self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)

     self.needed_locks = {
-      locking.LEVEL_INSTANCE: inst_names,
+      locking.LEVEL_INSTANCE: self.cfg.GetInstanceNames(inst_uuids),
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       locking.LEVEL_NODE: [],

......
       # Get members of node group; this is unsafe and needs verification later
       nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)

-      all_inst_info = self.cfg.GetAllInstancesInfo()
-
       # In Exec(), we warn about mirrored instances that have primary and
       # secondary living in separate node groups. To fully verify that
       # volumes for these instances are healthy, we will need to do an
       # extra call to their secondaries. We ensure here those nodes will
       # be locked.
-      for inst in self.owned_locks(locking.LEVEL_INSTANCE):
+      for inst_name in self.owned_locks(locking.LEVEL_INSTANCE):
         # Important: access only the instances whose lock is owned
-        if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
-          nodes.update(all_inst_info[inst].secondary_nodes)
+        instance = self.cfg.GetInstanceInfoByName(inst_name)
+        if instance.disk_template in constants.DTS_INT_MIRROR:
+          nodes.update(instance.secondary_nodes)

       self.needed_locks[locking.LEVEL_NODE] = nodes

......
     self.group_info = self.cfg.GetNodeGroup(self.group_uuid)

     group_node_uuids = set(self.group_info.members)
-    group_instances = \
+    group_inst_uuids = \
       self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)

     unlocked_node_uuids = \
         group_node_uuids.difference(self.owned_locks(locking.LEVEL_NODE))

-    unlocked_instances = \
-        group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
+    unlocked_inst_uuids = \
+        group_inst_uuids.difference(
+          [self.cfg.GetInstanceInfoByName(name).uuid
+           for name in self.owned_locks(locking.LEVEL_INSTANCE)])

     if unlocked_node_uuids:
       raise errors.OpPrereqError(
......
         utils.CommaJoin(self.cfg.GetNodeNames(unlocked_node_uuids)),
         errors.ECODE_STATE)

-    if unlocked_instances:
-      raise errors.OpPrereqError("Missing lock for instances: %s" %
-                                 utils.CommaJoin(unlocked_instances),
-                                 errors.ECODE_STATE)
+    if unlocked_inst_uuids:
+      raise errors.OpPrereqError(
+        "Missing lock for instances: %s" %
+        utils.CommaJoin(self.cfg.GetInstanceNames(unlocked_inst_uuids)),
+        errors.ECODE_STATE)

     self.all_node_info = self.cfg.GetAllNodesInfo()
     self.all_inst_info = self.cfg.GetAllInstancesInfo()
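In the group-verify prerequisites above, the configuration hands out instance UUIDs (GetNodeGroupInstances, GetAllInstancesInfo) while the instance locks are still owned by name, so the code translates between the two explicitly. A condensed sketch of that translation using only calls from this revision; the helper name is illustrative:

from ganeti import locking

# Illustrative sketch: comparing UUID-keyed group membership against
# name-based instance locks.
def _UnlockedGroupInstances(lu, group_uuid):
  # UUIDs of instances whose primary node is in the group.
  group_inst_uuids = lu.cfg.GetNodeGroupInstances(group_uuid,
                                                  primary_only=True)
  # Instance locks are owned under names; map them back to UUIDs.
  owned_uuids = [lu.cfg.GetInstanceInfoByName(name).uuid
                 for name in lu.owned_locks(locking.LEVEL_INSTANCE)]
  # Anything in the group but not locked would trigger the
  # "Missing lock for instances" OpPrereqError shown above.
  return set(group_inst_uuids).difference(owned_uuids)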
......
     self.my_node_info = dict((node_uuid, self.all_node_info[node_uuid])
                              for node_uuid in group_node_uuids)

-    self.my_inst_names = utils.NiceSort(group_instances)
-    self.my_inst_info = dict((name, self.all_inst_info[name])
-                             for name in self.my_inst_names)
+    self.my_inst_uuids = group_inst_uuids
+    self.my_inst_info = dict((inst_uuid, self.all_inst_info[inst_uuid])
+                             for inst_uuid in group_inst_uuids)

     # We detect here the nodes that will need the extra RPC calls for verifying
     # split LV volumes; they should be locked.
......
           msg = "cannot reach the master IP"
         self._ErrorIf(True, constants.CV_ENODENET, ninfo.name, msg)

-  def _VerifyInstance(self, instance, inst_config, node_image,
-                      diskstatus):
+  def _VerifyInstance(self, instance, node_image, diskstatus):
     """Verify an instance.

     This function checks to see if the required block devices are
......
     state.

     """
-    pnode = inst_config.primary_node
-    pnode_img = node_image[pnode]
+    pnode_uuid = instance.primary_node
+    pnode_img = node_image[pnode_uuid]
     groupinfo = self.cfg.GetAllNodeGroupsInfo()

     node_vol_should = {}
-    inst_config.MapLVsByNode(node_vol_should)
+    instance.MapLVsByNode(node_vol_should)

     cluster = self.cfg.GetClusterInfo()
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                             self.group_info)
-    err = ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
-    self._ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance,
+    err = ComputeIPolicyInstanceViolation(ipolicy, instance, self.cfg)
+    self._ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance.name,
                   utils.CommaJoin(err), code=self.ETYPE_WARNING)

-    for node in node_vol_should:
-      n_img = node_image[node]
+    for node_uuid in node_vol_should:
+      n_img = node_image[node_uuid]
       if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
         # ignore missing volumes on offline or broken nodes
         continue
-      for volume in node_vol_should[node]:
+      for volume in node_vol_should[node_uuid]:
         test = volume not in n_img.volumes
-        self._ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
+        self._ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance.name,
                       "volume %s missing on node %s", volume,
-                      self.cfg.GetNodeName(node))
+                      self.cfg.GetNodeName(node_uuid))

-    if inst_config.admin_state == constants.ADMINST_UP:
-      test = instance not in pnode_img.instances and not pnode_img.offline
-      self._ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
+    if instance.admin_state == constants.ADMINST_UP:
+      test = instance.uuid not in pnode_img.instances and not pnode_img.offline
+      self._ErrorIf(test, constants.CV_EINSTANCEDOWN, instance.name,
                     "instance not running on its primary node %s",
-                     self.cfg.GetNodeName(pnode))
-      self._ErrorIf(pnode_img.offline, constants.CV_EINSTANCEBADNODE, instance,
-                    "instance is marked as running and lives on"
-                    " offline node %s", self.cfg.GetNodeName(pnode))
+                     self.cfg.GetNodeName(pnode_uuid))
+      self._ErrorIf(pnode_img.offline, constants.CV_EINSTANCEBADNODE,
+                    instance.name, "instance is marked as running and lives on"
+                    " offline node %s", self.cfg.GetNodeName(pnode_uuid))

     diskdata = [(nname, success, status, idx)
                 for (nname, disks) in diskstatus.items()
......
       # node here
       snode = node_image[nname]
       bad_snode = snode.ghost or snode.offline
-      self._ErrorIf(inst_config.disks_active and
+      self._ErrorIf(instance.disks_active and
                     not success and not bad_snode,
-                    constants.CV_EINSTANCEFAULTYDISK, instance,
+                    constants.CV_EINSTANCEFAULTYDISK, instance.name,
                     "couldn't retrieve status for disk/%s on %s: %s",
                     idx, self.cfg.GetNodeName(nname), bdev_status)

-      if inst_config.disks_active and success and \
+      if instance.disks_active and success and \
          (bdev_status.is_degraded or
          bdev_status.ldisk_status != constants.LDS_OKAY):
         msg = "disk/%s on %s" % (idx, self.cfg.GetNodeName(nname))
......
           msg += "; state is '%s'" % \
                  constants.LDS_NAMES[bdev_status.ldisk_status]

-        self._Error(constants.CV_EINSTANCEFAULTYDISK, instance, msg)
+        self._Error(constants.CV_EINSTANCEFAULTYDISK, instance.name, msg)

     self._ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
-                  constants.CV_ENODERPC, pnode, "instance %s, connection to"
-                  " primary node failed", instance)
-
-    self._ErrorIf(len(inst_config.secondary_nodes) > 1,
-                  constants.CV_EINSTANCELAYOUT,
-                  instance, "instance has multiple secondary nodes: %s",
-                  utils.CommaJoin(inst_config.secondary_nodes),
+                  constants.CV_ENODERPC, self.cfg.GetNodeName(pnode_uuid),
+                  "instance %s, connection to primary node failed",
+                  instance.name)
+
+    self._ErrorIf(len(instance.secondary_nodes) > 1,
+                  constants.CV_EINSTANCELAYOUT, instance.name,
+                  "instance has multiple secondary nodes: %s",
+                  utils.CommaJoin(instance.secondary_nodes),
                   code=self.ETYPE_WARNING)

-    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
-                                               inst_config.all_nodes)
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, instance.all_nodes)
     if any(es_flags.values()):
-      if inst_config.disk_template not in constants.DTS_EXCL_STORAGE:
+      if instance.disk_template not in constants.DTS_EXCL_STORAGE:
         # Disk template not compatible with exclusive_storage: no instance
         # node should have the flag set
         es_nodes = [n
                     for (n, es) in es_flags.items()
                     if es]
-        self._Error(constants.CV_EINSTANCEUNSUITABLENODE, instance,
+        self._Error(constants.CV_EINSTANCEUNSUITABLENODE, instance.name,
                     "instance has template %s, which is not supported on nodes"
                     " that have exclusive storage set: %s",
-                    inst_config.disk_template,
+                    instance.disk_template,
                     utils.CommaJoin(self.cfg.GetNodeNames(es_nodes)))
-      for (idx, disk) in enumerate(inst_config.disks):
+      for (idx, disk) in enumerate(instance.disks):
         self._ErrorIf(disk.spindles is None,
-                      constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance,
+                      constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance.name,
                       "number of spindles not configured for disk %s while"
                       " exclusive storage is enabled, try running"
                       " gnt-cluster repair-disk-sizes", idx)

-    if inst_config.disk_template in constants.DTS_INT_MIRROR:
-      instance_nodes = utils.NiceSort(inst_config.all_nodes)
+    if instance.disk_template in constants.DTS_INT_MIRROR:
+      instance_nodes = utils.NiceSort(instance.all_nodes)
       instance_groups = {}

-      for node in instance_nodes:
-        instance_groups.setdefault(self.all_node_info[node].group,
-                                   []).append(node)
+      for node_uuid in instance_nodes:
+        instance_groups.setdefault(self.all_node_info[node_uuid].group,
+                                   []).append(node_uuid)

       pretty_list = [
         "%s (group %s)" % (utils.CommaJoin(self.cfg.GetNodeNames(nodes)),
                            groupinfo[group].name)
         # Sort so that we always list the primary node first.
         for group, nodes in sorted(instance_groups.items(),
-                                   key=lambda (_, nodes): pnode in nodes,
+                                   key=lambda (_, nodes): pnode_uuid in nodes,
                                    reverse=True)]

       self._ErrorIf(len(instance_groups) > 1,
                     constants.CV_EINSTANCESPLITGROUPS,
-                    instance, "instance has primary and secondary nodes in"
+                    instance.name, "instance has primary and secondary nodes in"
                     " different groups: %s", utils.CommaJoin(pretty_list),
                     code=self.ETYPE_WARNING)

     inst_nodes_offline = []
-    for snode in inst_config.secondary_nodes:
+    for snode in instance.secondary_nodes:
       s_img = node_image[snode]
       self._ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
-                    snode, "instance %s, connection to secondary node failed",
-                    instance)
+                    self.cfg.GetNodeName(snode),
+                    "instance %s, connection to secondary node failed",
+                    instance.name)

       if s_img.offline:
         inst_nodes_offline.append(snode)

     # warn that the instance lives on offline nodes
-    self._ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
-                  "instance has offline secondary node(s) %s",
+    self._ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE,
+                  instance.name, "instance has offline secondary node(s) %s",
                   utils.CommaJoin(self.cfg.GetNodeNames(inst_nodes_offline)))
     # ... or ghost/non-vm_capable nodes
-    for node in inst_config.all_nodes:
-      self._ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
-                    instance, "instance lives on ghost node %s",
-                    self.cfg.GetNodeName(node))
-      self._ErrorIf(not node_image[node].vm_capable,
-                    constants.CV_EINSTANCEBADNODE, instance,
+    for node_uuid in instance.all_nodes:
+      self._ErrorIf(node_image[node_uuid].ghost, constants.CV_EINSTANCEBADNODE,
+                    instance.name, "instance lives on ghost node %s",
+                    self.cfg.GetNodeName(node_uuid))
+      self._ErrorIf(not node_image[node_uuid].vm_capable,
+                    constants.CV_EINSTANCEBADNODE, instance.name,
                     "instance lives on non-vm_capable node %s",
-                    self.cfg.GetNodeName(node))
+                    self.cfg.GetNodeName(node_uuid))

   def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
     """Verify if there are any unknown volumes in the cluster.
......
                       self.cfg.GetNodeName(node_uuid),
                       "volume %s is unknown", volume)

-  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
+  def _VerifyNPlusOneMemory(self, node_image, all_insts):
     """Verify N+1 Memory Resilience.

     Check that if one single node dies we can still start all the
......
         # nodes, and that's enough warning
         continue
       #TODO(dynmem): also consider ballooning out other instances
-      for prinode, instances in n_img.sbp.items():
+      for prinode, inst_uuids in n_img.sbp.items():
         needed_mem = 0
-        for instance in instances:
-          bep = cluster_info.FillBE(instance_cfg[instance])
+        for inst_uuid in inst_uuids:
+          bep = cluster_info.FillBE(all_insts[inst_uuid])
           if bep[constants.BE_AUTO_BALANCE]:
             needed_mem += bep[constants.BE_MINMEM]
         test = n_img.mfree < needed_mem
......

     # compute the DRBD minors
     node_drbd = {}
-    for minor, instance in drbd_map[ninfo.uuid].items():
-      test = instance not in instanceinfo
+    for minor, inst_uuid in drbd_map[ninfo.uuid].items():
+      test = inst_uuid not in instanceinfo
       self._ErrorIf(test, constants.CV_ECLUSTERCFG, None,
-                    "ghost instance '%s' in temporary DRBD map", instance)
+                    "ghost instance '%s' in temporary DRBD map", inst_uuid)
         # ghost instance should not be running, but otherwise we
         # don't give double warnings (both ghost instance and
         # unallocated minor in use)
       if test:
-        node_drbd[minor] = (instance, False)
+        node_drbd[minor] = (inst_uuid, False)
       else:
-        instance = instanceinfo[instance]
-        node_drbd[minor] = (instance.name, instance.disks_active)
+        instance = instanceinfo[inst_uuid]
+        node_drbd[minor] = (inst_uuid, instance.disks_active)

     # and now check them
     used_minors = nresult.get(constants.NV_DRBDLIST, [])
......
       # we cannot check drbd status
       return

-    for minor, (iname, must_exist) in node_drbd.items():
+    for minor, (inst_uuid, must_exist) in node_drbd.items():
       test = minor not in used_minors and must_exist
       self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
-                    "drbd minor %d of instance %s is not active", minor, iname)
+                    "drbd minor %d of instance %s is not active", minor,
+                    self.cfg.GetInstanceName(inst_uuid))
     for minor in used_minors:
       test = minor not in node_drbd
       self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
......
     if test:
       nimg.hyp_fail = True
     else:
-      nimg.instances = idata
+      nimg.instances = [inst.uuid for (_, inst) in
+                        self.cfg.GetMultiInstanceInfoByName(idata)]

   def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
     """Verifies and computes a node information map
......

     @type node_uuids: list of strings
     @param node_uuids: Node UUIDs
-    @type node_image: dict of (name, L{objects.Node})
+    @type node_image: dict of (UUID, L{objects.Node})
     @param node_image: Node objects
-    @type instanceinfo: dict of (name, L{objects.Instance})
+    @type instanceinfo: dict of (UUID, L{objects.Instance})
     @param instanceinfo: Instance objects
     @rtype: {instance: {node: [(succes, payload)]}}
     @return: a dictionary of per-instance dictionaries with nodes as
......
     diskless = constants.DT_DISKLESS

     for nuuid in node_uuids:
-      node_instances = list(itertools.chain(node_image[nuuid].pinst,
-                                            node_image[nuuid].sinst))
-      diskless_instances.update(inst for inst in node_instances
-                                if instanceinfo[inst].disk_template == diskless)
-      disks = [(inst, disk)
-               for inst in node_instances
-               for disk in instanceinfo[inst].disks]
+      node_inst_uuids = list(itertools.chain(node_image[nuuid].pinst,
+                                             node_image[nuuid].sinst))
+      diskless_instances.update(uuid for uuid in node_inst_uuids
+                                if instanceinfo[uuid].disk_template == diskless)
+      disks = [(inst_uuid, disk)
+               for inst_uuid in node_inst_uuids
+               for disk in instanceinfo[inst_uuid].disks]

       if not disks:
         # No need to collect data
......

       # _AnnotateDiskParams makes already copies of the disks
       devonly = []
-      for (inst, dev) in disks:
-        (anno_disk,) = AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
+      for (inst_uuid, dev) in disks:
+        (anno_disk,) = AnnotateDiskParams(instanceinfo[inst_uuid], [dev],
+                                          self.cfg)
         self.cfg.SetDiskID(anno_disk, nuuid)
         devonly.append(anno_disk)

......
                               node.name, idx, i)
               data.append((False, "Invalid result from the remote node"))

-      for ((inst, _), status) in zip(disks, data):
-        instdisk.setdefault(inst, {}).setdefault(node.uuid, []).append(status)
+      for ((inst_uuid, _), status) in zip(disks, data):
+        instdisk.setdefault(inst_uuid, {}).setdefault(node.uuid, []) \
+          .append(status)

     # Add empty entries for diskless instances.
-    for inst in diskless_instances:
-      assert inst not in instdisk
-      instdisk[inst] = {}
+    for inst_uuid in diskless_instances:
+      assert inst_uuid not in instdisk
+      instdisk[inst_uuid] = {}

     assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
                       len(nuuids) <= len(instanceinfo[inst].all_nodes) and
......
     default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
     if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
       bridges.add(default_nicpp[constants.NIC_LINK])
-    for instance in self.my_inst_info.values():
-      for nic in instance.nics:
+    for inst_uuid in self.my_inst_info.values():
+      for nic in inst_uuid.nics:
         full_nic = cluster.SimpleFillNIC(nic.nicparams)
         if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
           bridges.add(full_nic[constants.NIC_LINK])
......
     if oob_paths:
       node_verify_param[constants.NV_OOB_PATHS] = oob_paths

-    for instance in self.my_inst_names:
-      inst_config = self.my_inst_info[instance]
-      if inst_config.admin_state == constants.ADMINST_OFFLINE:
+    for inst_uuid in self.my_inst_uuids:
+      instance = self.my_inst_info[inst_uuid]
+      if instance.admin_state == constants.ADMINST_OFFLINE:
         i_offline += 1

-      for nuuid in inst_config.all_nodes:
+      for nuuid in instance.all_nodes:
         if nuuid not in node_image:
           gnode = self.NodeImage(uuid=nuuid)
           gnode.ghost = (nuuid not in self.all_node_info)
           node_image[nuuid] = gnode

-      inst_config.MapLVsByNode(node_vol_should)
+      instance.MapLVsByNode(node_vol_should)

-      pnode = inst_config.primary_node
-      node_image[pnode].pinst.append(instance)
+      pnode = instance.primary_node
+      node_image[pnode].pinst.append(instance.uuid)

-      for snode in inst_config.secondary_nodes:
+      for snode in instance.secondary_nodes:
         nimg = node_image[snode]
-        nimg.sinst.append(instance)
+        nimg.sinst.append(instance.uuid)
         if pnode not in nimg.sbp:
           nimg.sbp[pnode] = []
-        nimg.sbp[pnode].append(instance)
+        nimg.sbp[pnode].append(instance.uuid)

     es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
                                                self.my_node_info.keys())
......
           self._VerifyNodeOS(node_i, nimg, refos_img)
         self._VerifyNodeBridges(node_i, nresult, bridges)

-        # Check whether all running instancies are primary for the node. (This
+        # Check whether all running instances are primary for the node. (This
         # can no longer be done from _VerifyInstance below, since some of the
         # wrong instances could be from other node groups.)
-        non_primary_inst = set(nimg.instances).difference(nimg.pinst)
+        non_primary_inst_uuids = set(nimg.instances).difference(nimg.pinst)

-        for inst in non_primary_inst:
-          test = inst in self.all_inst_info
-          self._ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
+        for inst_uuid in non_primary_inst_uuids:
+          test = inst_uuid in self.all_inst_info
+          self._ErrorIf(test, constants.CV_EINSTANCEWRONGNODE,
+                        self.cfg.GetInstanceName(inst_uuid),
                         "instance should not run on node %s", node_i.name)
           self._ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
-                        "node is running unknown instance %s", inst)
+                        "node is running unknown instance %s", inst_uuid)

     self._VerifyGroupDRBDVersion(all_nvinfo)
     self._VerifyGroupLVM(node_image, vg_name)
......
                               node_image[node_uuid], vg_name)

     feedback_fn("* Verifying instance status")
-    for instance in self.my_inst_names:
+    for inst_uuid in self.my_inst_uuids:
+      instance = self.my_inst_info[inst_uuid]
       if verbose:
-        feedback_fn("* Verifying instance %s" % instance)
-      inst_config = self.my_inst_info[instance]
-      self._VerifyInstance(instance, inst_config, node_image,
-                           instdisk[instance])
+        feedback_fn("* Verifying instance %s" % instance.name)
+      self._VerifyInstance(instance, node_image, instdisk[inst_uuid])

       # If the instance is non-redundant we cannot survive losing its primary
       # node, so we are not N+1 compliant.
-      if inst_config.disk_template not in constants.DTS_MIRRORED:
+      if instance.disk_template not in constants.DTS_MIRRORED:
         i_non_redundant.append(instance)

-      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
+      if not cluster.FillBE(instance)[constants.BE_AUTO_BALANCE]:
         i_non_a_balanced.append(instance)

     feedback_fn("* Verifying orphan volumes")
......
     # We will get spurious "unknown volume" warnings if any node of this group
     # is secondary for an instance whose primary is in another group. To avoid
     # them, we find these instances and add their volumes to node_vol_should.
-    for inst in self.all_inst_info.values():
-      for secondary in inst.secondary_nodes:
+    for instance in self.all_inst_info.values():
+      for secondary in instance.secondary_nodes:
         if (secondary in self.my_node_info
-            and inst.name not in self.my_inst_info):
-          inst.MapLVsByNode(node_vol_should)
+            and instance.name not in self.my_inst_info):
+          instance.MapLVsByNode(node_vol_should)
           break

     self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
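Throughout the verification code above, the internal collections (nimg.instances, nimg.pinst, nimg.sinst, nimg.sbp, instdisk, self.my_inst_info) now carry instance UUIDs, and names are materialized only when a message is emitted. A small illustrative helper for that last step, using the two config calls the hunks rely on:

from ganeti import utils

# Illustrative sketch: UUIDs stay internal, names are produced only when
# composing user-visible messages.
def _DescribeInstances(cfg, inst_uuids):
  # cfg.GetInstanceNames() maps a list of UUIDs to names, as used for the
  # CV_ECLUSTERDANGLINGINST warning above; cfg.GetInstanceName() is the
  # single-UUID variant used for per-instance messages.
  return utils.CommaJoin(utils.NiceSort(cfg.GetInstanceNames(inst_uuids)))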
b/lib/cmdlib/common.py
   return full_name


-def ExpandInstanceName(cfg, name):
+def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
   """Wrapper over L{_ExpandItemName} for instance."""
-  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
+  (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
+  if expected_uuid is not None and uuid != expected_uuid:
+    raise errors.OpPrereqError(
+      "The instances UUID '%s' does not match the expected UUID '%s' for"
+      " instance '%s'. Maybe the instance changed since you submitted this"
+      " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
+  return (uuid, full_name)
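Besides expanding a possibly shortened name, the new helper cross-checks an optional expected UUID, so a job submitted against one instance fails cleanly if the name has since been reused for a different one. A usage sketch; the name "web1" and the cfg/op objects are stand-ins:

# Illustrative usage of ExpandInstanceUuidAndName:
# 1) no expectation, just resolve whatever the name points to now:
(inst_uuid, inst_name) = ExpandInstanceUuidAndName(cfg, None, "web1")
# 2) with an expectation: raises OpPrereqError (ECODE_NOTUNIQUE) if the
#    name no longer belongs to the instance the opcode was created for:
(op.instance_uuid, op.instance_name) = \
  ExpandInstanceUuidAndName(cfg, op.instance_uuid, op.instance_name)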


 def ExpandNodeUuidAndName(cfg, expected_uuid, name):
......
   return dict.fromkeys(locking.LEVELS, 1)


-def CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
+def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
   """Checks if the instances in a node group are still correct.

   @type cfg: L{config.ConfigWriter}
   @param cfg: The cluster configuration
   @type group_uuid: string
   @param group_uuid: Node group UUID
-  @type owned_instances: set or frozenset
-  @param owned_instances: List of currently owned instances
+  @type owned_instance_names: set or frozenset
+  @param owned_instance_names: List of currently owned instances

   """
-  wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
-  if owned_instances != wanted_instances:
+  wanted_instances = frozenset(cfg.GetInstanceNames(
+                                 cfg.GetNodeGroupInstances(group_uuid)))
+  if owned_instance_names != wanted_instances:
     raise errors.OpPrereqError("Instances in node group '%s' changed since"
                                " locks were acquired, wanted '%s', have '%s';"
                                " retry the operation" %
                                (group_uuid,
                                 utils.CommaJoin(wanted_instances),
-                                utils.CommaJoin(owned_instances)),
+                                utils.CommaJoin(owned_instance_names)),
                                errors.ECODE_STATE)

   return wanted_instances
......
   return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])


-def GetWantedInstances(lu, instances):
+def GetWantedInstances(lu, short_inst_names):
   """Returns list of checked and expanded instance names.

   @type lu: L{LogicalUnit}
   @param lu: the logical unit on whose behalf we execute
-  @type instances: list
-  @param instances: list of instance names or None for all instances
-  @rtype: list
-  @return: the list of instances, sorted
+  @type short_inst_names: list
+  @param short_inst_names: list of instance names or None for all instances
+  @rtype: tuple of lists
+  @return: tuple of (instance UUIDs, instance names)
   @raise errors.OpPrereqError: if the instances parameter is wrong type
   @raise errors.OpPrereqError: if any of the passed instances is not found

   """
-  if instances:
-    wanted = [ExpandInstanceName(lu.cfg, name) for name in instances]
+  if short_inst_names:
+    inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
+                  for name in short_inst_names]
   else:
-    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
-  return wanted
+    inst_uuids = lu.cfg.GetInstanceList()
+  return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])


 def RunPostHook(lu, node_name):
......
   @type cfg: L{config.ConfigWriter}
   @param cfg: Cluster configuration
   @type instances: dict; string as key, L{objects.Instance} as value
-  @param instances: Dictionary, instance name as key, instance object as value
+  @param instances: Dictionary, instance UUID as key, instance object as value
   @type owned_groups: iterable of string
   @param owned_groups: List of owned groups
   @type owned_node_uuids: iterable of string
......
   @param cur_group_uuid: Optional group UUID to check against instance's groups

   """
-  for (name, inst) in instances.items():
+  for (uuid, inst) in instances.items():
     assert owned_node_uuids.issuperset(inst.all_nodes), \
-      "Instance %s's nodes changed while we kept the lock" % name
+      "Instance %s's nodes changed while we kept the lock" % inst.name

-    inst_groups = CheckInstanceNodeGroups(cfg, name, owned_groups)
+    inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)

     assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
-      "Instance %s has no node in group %s" % (name, cur_group_uuid)
+      "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)


-def CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
-                            primary_only=False):
+def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
   """Checks if the owned node groups are still correct for an instance.

   @type cfg: L{config.ConfigWriter}
   @param cfg: The cluster configuration
-  @type instance_name: string
-  @param instance_name: Instance name
+  @type inst_uuid: string
+  @param inst_uuid: Instance UUID
   @type owned_groups: set or frozenset
   @param owned_groups: List of currently owned node groups
   @type primary_only: boolean
   @param primary_only: Whether to check node groups for only the primary node

   """
-  inst_groups = cfg.GetInstanceNodeGroups(instance_name, primary_only)
+  inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)

   if not owned_groups.issuperset(inst_groups):
     raise errors.OpPrereqError("Instance %s's node groups changed since"
                                " locks were acquired, current groups are"
                                " are '%s', owning groups '%s'; retry the"
                                " operation" %
-                               (instance_name,
+                               (cfg.GetInstanceName(inst_uuid),
                                 utils.CommaJoin(inst_groups),
                                 utils.CommaJoin(owned_groups)),
                                errors.ECODE_STATE)
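CheckNodeGroupInstances (and CheckInstanceNodeGroups, now keyed by UUID) back the optimistic-locking pattern used by the group LUs in the next file: lock the group's instances by name, then re-check in CheckPrereq that the membership, stored as UUIDs, has not changed in the meantime. A sketch of that pattern, assuming a LU with self.cfg, self.group_uuid and the usual lock bookkeeping; the class itself is illustrative:

from ganeti import locking
from ganeti.cmdlib.common import CheckNodeGroupInstances

class _ExampleGroupLU(object):    # illustrative stand-in for a group LU
  def ExpandNames(self):
    # Lock the group's instances optimistically, by name.
    self.needed_locks = {
      locking.LEVEL_INSTANCE:
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid)),
    }

  def CheckPrereq(self):
    owned_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    # Raises OpPrereqError (ECODE_STATE) if the group membership changed
    # between ExpandNames and CheckPrereq.
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_names)
    # Name-keyed bulk lookup of the instances that were actually locked.
    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_names))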
b/lib/cmdlib/group.py
                                             self.node_data, instance_data)

     if new_splits:
-      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
+      fmt_new_splits = utils.CommaJoin(utils.NiceSort(
+                         self.cfg.GetInstanceNames(new_splits)))

       if not self.op.force:
         raise errors.OpExecError("The following instances get split by this"
......
         if previous_splits:
           self.LogWarning("In addition, these already-split instances continue"
                           " to be split across groups: %s",
-                          utils.CommaJoin(utils.NiceSort(previous_splits)))
+                          utils.CommaJoin(utils.NiceSort(
+                            self.cfg.GetInstanceNames(previous_splits))))

   def Exec(self, feedback_fn):
     """Assign nodes to a new group.
......

       if len(set(node_data[node_uuid].group
                  for node_uuid in inst.all_nodes)) > 1:
-        previously_split_instances.add(inst.name)
+        previously_split_instances.add(inst.uuid)

       if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
                  for node_uuid in inst.all_nodes)) > 1:
-        all_split_instances.add(inst.uuid)
+        all_split_instances.add(inst.uuid)

     return (list(all_split_instances - previously_split_instances),
             list(previously_split_instances & all_split_instances))
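The split detection above now collects UUIDs into both sets and leaves name rendering to the caller (GetInstanceNames in the first hunk of this file). A tiny worked example of the return contract, with made-up UUID strings used purely for illustration:

# Hypothetical values, to illustrate the set arithmetic only.
all_split_instances = set(["uuid-a", "uuid-b", "uuid-c"])
previously_split_instances = set(["uuid-b", "uuid-d"])

new_splits = list(all_split_instances - previously_split_instances)
# contains uuid-a and uuid-c: instances split by this operation
previous_splits = list(previously_split_instances & all_split_instances)
# contains uuid-b: already split before and still split afterwards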
......
         for instance in all_instances.values():
           node = instance.primary_node
           if node in node_to_group:
-            group_to_instances[node_to_group[node]].append(instance.name)
+            group_to_instances[node_to_group[node]].append(instance.uuid)

         if not do_nodes:
           # Do not pass on node information if it was not requested.
......
       # Lock instances optimistically, needs verification once group lock has
       # been acquired
       self.needed_locks[locking.LEVEL_INSTANCE] = \
-          self.cfg.GetNodeGroupInstances(self.group_uuid)
+        self.cfg.GetInstanceNames(
+          self.cfg.GetNodeGroupInstances(self.group_uuid))

   @staticmethod
   def _UpdateAndVerifyDiskParams(old, new):
......
     """Check prerequisites.

     """
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))

     # Check if locked instances are still correct
-    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     cluster = self.cfg.GetClusterInfo()
......
                                            group_policy=True)

       new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
-      inst_filter = lambda inst: inst.name in owned_instances
-      instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
+      instances = self.cfg.GetMultiInstanceInfoByName(owned_instance_names)
       gmi = ganeti.masterd.instance
       violations = \
           ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
......
       # Lock instances optimistically, needs verification once node and group
       # locks have been acquired
       self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
+        self.cfg.GetInstanceNames(
+          self.cfg.GetNodeGroupInstances(self.group_uuid))

     elif level == locking.LEVEL_NODEGROUP:
       assert not self.needed_locks[locking.LEVEL_NODEGROUP]
......
                            for instance_name in
                              self.owned_locks(locking.LEVEL_INSTANCE)
                            for group_uuid in
-                             self.cfg.GetInstanceNodeGroups(instance_name))
+                             self.cfg.GetInstanceNodeGroups(
+                               self.cfg.GetInstanceInfoByName(instance_name)
+                                 .uuid))
       else:
         # No target groups, need to lock all of them
         lock_groups = locking.ALL_SET
......
       self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

   def CheckPrereq(self):
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
     owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

......
     assert self.group_uuid in owned_groups

     # Check if locked instances are still correct
-    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

     # Get instance information
-    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
+    self.instances = \
+      dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))

     # Check if node groups for locked instances are still correct
     CheckInstancesNodeGroups(self.cfg, self.instances,
......
     return (run_nodes, run_nodes)

   def Exec(self, feedback_fn):
-    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
+    inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))

     assert self.group_uuid not in self.target_uuids

-    req = iallocator.IAReqGroupChange(instances=instances,
+    req = iallocator.IAReqGroupChange(instances=inst_names,
                                       target_groups=self.target_uuids)
     ial = iallocator.IAllocator(self.cfg, self.rpc, req)

......
       # Lock instances optimistically, needs verification once node and group
       # locks have been acquired
       self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
+        self.cfg.GetInstanceNames(
+          self.cfg.GetNodeGroupInstances(self.group_uuid))

     elif level == locking.LEVEL_NODEGROUP:
       assert not self.needed_locks[locking.LEVEL_NODEGROUP]
......
             # later on
             [group_uuid
              for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
-             for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
+             for group_uuid in
+               self.cfg.GetInstanceNodeGroups(
+                 self.cfg.GetInstanceInfoByName(instance_name).uuid)])

     elif level == locking.LEVEL_NODE:
       # This will only lock the nodes in the group to be verified which contain
......
       self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

   def CheckPrereq(self):
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
     owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

     assert self.group_uuid in owned_groups

     # Check if locked instances are still correct
-    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)

     # Get instance information
-    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
+    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))

     # Check if node groups for locked instances are still correct
     CheckInstancesNodeGroups(self.cfg, self.instances,
b/lib/cmdlib/instance.py
   ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
   LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
   IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
-  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
+  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
   ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
 from ganeti.cmdlib.instance_storage import CreateDisks, \
   CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
......

     # instance name verification
     if self.op.name_check:
-      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
-      self.op.instance_name = self.hostname1.name
+      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
+      self.op.instance_name = self.hostname.name
       # used in CheckPrereq for ip ping check
-      self.check_ip = self.hostname1.ip
+      self.check_ip = self.hostname.ip
     else:
       self.check_ip = None

......

     # this is just a preventive check, but someone might still add this
     # instance in the meantime, and creation will fail at lock-add time
-    if self.op.instance_name in self.cfg.GetInstanceList():
+    if self.op.instance_name in\
+      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
       raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                  self.op.instance_name, errors.ECODE_EXISTS)
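Since the configuration's instance list now holds UUIDs, the duplicate-name check above compares against the names of all known instances instead. A hedged sketch of the same check as a helper; the function name is illustrative:

# Illustrative sketch: is a candidate instance name already taken?
def _InstanceNameTaken(cfg, name):
  # cfg.GetAllInstancesInfo() is keyed by UUID; compare against the .name
  # attribute of each instance object, as the hunk above does.
  return name in [inst.name for inst in cfg.GetAllInstancesInfo().values()]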

......
     else:
       network_port = None

+    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
+
     # This is ugly but we got a chicken-egg problem here
     # We can only take the group disk parameters, as the instance
     # has no disks yet (we are generating them right here).
     nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
     disks = GenerateDiskTemplate(self,
                                  self.op.disk_template,
-                                 self.op.instance_name, self.pnode.uuid,
+                                 instance_uuid, self.pnode.uuid,
                                  self.secondaries,
                                  self.disks,
                                  self.instance_file_storage_dir,
......
                                  feedback_fn,
                                  self.cfg.GetGroupDiskParams(nodegroup))

-    iobj = objects.Instance(name=self.op.instance_name, os=self.op.os_type,
+    iobj = objects.Instance(name=self.op.instance_name,
+                            uuid=instance_uuid,
+                            os=self.op.os_type,
                             primary_node=self.pnode.uuid,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
......

     if disk_abort:
       RemoveDisks(self, iobj)
-      self.cfg.RemoveInstance(iobj.name)
+      self.cfg.RemoveInstance(iobj.uuid)
       # Make sure the instance lock gets removed
       self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
       raise errors.OpExecError("There are some degraded disks for"
......
     This checks that the instance is in the cluster and is not running.

     """
-    self.op.instance_name = ExpandInstanceName(self.cfg,
-                                               self.op.instance_name)
-    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    (self.op.instance_uuid, self.op.instance_name) = \
+      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
+                                self.op.instance_name)
+    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert instance is not None
     CheckNodeOnline(self, instance.primary_node)
     CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
......
                                    (hostname.ip, new_name),
                                    errors.ECODE_NOTUNIQUE)

-    instance_list = self.cfg.GetInstanceList()
-    if new_name in instance_list and new_name != instance.name:
+    instance_names = [inst.name for
+                      inst in self.cfg.GetAllInstancesInfo().values()]
+    if new_name in instance_names and new_name != instance.name:
       raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                  new_name, errors.ECODE_EXISTS)

......
                                self.instance.disks[0].logical_id[1])
       rename_file_storage = True

-    self.cfg.RenameInstance(self.instance.name, self.op.new_name)
+    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
     # Change the instance lock. This is definitely safe while we hold the BGL.
     # Otherwise the new lock would have to be added in acquired mode.
     assert self.REQ_BGL
......
     self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

     # re-read the instance from the configuration after rename
-    renamed_inst = self.cfg.GetInstanceInfo(self.op.new_name)
+    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

     if rename_file_storage:
       new_file_storage_dir = os.path.dirname(
......
     This checks that the instance is in the cluster.

     """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name

......
     This checks that the instance is in the cluster.

     """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name

......
       CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
     except errors.OpExecError:
       self.LogWarning("Device creation failed")
-      self.cfg.ReleaseDRBDMinors(self.instance.name)
+      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
       raise

     cluster_name = self.cfg.GetClusterInfo().cluster_name
......
       try:
         RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
       finally:
-        self.cfg.ReleaseDRBDMinors(self.instance.name)
+        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
         raise errors.OpExecError("Errors during disk copy: %s" %
                                  (",".join(errs),))

......
       # Acquire locks for the instance's nodegroups optimistically. Needs
       # to be verified in CheckPrereq
       self.needed_locks[locking.LEVEL_NODEGROUP] = \
-        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
     elif level == locking.LEVEL_NODE:
       self._LockInstancesNodes()
       if self.op.disk_template and self.op.remote_node:
......

     """
     assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
     self.cluster = self.cfg.GetClusterInfo()

     assert self.instance is not None, \
......
                   constants.IDISK_NAME: d.name}
                  for d in self.instance.disks]
     new_disks = GenerateDiskTemplate(self, self.op.disk_template,
-                                     self.instance.name, pnode_uuid,
+                                     self.instance.uuid, pnode_uuid,
                                      [snode_uuid], disk_info, None, None, 0,
                                      feedback_fn, self.diskparams)
     anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
......

     disk = \
       GenerateDiskTemplate(self, self.instance.disk_template,
-                           self.instance.name, self.instance.primary_node,
+                           self.instance.uuid, self.instance.primary_node,
                            self.instance.secondary_nodes, [params], file_path,
                            file_driver, idx, self.Log, self.diskparams)[0]

......
       try:
         self._DISK_CONVERSIONS[mode](self, feedback_fn)
       except:
-        self.cfg.ReleaseDRBDMinors(self.instance.name)
+        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
         raise
       result.append(("disk_template", self.op.disk_template))

......
       pass
     elif self.op.offline:
3361 3368
      # Mark instance as offline
3362
      self.cfg.MarkInstanceOffline(self.instance.name)
3369
      self.cfg.MarkInstanceOffline(self.instance.uuid)
3363 3370
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3364 3371
    else:
3365 3372
      # Mark instance as online, but stopped
3366
      self.cfg.MarkInstanceDown(self.instance.name)
3373
      self.cfg.MarkInstanceDown(self.instance.uuid)
3367 3374
      result.append(("admin_state", constants.ADMINST_DOWN))
3368 3375

  
3369 3376
    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
......
3413 3420

  
3414 3421
        # Lock all groups used by instance optimistically; this requires going
3415 3422
        # via the node before it's locked, requiring verification later on
3416
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3423
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
3417 3424
        lock_groups.update(instance_groups)
3418 3425
      else:
3419 3426
        # No target groups, need to lock all of them
......
3429 3436

  
3430 3437
        # Lock all nodes in all potential target groups
3431 3438
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3432
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3439
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
3433 3440
        member_nodes = [node_uuid
3434 3441
                        for group in lock_groups
3435 3442
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
......
3439 3446
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3440 3447

  
3441 3448
  def CheckPrereq(self):
3442
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3449
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3443 3450
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3444 3451
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3445 3452

  
3446 3453
    assert (self.req_target_uuids is None or
3447 3454
            owned_groups.issuperset(self.req_target_uuids))
3448
    assert owned_instances == set([self.op.instance_name])
3455
    assert owned_instance_names == set([self.op.instance_name])
3449 3456

  
3450 3457
    # Get instance information
3451
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3458
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3452 3459

  
3453 3460
    # Check if node groups for locked instance are still correct
3454 3461
    assert owned_nodes.issuperset(self.instance.all_nodes), \
3455 3462
      ("Instance %s's nodes changed while we kept the lock" %
3456 3463
       self.op.instance_name)
3457 3464

  
3458
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3465
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
3459 3466
                                          owned_groups)
3460 3467

  
3461 3468
    if self.req_target_uuids:
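
The instance.py hunks above all follow the same pattern: the opcode now carries both instance_uuid and instance_name, the name is resolved once through ExpandInstanceUuidAndName, and every later config call (GetInstanceInfo, RemoveInstance, RenameInstance, MarkInstanceOffline/MarkInstanceDown, ReleaseDRBDMinors, GetInstanceNodeGroups) is keyed by the UUID. Below is a minimal sketch of that flow, assuming only the signatures visible in this diff; FakeConfig is an illustrative stand-in, not the real ConfigWriter.

  # Illustrative stand-in for the small slice of the config API that these
  # hunks touch; it is not the real ConfigWriter from lib/config.py.
  class FakeConfig(object):
    def __init__(self):
      self._by_uuid = {}        # instance uuid -> instance object
      self._name_to_uuid = {}   # instance name -> instance uuid

    def AddInstance(self, inst):
      self._by_uuid[inst.uuid] = inst
      self._name_to_uuid[inst.name] = inst.uuid

    def GetInstanceInfoByName(self, name):
      # Name-based lookup, used where only a name is available.
      return self._by_uuid.get(self._name_to_uuid.get(name))

    def GetInstanceInfo(self, inst_uuid):
      # UUID-based lookup, the common case after this change.
      return self._by_uuid.get(inst_uuid)

  def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
    # Sketch of the helper's apparent contract: resolve the name once and
    # hand back the (uuid, name) pair that every later config call uses.
    inst = cfg.GetInstanceInfoByName(name)
    if inst is None:
      raise LookupError("Instance '%s' not known" % name)
    if expected_uuid is not None and expected_uuid != inst.uuid:
      raise LookupError("UUID/name mismatch for instance '%s'" % name)
    return (inst.uuid, inst.name)

The tuple unpacking seen throughout these hunks is simply this helper's (uuid, name) return value being stored back on the opcode, so lock names can keep using the instance name while config access switches to the UUID.
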
b/lib/cmdlib/instance_migration.py
30 30
from ganeti.masterd import iallocator
31 31
from ganeti import utils
32 32
from ganeti.cmdlib.base import LogicalUnit, Tasklet
33
from ganeti.cmdlib.common import ExpandInstanceName, \
33
from ganeti.cmdlib.common import ExpandInstanceUuidAndName, \
34 34
  CheckIAllocatorOrNode, ExpandNodeUuidAndName
35 35
from ganeti.cmdlib.instance_storage import CheckDiskConsistency, \
36 36
  ExpandCheckDisks, ShutdownInstanceDisks, AssembleInstanceDisks
......
72 72
  if level == locking.LEVEL_NODE_ALLOC:
73 73
    assert lu.op.instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
74 74

  
75
    instance = lu.cfg.GetInstanceInfo(lu.op.instance_name)
75
    instance = lu.cfg.GetInstanceInfo(lu.op.instance_uuid)
76 76

  
77 77
    # Node locks are already declared here rather than at LEVEL_NODE as we need
78 78
    # the instance object anyway to declare the node allocation lock.
......
118 118
    _ExpandNamesForMigration(self)
119 119

  
120 120
    self._migrater = \
121
      TLMigrateInstance(self, self.op.instance_name, False, True, False,
122
                        self.op.ignore_consistency, True,
121
      TLMigrateInstance(self, self.op.instance_uuid, self.op.instance_name,
122
                        False, True, False, self.op.ignore_consistency, True,
123 123
                        self.op.shutdown_timeout, self.op.ignore_ipolicy)
124 124

  
125 125
    self.tasklets = [self._migrater]
......
177 177
    _ExpandNamesForMigration(self)
178 178

  
179 179
    self._migrater = \
180
      TLMigrateInstance(self, self.op.instance_name, self.op.cleanup,
181
                        False, self.op.allow_failover, False,
180
      TLMigrateInstance(self, self.op.instance_uuid, self.op.instance_name,
181
                        self.op.cleanup, False, self.op.allow_failover, False,
182 182
                        self.op.allow_runtime_changes,
183 183
                        constants.DEFAULT_SHUTDOWN_TIMEOUT,
184 184
                        self.op.ignore_ipolicy)
......
255 255
  _MIGRATION_POLL_INTERVAL = 1      # seconds
256 256
  _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
257 257

  
258
  def __init__(self, lu, instance_name, cleanup, failover, fallback,
259
               ignore_consistency, allow_runtime_changes, shutdown_timeout,
260
               ignore_ipolicy):
258
  def __init__(self, lu, instance_uuid, instance_name, cleanup, failover,
259
               fallback, ignore_consistency, allow_runtime_changes,
260
               shutdown_timeout, ignore_ipolicy):
261 261
    """Initializes this class.
262 262

  
263 263
    """
264 264
    Tasklet.__init__(self, lu)
265 265

  
266 266
    # Parameters
267
    self.instance_uuid = instance_uuid
267 268
    self.instance_name = instance_name
268 269
    self.cleanup = cleanup
269 270
    self.live = False # will be overridden later
......
280 281
    This checks that the instance is in the cluster.
281 282

  
282 283
    """
283
    instance_name = ExpandInstanceName(self.lu.cfg, self.instance_name)
284
    self.instance = self.cfg.GetInstanceInfo(instance_name)
284
    (self.instance_uuid, self.instance_name) = \
285
      ExpandInstanceUuidAndName(self.lu.cfg, self.instance_uuid,
286
                                self.instance_name)
287
    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
285 288
    assert self.instance is not None
286 289
    cluster = self.cfg.GetClusterInfo()
287 290

  
......
448 451

  
449 452
    # FIXME: add a self.ignore_ipolicy option
450 453
    req = iallocator.IAReqRelocate(
451
          name=self.instance_name,
454
          inst_uuid=self.instance_uuid,
452 455
          relocate_from_node_uuids=[self.instance.primary_node])
453 456
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
454 457

  
b/lib/cmdlib/instance_operation.py
94 94
    This checks that the instance is in the cluster.
95 95

  
96 96
    """
97
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
97
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
98 98
    assert self.instance is not None, \
99 99
      "Cannot retrieve locked instance %s" % self.op.instance_name
100 100

  
......
147 147

  
148 148
    """
149 149
    if not self.op.no_remember:
150
      self.cfg.MarkInstanceUp(self.instance.name)
150
      self.cfg.MarkInstanceUp(self.instance.uuid)
151 151

  
152 152
    if self.primary_offline:
153 153
      assert self.op.ignore_offline_nodes
......
200 200
    This checks that the instance is in the cluster.
201 201

  
202 202
    """
203
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
203
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
204 204
    assert self.instance is not None, \
205 205
      "Cannot retrieve locked instance %s" % self.op.instance_name
206 206

  
......
224 224
    # If the instance is offline we shouldn't mark it as down, as that
225 225
    # resets the offline flag.
226 226
    if not self.op.no_remember and self.instance.admin_state in INSTANCE_ONLINE:
227
      self.cfg.MarkInstanceDown(self.instance.name)
227
      self.cfg.MarkInstanceDown(self.instance.uuid)
228 228

  
229 229
    if self.primary_offline:
230 230
      assert self.op.ignore_offline_nodes
......
272 272
    This checks that the instance is in the cluster and is not running.
273 273

  
274 274
    """
275
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
275
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
276 276
    assert instance is not None, \
277 277
      "Cannot retrieve locked instance %s" % self.op.instance_name
278 278
    CheckNodeOnline(self, instance.primary_node, "Instance primary node"
......
367 367
    This checks that the instance is in the cluster.
368 368

  
369 369
    """
370
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
370
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
371 371
    assert self.instance is not None, \
372 372
      "Cannot retrieve locked instance %s" % self.op.instance_name
373 373
    CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
......
421 421
        raise errors.OpExecError("Could not start instance for"
422 422
                                 " full reboot: %s" % msg)
423 423

  
424
    self.cfg.MarkInstanceUp(self.instance.name)
424
    self.cfg.MarkInstanceUp(self.instance.uuid)
425 425

  
426 426

  
427 427
def GetInstanceConsole(cluster, instance, primary_node):
......
466 466
    This checks that the instance is in the cluster.
467 467

  
468 468
    """
469
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
469
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
470 470
    assert self.instance is not None, \
471 471
      "Cannot retrieve locked instance %s" % self.op.instance_name
472 472
    CheckNodeOnline(self, self.instance.primary_node)
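
The start/stop/reboot hunks above switch the admin-state mutators over to the UUID key as well. Extending the hypothetical FakeConfig from the first sketch with the two mutators used here; this is an assumption about shape, not the real ConfigWriter (which records constants such as ADMINST_DOWN / ADMINST_OFFLINE).

  class FakeConfigWithState(FakeConfig):
    # Admin-state mutators keyed by instance UUID, mirroring the
    # MarkInstanceUp/MarkInstanceDown calls in the hunks above.
    def MarkInstanceUp(self, inst_uuid):
      self._by_uuid[inst_uuid].admin_state = "up"

    def MarkInstanceDown(self, inst_uuid):
      self._by_uuid[inst_uuid].admin_state = "down"

  # Usage sketch: resolve by name once, then mutate by UUID.
  #   (uuid, _) = ExpandInstanceUuidAndName(cfg, None, "instance1.example.com")
  #   cfg.MarkInstanceUp(uuid)
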
b/lib/cmdlib/instance_query.py
47 47
    lu.share_locks = ShareAll()
48 48

  
49 49
    if self.names:
50
      self.wanted = GetWantedInstances(lu, self.names)
50
      (_, self.wanted) = GetWantedInstances(lu, self.names)
51 51
    else:
52 52
      self.wanted = locking.ALL_SET
53 53

  
......
73 73
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
74 74
          set(group_uuid
75 75
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
76
              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
76
              for group_uuid in
77
                lu.cfg.GetInstanceNodeGroups(
78
                  lu.cfg.GetInstanceInfoByName(instance_name).uuid))
77 79
      elif level == locking.LEVEL_NODE:
78 80
        lu._LockInstancesNodes() # pylint: disable=W0212
79 81

  
......
81 83
        lu.needed_locks[locking.LEVEL_NETWORK] = \
82 84
          frozenset(net_uuid
... This diff was truncated because it exceeds the maximum size that can be displayed.
